| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
#!/bin/bash
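# Trace every PNG in the current directory into an SVG with autotrace.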
for i in *.png; do autotrace "$i" -despeckle-level 14 --output-file "${i%.png}.svg"; done
| automata/tri-delaunay | util/png2svg.sh | Shell | mit | 120 |
#@desc List the size of the given directory
#@author linjunjie
#@keyword du
#@env osx
du -h -d 1 ./shell_handbook/
| linjunjie/shell_handbook | du.sh | Shell | mit | 108 |
#!/bin/bash
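# Run "npm install" through the project's docker/run.sh wrapper.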
$(pwd)/docker/run.sh npm install
| hugo-cardenas/video-backupper | docker/init.sh | Shell | mit | 45 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for DSA-2763-1
#
# Security announcement date: 2013-09-24 00:00:00 UTC
# Script generation date: 2017-01-01 21:06:41 UTC
#
# Operating System: Debian 7 (Wheezy)
# Architecture: x86_64
#
# Vulnerable packages fix on version:
# - pyopenssl:0.13-2+deb7u1
# - python-openssl:0.13-2+deb7u1
# - python-openssl-doc:0.13-2+deb7u1
# - python-openssl-dbg:0.13-2+deb7u1
# - python3-openssl:0.13-2+deb7u1
# - python3-openssl-dbg:0.13-2+deb7u1
#
# Last versions recommended by security team:
# - pyopenssl:0.13-2+deb7u1
# - python-openssl:0.13-2+deb7u1
# - python-openssl-doc:0.13-2+deb7u1
# - python-openssl-dbg:0.13-2+deb7u1
# - python3-openssl:0.13-2+deb7u1
# - python3-openssl-dbg:0.13-2+deb7u1
#
# CVE List:
# - CVE-2013-4314
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo apt-get install --only-upgrade pyopenssl=0.13-2+deb7u1 -y
sudo apt-get install --only-upgrade python-openssl=0.13-2+deb7u1 -y
sudo apt-get install --only-upgrade python-openssl-doc=0.13-2+deb7u1 -y
sudo apt-get install --only-upgrade python-openssl-dbg=0.13-2+deb7u1 -y
sudo apt-get install --only-upgrade python3-openssl=0.13-2+deb7u1 -y
sudo apt-get install --only-upgrade python3-openssl-dbg=0.13-2+deb7u1 -y
| Cyberwatch/cbw-security-fixes | Debian_7_(Wheezy)/x86_64/2013/DSA-2763-1.sh | Shell | mit | 1,349 |
#!/bin/bash
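# Rebuild the component directory layout and copy in the release DLLs, samples and screenshots.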
rm -r ./lib
rm -r ./samples
rm -r ./screenshots
echo "Directores removed"
mkdir ./screenshots
mkdir ./lib
mkdir ./lib/ios
mkdir ./samples
mkdir ./samples/ios
echo "Directores created"
rsync ../screenshots/* ./screenshots
echo "Screenshots copied"
rsync ../library/bin/release/mTouchPDFReaderLibrary.dll ./lib/ios/
rsync ../library/bin/release/autofac.dll ./lib/ios/
echo "Library dlls copied"
rsync -r --exclude=bin --exclude=obj ../demo/* ./samples/ios
echo "Run mono xamarin-component.exe package component"
| AlexanderMac/mTouch-PDFReader | Component/build_component.sh | Shell | mit | 528 |
#!/bin/bash -x
#
# Generated - do not edit!
#
# Macros
TOP=`pwd`
CND_CONF=default
CND_DISTDIR=dist
TMPDIR=build/${CND_CONF}/${IMAGE_TYPE}/tmp-packaging
TMPDIRNAME=tmp-packaging
OUTPUT_PATH=dist/${CND_CONF}/${IMAGE_TYPE}/debug_express.X.${IMAGE_TYPE}.${OUTPUT_SUFFIX}
OUTPUT_BASENAME=debug_express.X.${IMAGE_TYPE}.${OUTPUT_SUFFIX}
PACKAGE_TOP_DIR=debugexpress.x/
# Functions
function checkReturnCode
{
rc=$?
if [ $rc != 0 ]
then
exit $rc
fi
}
function makeDirectory
# $1 directory path
# $2 permission (optional)
{
mkdir -p "$1"
checkReturnCode
if [ "$2" != "" ]
then
chmod $2 "$1"
checkReturnCode
fi
}
function copyFileToTmpDir
# $1 from-file path
# $2 to-file path
# $3 permission
{
cp "$1" "$2"
checkReturnCode
if [ "$3" != "" ]
then
chmod $3 "$2"
checkReturnCode
fi
}
# Setup
cd "${TOP}"
mkdir -p ${CND_DISTDIR}/${CND_CONF}/package
rm -rf ${TMPDIR}
mkdir -p ${TMPDIR}
# Copy files and create directories and links
cd "${TOP}"
makeDirectory ${TMPDIR}/debugexpress.x/bin
copyFileToTmpDir "${OUTPUT_PATH}" "${TMPDIR}/${PACKAGE_TOP_DIR}bin/${OUTPUT_BASENAME}" 0755
# Generate tar file
cd "${TOP}"
rm -f ${CND_DISTDIR}/${CND_CONF}/package/debugexpress.x.tar
cd ${TMPDIR}
tar -vcf ../../../../${CND_DISTDIR}/${CND_CONF}/package/debugexpress.x.tar *
checkReturnCode
# Cleanup
cd "${TOP}"
rm -rf ${TMPDIR}
| ep1cman/FM | Code/Debug Express.X/nbproject/Package-default.bash | Shell | mit | 1,401 |
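# Build the Docker image and tag it "udid".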
docker build -t udid .
| chedabob/whatismyudid | build.sh | Shell | mit | 24 |
#!/bin/sh
# failure
../build/examples/test3 -f=9 -f=1.0.0 -s=asdf asdf asdf > tmp.out 2>&1
if cmp -s tmp.out $srcdir/test32.out; then
exit 0
else
exit 1
fi
| mjkoo/tclap | tests/test32.sh | Shell | mit | 161 |
#!/bin/bash
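# Publish the Hugo site: clone the live branch into public/, rebuild, then commit and push.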
set -e
rm -rf public
git clone -b master [email protected]:takus/takus.github.com.git public
hugo
cd public
git add .
git commit --allow-empty -m "Site updated at $(date -u --iso-8601=seconds)"
git push origin master
| takus/takus.github.com | ci/deploy.sh | Shell | mit | 229 |
#!/bin/bash
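# Reformat the kb.log inference queries into tab-separated substitution bindings.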
results="../results/inference_queries"
mkdir -p "$results"
echo -e "\tinfer([happens(e(S,A),T),subject(e(S,A),S),act(e(S,A),A)])"
cat "$results/kb.log" | tr -d '[:blank:]' | sed 's/:/\t/g' | sed 's/infer(\[happens(e(/{S\//g' | sed 's/),/,T\//g' | sed 's/,T\/subject/}\t/g' | sed 's/sit/A\/sit/g' | sed 's/stand/A\/stand/g' | cut -f1,3
| kevinmcareavey/secproblog | scripts/format-inference_queries.sh | Shell | mit | 347 |
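# Put the dom4j and jaxen jars on the Java CLASSPATH for the demo.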
export CLASSPATH=$CLASSPATH:/home/kim/gitspace/java/lib/dom4j-1.6.1.jar:/home/kim/gitspace/java/lib/jaxen-1.1.6.jar
| sggdv/java | demo.sh | Shell | mit | 115 |
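# Build a macOS .icns icon from icon.png at every required resolution.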
mkdir MyIcon.iconset
sips -z 16 16 icon.png --out MyIcon.iconset/icon_16x16.png
sips -z 32 32 icon.png --out MyIcon.iconset/[email protected]
sips -z 32 32 icon.png --out MyIcon.iconset/icon_32x32.png
sips -z 64 64 icon.png --out MyIcon.iconset/[email protected]
sips -z 128 128 icon.png --out MyIcon.iconset/icon_128x128.png
sips -z 256 256 icon.png --out MyIcon.iconset/[email protected]
sips -z 256 256 icon.png --out MyIcon.iconset/icon_256x256.png
sips -z 512 512 icon.png --out MyIcon.iconset/[email protected]
sips -z 512 512 icon.png --out MyIcon.iconset/icon_512x512.png
sips -z 1024 1024 icon.png --out MyIcon.iconset/[email protected]
iconutil -c icns MyIcon.iconset
rm -R MyIcon.iconset
| huzecong/FlowFree | icon/build_icns.sh | Shell | mit | 731 |
#!/bin/bash
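# Download a set of Minecraft Pi world saves and copy them into the worlds directory.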
set -ue
BASE_URL=https://thebraithwaites.co.uk/wp-content/uploads/2016/09
DL_DIR=world
WORLD_DIR=~/.minecraft/games/com.mojang/minecraftWorlds/
function download_unzip(){
wget $1
unzip `basename $1`
}
mkdir -p $DL_DIR
cd $DL_DIR
download_unzip $BASE_URL/Tnt-Trouble.zip
download_unzip $BASE_URL/The-Grid-2.zip
download_unzip $BASE_URL/Canyons.zip
download_unzip $BASE_URL/Columbia-Bioshock-Infinite.zip
download_unzip $BASE_URL/Deep-Ocean.zip
download_unzip $BASE_URL/Dense-Forest.zip
download_unzip $BASE_URL/Lava-Citadel.zip
download_unzip $BASE_URL/Level-Screenshots.zip
download_unzip $BASE_URL/Nether-Nightmare.zip
download_unzip $BASE_URL/Paradise-Cove.zip
download_unzip $BASE_URL/Plaza.zip
download_unzip $BASE_URL/prisonEscape.zip
download_unzip $BASE_URL/Temple-of-Notch.zip
download_unzip $BASE_URL/The-Grid.zip
download_unzip $BASE_URL/The-Island.zip
download_unzip $BASE_URL/The-N.R.A.M-world-save.zip
download_unzip $BASE_URL/The-Underground.zip
download_unzip $BASE_URL/Volcano.zip
download_unzip $BASE_URL/Canyons.zip
download_unzip $BASE_URL/MW3-Seatown.zip
download_unzip $BASE_URL/Hamster-Escape-Part-1.zip
cp -rf * $WORLD_DIR
exit 0
| takahasi/utility | minecraft-pi/get-world.sh | Shell | mit | 1,176 |
#!/bin/bash
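# CI helper: do nothing outside Docker; inside a container, install git and Python 3 tooling.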
[[ ! -e /.dockerenv ]] && [[ ! -e /.dockerinit ]] && exit 0
apt-get update -yqq
apt-get install -yqq git python3 python3-dev python3-pip wget
| number13dev/mincloud | ci/docker_install.sh | Shell | mit | 155 |
#!/bin/sh
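# Docked layout: external DP2-2 display on the left, laptop eDP1 panel on the right.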
xrdb -merge $HOME/.Xresources
xrandr --dpi 96
xrandr --output eDP1 --mode 2560x1440 --pos 1920x0 --rotate normal --output DP1 --off --output DP2-1 --off --output DP2-2 --mode 1920x1200 --pos 0x0 --rotate normal --output DP2-3 --off --output HDMI2 --off --output HDMI1 --off --output DP2 --off
| chrcoe/dotfiles | screenlayout.symlink/docked-single_DP2-2.sh | Shell | mit | 303 |
#!/bin/sh
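# Download the Atom service-menu entry into the KDE4 ServiceMenus directory.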
Service_Dir=~/.kde/share/kde4/services/ServiceMenus/
mkdir $Service_Dir -p
cd $Service_Dir
wget -c https://foreachsam.github.io/book-editor-atom/book/resource/integration/atom.desktop
| foreachsam/book-editor-atom | book/resource/integration/atom-install.sh | Shell | cc0-1.0 | 195 |
export JAVA_PATH=/usr/lib/jvm/java-8-openjdk-amd64/jre/bin/java
if [ ! -f $JAVA_PATH ]
then
export JAVA_PATH=/usr/lib/jvm/java-8-oracle/bin/java
fi
CORENLP_OPTIONS="-parse.flags \" -makeCopulaHead\"" CORENLP=stanford-corenlp-full-* python3 -m corenlp --memory 1g -p 8999 &
for job in `jobs -p`; do echo $job; wait $job; done
| ProjetPP/Deployment | run_corenlp.sh | Shell | cc0-1.0 | 336 |
#!/bin/bash
if [ -z $1 ]; then
echo "Usage: $0 <dbr_file>"
else
head -1 $1 | tr '","' '\n' | grep -v '^ *$' | awk '{print NR" "$0}'
fi
exit
| bmarcondes/toolbox | dbr_colums.sh | Shell | gpl-2.0 | 145 |
#!/bin/bash
echo "gen_misc.sh version 20150511"
echo ""
echo "Please follow below steps(1-5) to generate specific bin(s):"
echo "STEP 1: choose boot version(0=boot_v1.1, 1=boot_v1.2+, 2=none)"
echo "enter(0/1/2, default 2):"
read input
if [ -z "$input" ]; then
boot=none
elif [ $input == 0 ]; then
boot=old
elif [ $input == 1 ]; then
boot=new
else
boot=none
fi
echo "boot mode: $boot"
echo ""
echo "STEP 2: choose bin generate(0=eagle.flash.bin+eagle.irom0text.bin, 1=user1.bin, 2=user2.bin)"
echo "enter (0/1/2, default 0):"
read input
if [ -z "$input" ]; then
if [ $boot != none ]; then
boot=none
echo "ignore boot"
fi
app=0
echo "generate bin: eagle.flash.bin+eagle.irom0text.bin"
elif [ $input == 1 ]; then
if [ $boot == none ]; then
app=0
echo "choose no boot before"
echo "generate bin: eagle.flash.bin+eagle.irom0text.bin"
else
app=1
echo "generate bin: user1.bin"
fi
elif [ $input == 2 ]; then
if [ $boot == none ]; then
app=0
echo "choose no boot before"
echo "generate bin: eagle.flash.bin+eagle.irom0text.bin"
else
app=2
echo "generate bin: user2.bin"
fi
else
if [ $boot != none ]; then
boot=none
echo "ignore boot"
fi
app=0
echo "generate bin: eagle.flash.bin+eagle.irom0text.bin"
fi
echo ""
echo "STEP 3: choose spi speed(0=20MHz, 1=26.7MHz, 2=40MHz, 3=80MHz)"
echo "enter (0/1/2/3, default 2):"
read input
if [ -z "$input" ]; then
spi_speed=40
elif [ $input == 0 ]; then
spi_speed=20
elif [ $input == 1 ]; then
spi_speed=26.7
elif [ $input == 3 ]; then
spi_speed=80
else
spi_speed=40
fi
echo "spi speed: $spi_speed MHz"
echo ""
echo "STEP 4: choose spi mode(0=QIO, 1=QOUT, 2=DIO, 3=DOUT)"
echo "enter (0/1/2/3, default 0):"
read input
if [ -z "$input" ]; then
spi_mode=QIO
elif [ $input == 1 ]; then
spi_mode=QOUT
elif [ $input == 2 ]; then
spi_mode=DIO
elif [ $input == 3 ]; then
spi_mode=DOUT
else
spi_mode=QIO
fi
echo "spi mode: $spi_mode"
echo ""
echo "STEP 5: choose spi size and map"
echo " 0= 512KB( 256KB+ 256KB)"
echo " 2=1024KB( 512KB+ 512KB)"
echo " 3=2048KB( 512KB+ 512KB)"
echo " 4=4096KB( 512KB+ 512KB)"
echo " 5=2048KB(1024KB+1024KB)"
echo " 6=4096KB(1024KB+1024KB)"
echo "enter (0/2/3/4/5/6, default 0):"
read input
if [ -z "$input" ]; then
spi_size_map=0
echo "spi size: 512KB"
echo "spi ota map: 256KB + 256KB"
elif [ $input == 2 ]; then
spi_size_map=2
echo "spi size: 1024KB"
echo "spi ota map: 512KB + 512KB"
elif [ $input == 3 ]; then
spi_size_map=3
echo "spi size: 2048KB"
echo "spi ota map: 512KB + 512KB"
elif [ $input == 4 ]; then
spi_size_map=4
echo "spi size: 4096KB"
echo "spi ota map: 512KB + 512KB"
elif [ $input == 5 ]; then
spi_size_map=5
echo "spi size: 2048KB"
echo "spi ota map: 1024KB + 1024KB"
elif [ $input == 6 ]; then
spi_size_map=6
echo "spi size: 4096KB"
echo "spi ota map: 1024KB + 1024KB"
else
spi_size_map=0
echo "spi size: 512KB"
echo "spi ota map: 256KB + 256KB"
fi
echo ""
touch user/user_main.c
echo ""
echo "start..."
echo ""
echo "make COMPILE=gcc BOOT=$boot APP=$app SPI_SPEED=$spi_speed SPI_MODE=$spi_mode SPI_SIZE_MAP=$spi_size_map"
make COMPILE=gcc BOOT=$boot APP=$app SPI_SPEED=$spi_speed SPI_MODE=$spi_mode SPI_SIZE_MAP=$spi_size_map
| nvl1109/esp_iot_device | gen_misc.sh | Shell | gpl-2.0 | 3,387 |
#!/bin/sh
# Copyright (C) 2010-2012 Red Hat, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
. lib/inittest
test -e LOCAL_LVMPOLLD && skip
extend() {
lvextend --use-policies --config "activation { snapshot_autoextend_threshold = $1 }" $vg/snap
}
write_() {
dd if=/dev/zero of="$DM_DEV_DIR/$vg/snap" bs=1k count=$2 seek=$1 oflag=direct
}
percent_() {
get lv_field $vg/snap snap_percent | cut -d. -f1
}
wait_for_change_() {
# dmeventd only checks every 10 seconds :(
for i in $(seq 1 25) ; do
test "$(percent_)" != "$1" && return
sleep 1
done
return 1 # timeout
}
aux prepare_dmeventd
aux prepare_vg 2
lvcreate -aey -L16M -n base $vg
lvcreate -s -L4M -n snap $vg/base
write_ 0 1000
test 24 -eq $(percent_)
lvchange --monitor y $vg/snap
write_ 1000 1700
pre=$(percent_)
# Normally the usage should be ~66% here, however on slower systems
# dmeventd could be actually 'fast' enough to have COW already resized now
# so mark test skipped if we are below 50% by now
test $pre -gt 50 || skip
wait_for_change_ $pre
test $pre -gt $(percent_)
# check that a second extension happens; we used to fail to extend when the
# utilisation ended up between THRESH and (THRESH + 10)... see RHBZ 754198
# (the utilisation after the write should be 57 %)
write_ 2700 2000
pre=$(percent_)
# Mark test as skipped if already resized...
test $pre -gt 70 || skip
wait_for_change_ $pre
test $pre -gt $(percent_)
vgremove -f $vg
| jh80chung/lvm2 | test/shell/lvextend-snapshot-dmeventd.sh | Shell | gpl-2.0 | 1,802 |
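# Poll GPIO 12 once per second and print its current value.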
for (( ;; ))
do
sleep 1
cat /sys/class/gpio/gpio12/value
done
| ElvisLouis/code | work/project/Edison/elvis/work/cpp/listen.sh | Shell | gpl-2.0 | 64 |
#!/usr/bin/env bash
#pragma repo-format darcs-2
. lib
mkdir future
cd future
darcs init
touch titi
darcs add titi
darcs record -am titi
cat > _darcs/format <<EOF
hashed|gobbledygook
darcs-2
EOF
cat _darcs/format
cd ..
if grep darcs-1 .darcs/defaults; then
exit 200
fi
# get future repo: should be ok
darcs get future temp1
cd temp1
darcs changes
touch toto
darcs add toto
darcs record -am 'blah'
cd ..
rm -rf temp1 future
| DavidAlphaFox/darcs | tests/issue1978.sh | Shell | gpl-2.0 | 433 |
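# Split the scanned page OCS-467.png into left (-A) and right (-B) halves.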
convert images/OCS-467.png -crop 1711x5122+0+0 +repage images/OCS-467-A.png
convert images/OCS-467.png -crop 1735x5122+1711+0 +repage images/OCS-467-B.png
#/OCS-467.png
#
#
#
| jonnymwalker/Staroslavjanskij-Slovar | scripts/middlesplit.OCS-467.sh | Shell | gpl-2.0 | 175 |
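# Arch Linux UEFI install outline: check network, partition, pacstrap, then configure inside the chroot.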
USERNAME=gor
HOSTNAME=myarch
# Check if it is really UEFI; the output should list the UEFI variables
efivar -l
# Check for connection
output=$(./ping_test.sh | tail -1)
if [ "$output" == "ip is up" ]; then
echo -e "connection is up"
else
echo -e "connection is down. trying to acquire IP....."
dhcpcd
fi
# Partitioning. Use gdisk: it's an fdisk equivalent for GPT (GUID Partition Table), which you need for UEFI boot.
##gdisk /dev/sda < partition.txt
# Create filesystems
mkfs.ext4 /dev/sda2
##mkfs.ext4 /dev/sda3
mkfs.fat -F32 /dev/sda1
# Mount the filesystems
mount /dev/sda2 /mnt
mkdir /mnt/boot
##mkdir /mnt/home
mount /dev/sda1 /mnt/boot
##mount /dev/sda3 /mnt/home
# And install the system
pacstrap /mnt base base-devel
##pacman-key --init && pacman-key --populate archlinux
##pacman -Sy
# Set up fstab
genfstab -U -p /mnt >> /mnt/etc/fstab
# Configuring the system
arch-chroot /mnt /bin/bash
# Create locale file
# Remove the "#" in front of the locale(s) you need, en_US.UTF-8 in my case
nano /etc/locale.gen
# Save the file and generate the locales
locale-gen
# locale.conf
echo LANG=en_US.UTF-8 > /etc/locale.conf
export LANG=en_US.UTF-8
# Set up the hostname (edit 2nd line to customize)
echo "setting hostname as $HOSTNAME"
echo $HOSTNAME> /etc/hostname
#install bootloader
#pacman -S grub-efi-x86_64
#mkdir -p /boot/efi
#mount -t vfat /dev/sda1 /boot/efi
#modprobe dm-mod
#grub-install --target=x86_64-efi --efi-directory=/boot/efi --bootloader-id=arch_grub --boot-directory=/boot/efi/EFI --recheck --debug
#mkdir -p /boot/efi/EFI/grub/locale
#cp /usr/share/locale/en\@quot/LC_MESSAGES/grub.mo /boot/efi/EFI/grub/locale/en.mo
#grub-mkconfig -o /boot/efi/EFI/grub/grub.cfg
# Install the bootloader
# The mount command will most likely result in an error due to it being loaded already
mount -t efivarfs efivarfs /sys/firmware/efi/efivars
pacman -S gummiboot
gummiboot install
# Copy configuration file to add an entry for Arch Linux to the gummiboot manager
cp arch.conf /boot/loader/entries/
# Make sure we have a network connection after we reboot
systemctl enable dhcpcd.service
# Set root password
echo "set root password"
passwd
# Create a user (edit the first line)
useradd -m -g users -G wheel -s /bin/bash $USERNAME
# Create a password for user
echo "set password for $USERNAME"
passwd $USERNAME
# Add user to the sudoers group
echo "$USERNAME ALL=(ALL) NOPASSWD:ALL" >> /etc/sudoers
# Exit out of the chroot, unmount and reboot
exit
#umount -R /mnt
#reboot
| keeper575/arch-linux-uefi-installer | install.sh | Shell | gpl-2.0 | 2,506 |
#!/bin/sh
#
# Build a fat binary on Mac OS X, thanks Ryan!
# Number of CPUs (for make -j)
NCPU=`sysctl -n hw.ncpu`
if test x$NJOB = x; then
NJOB=$NCPU
fi
# SDK path
if test x$SDK_PATH = x; then
SDK_PATH=/Developer/SDKs
fi
# Generic, cross-platform CFLAGS you always want go here.
CFLAGS="-O3 -g -pipe"
# We dynamically load X11, so using the system X11 headers is fine.
BASE_CONFIG_FLAGS="--build=`uname -p`-apple-darwin \
--x-includes=/usr/X11R6/include --x-libraries=/usr/X11R6/lib"
# PowerPC 32-bit compiler flags
CONFIG_PPC="--host=powerpc-apple-darwin"
CC_PPC="gcc-4.0"
CXX_PPC="g++-4.0"
BUILD_FLAGS_PPC="-arch ppc -mmacosx-version-min=10.4"
# Intel 32-bit compiler flags
CONFIG_X86="--host=i386-apple-darwin"
CC_X86="gcc"
CXX_X86="g++"
BUILD_FLAGS_X86="-arch i386 -mmacosx-version-min=10.4"
# Intel 64-bit compiler flags
CONFIG_X64="--host=x86_64-apple-darwin"
CC_X64="gcc"
CXX_X64="g++"
BUILD_FLAGS_X64="-arch x86_64 -mmacosx-version-min=10.6"
#
# Find the configure script
#
srcdir=`dirname $0`/..
srcdir=`cd $srcdir && pwd`
auxdir=$srcdir/build-scripts
cd $srcdir
allow_ppc="yes"
which gcc-4.0 >/dev/null 2>/dev/null
if [ "x$?" = "x1" ]; then
#echo "WARNING: Can't find gcc-4.0, which means you don't have Xcode 3."
#echo "WARNING: Therefore, we can't do PowerPC support."
allow_ppc="no"
fi
#
# Figure out which phase to build:
# all,
# configure, configure-ppc, configure-x86, configure-x64
# make, make-ppc, make-x86, make-x64, merge
# install
# clean
if test x"$1" = x; then
phase=all
else
phase="$1"
fi
case $phase in
all)
configure_ppc="$allow_ppc"
configure_x86="yes"
configure_x64="yes"
make_ppc="$allow_ppc"
make_x86="yes"
make_x64="yes"
merge="yes"
;;
configure)
configure_ppc="$allow_ppc"
configure_x86="yes"
configure_x64="yes"
;;
configure-ppc)
configure_ppc="$allow_ppc"
;;
configure-x86)
configure_x86="yes"
;;
configure-x64)
configure_x64="yes"
;;
make)
make_ppc="$allow_ppc"
make_x86="yes"
make_x64="yes"
merge="yes"
;;
make-ppc)
make_ppc="$allow_ppc"
;;
make-x86)
make_x86="yes"
;;
make-x64)
make_x64="yes"
;;
merge)
merge="yes"
;;
install)
install_bin="yes"
install_hdrs="yes"
install_lib="yes"
install_data="yes"
;;
install-bin)
install_bin="yes"
;;
install-hdrs)
install_hdrs="yes"
;;
install-lib)
install_lib="yes"
;;
install-data)
install_data="yes"
;;
clean)
clean_ppc="yes"
clean_x86="yes"
clean_x64="yes"
;;
clean-ppc)
clean_ppc="yes"
;;
clean-x86)
clean_x86="yes"
;;
clean-x64)
clean_x64="yes"
;;
*)
echo "Usage: $0 [all|configure[-ppc|-x86|-x64]|make[-ppc|-x86|-x64]|merge|install|clean[-ppc|-x86|-x64]]"
exit 1
;;
esac
case `uname -p` in
*86)
native_path=x86
;;
*powerpc)
native_path=ppc
;;
x86_64)
native_path=x64
;;
*)
echo "Couldn't figure out native architecture path"
exit 1
;;
esac
#
# Create the build directories
#
for dir in build build/ppc build/x86 build/x64; do
if test -d $dir; then
:
else
mkdir $dir || exit 1
fi
done
#
# Build the PowerPC 32-bit binary
#
if test x$configure_ppc = xyes; then
(cd build/ppc && \
sh ../../configure $BASE_CONFIG_FLAGS $CONFIG_PPC CC="$CC_PPC" CXX="$CXX_PPC" CFLAGS="$CFLAGS $BUILD_FLAGS_PPC $CFLAGS_PPC" LDFLAGS="$BUILD_FLAGS_PPC $LFLAGS_PPC") || exit 2
fi
if test x$make_ppc = xyes; then
(cd build/ppc && make -j$NJOB) || exit 3
fi
#
# Build the Intel 32-bit binary
#
if test x$configure_x86 = xyes; then
(cd build/x86 && \
sh ../../configure $BASE_CONFIG_FLAGS $CONFIG_X86 CC="$CC_X86" CXX="$CXX_X86" CFLAGS="$CFLAGS $BUILD_FLAGS_X86 $CFLAGS_X86" LDFLAGS="$BUILD_FLAGS_X86 $LFLAGS_X86") || exit 2
fi
if test x$make_x86 = xyes; then
(cd build/x86 && make -j$NJOB) || exit 3
fi
#
# Build the Intel 64-bit binary
#
if test x$configure_x64 = xyes; then
(cd build/x64 && \
sh ../../configure $BASE_CONFIG_FLAGS $CONFIG_X64 CC="$CC_X64" CXX="$CXX_X64" CFLAGS="$CFLAGS $BUILD_FLAGS_X64 $CFLAGS_X64" LDFLAGS="$BUILD_FLAGS_X64 $LFLAGS_X64") || exit 2
fi
if test x$make_x64 = xyes; then
(cd build/x64 && make -j$NJOB) || exit 3
fi
#
# Combine into fat binary
#
if test x$merge = xyes; then
output=.libs
sh $auxdir/mkinstalldirs build/$output
cd build
target=`find . -mindepth 4 -maxdepth 4 -type f -name '*.dylib' | head -1 | sed 's|.*/||'`
(lipo -create -o $output/$target `find . -mindepth 4 -maxdepth 4 -type f -name "*.dylib"` &&
ln -sf $target $output/libSDL2.dylib &&
lipo -create -o $output/libSDL2.a */build/.libs/libSDL2.a &&
cp $native_path/build/.libs/libSDL2.la $output &&
cp $native_path/build/.libs/libSDL2.lai $output &&
cp $native_path/build/libSDL2.la . &&
lipo -create -o libSDL2main.a */build/libSDL2main.a &&
echo "Build complete!" &&
echo "Files can be found in the build directory.") || exit 4
cd ..
fi
#
# Install
#
do_install()
{
echo $*
$* || exit 5
}
if test x$prefix = x; then
prefix=/usr/local
fi
if test x$exec_prefix = x; then
exec_prefix=$prefix
fi
if test x$bindir = x; then
bindir=$exec_prefix/bin
fi
if test x$libdir = x; then
libdir=$exec_prefix/lib
fi
if test x$includedir = x; then
includedir=$prefix/include
fi
if test x$datadir = x; then
datadir=$prefix/share
fi
if test x$mandir = x; then
mandir=$prefix/man
fi
if test x$install_bin = xyes; then
do_install sh $auxdir/mkinstalldirs $bindir
do_install /usr/bin/install -c -m 755 build/$native_path/sdl2-config $bindir/sdl2-config
fi
if test x$install_hdrs = xyes; then
do_install sh $auxdir/mkinstalldirs $includedir/SDL2
for src in $srcdir/include/*.h; do \
file=`echo $src | sed -e 's|^.*/||'`; \
do_install /usr/bin/install -c -m 644 $src $includedir/SDL2/$file; \
done
do_install /usr/bin/install -c -m 644 $srcdir/include/SDL_config_macosx.h $includedir/SDL2/SDL_config.h
fi
if test x$install_lib = xyes; then
do_install sh $auxdir/mkinstalldirs $libdir
do_install sh build/$native_path/libtool --mode=install /usr/bin/install -c build/libSDL2.la $libdir/libSDL2.la
do_install /usr/bin/install -c -m 644 build/libSDL2main.a $libdir/libSDL2main.a
do_install ranlib $libdir/libSDL2main.a
fi
if test x$install_data = xyes; then
do_install sh $auxdir/mkinstalldirs $datadir/aclocal
do_install /usr/bin/install -c -m 644 $srcdir/sdl2.m4 $datadir/aclocal/sdl2.m4
fi
#
# Clean up
#
do_clean()
{
echo $*
$* || exit 6
}
if test x$clean_ppc = xyes; then
do_clean rm -r build/ppc
fi
if test x$clean_x86 = xyes; then
do_clean rm -r build/x86
fi
if test x$clean_x64 = xyes; then
do_clean rm -r build/x64
fi
| ZiberLTD/windows | sdl/build-scripts/fatbuild.sh | Shell | gpl-2.0 | 7,184 |
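# Uninstall the vishap package and clean out local build artifacts.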
pip uninstall vishap -y
rm build -rf
rm dist -rf
rm builddocs.zip
rm src/vishap.egg-info -rf
| barseghyanartur/vishap | scripts/uninstall.sh | Shell | gpl-2.0 | 93 |
#!/bin/bash
dir_include=/home/lijiaying/Research/GitHub/ZILUv2/scripts
. $dir_include"/include.sh"
Nv=0
Nb=0
home_path=/home/lijiaying/Research/GitHub/ZILUv2
dir_cfg="cfg"
dir_test="test"
dir_temp="tmp"
dir_tool="tools"
dir_parser="parser"
prefix=$1
mkdir -p tmp
file_cfg=$prefix".cfg"
file_cpp=$prefix".cpp"
file_var=$prefix".var"
file_inv=$prefix".inv"
path_cfg=$dir_cfg"/"$file_cfg
path_cpp=$dir_test"/"$file_cpp
path_var=$dir_temp"/"$file_var
path_inv=$dir_temp"/"$file_inv
prefix_path_inv=$dir_temp"/"$prefix
if [ $# -lt 1 ]; then
echo "./build_project.sh needs more parameters"
echo "./build_project.sh cofig_prefix"
echo "try it again..."
exit 1
fi
#**********************************************************************************************
# Learning phase
#**********************************************************************************************
##########################################################################
# Prepare the target loop program
##########################################################################
echo -n -e $blue"Converting the given config file to a valid cplusplus file..."$normal
cd $dir_parser
make
make clean
cd ..
cat $path_cfg | $dir_parser"/parser" -t 2 -o $path_cpp -v $path_var -i $prefix_path_inv -c $2 -N $dir_temp"/"$prefix".nvnb" -V
read -r Nv Nb < $dir_temp"/"$prefix".nvnb"
i=0
while [ $i -lt $Nb ]; do
cat $path_cfg | $dir_parser"/parser" -t 3 -o $dir_temp"/"$prefix"_b"$i".cpp" -b $i
i=$(($i+1))
done
#cat $path_cfg | $dir_tool"/parser/parser" -t 3 -o $dir_tmp"/"$prefix"_b0.cpp" -b 0
#cat $path_cfg | $dir_tool"/parser/parser" -t 3 -o $dir_tmp"/"$prefix"_b1.cpp" -b 1
#cat test/new2.cfg| ./parser -t 2 -c new2.ce -o new2.cpp -v new2.var -i new2.inv
#Nv=$?
echo -e $green$bold"[DONE]"$normal
##########################################################################
# Generate CMakeLists from cmake.base and Nv value
##########################################################################
echo -n -e $blue"Generating CMakeLists file for further construction..."$normal
cmakefile="./CMakeLists.txt"
echo "cmake_minimum_required (VERSION 2.8)" > $cmakefile
#echo "set(Nv "$Nv")" >> $cmakefile
echo "set(Nv "$Nv")" >> $cmakefile
echo "set(Nb "$Nb")" >> $cmakefile
echo "set(Project "$prefix")" >> $cmakefile
echo "set(ProjectHome "$(pwd)")" >> $cmakefile
#echo $line
#echo ${line[1]}" --- "${line[2]}
#echo "set(Nv "${line[1]}")" >> $cmakefile
#echo "set(Nb "${line[2]}")" >> $cmakefile
#cat Nv_Nb >> $cmakefile
#if [ $# -ge 4 ]; then
# if [ $4 -eq 1 ]; then
# echo "add_definitions(-D__QAS_POSITIVE)" >> $cmakefile
# fi
# if [ $4 -eq -1 ]; then
# echo "add_definitions(-D__QAS_NEGATIVE)" >> $cmakefile
# fi
# if [ $4 -eq 0 ]; then
# echo "add_definitions(-D__SELECTIVE_SAMPLING_ENABLED)" >> $cmakefile
# fi
#fi
cat ./cmake.in >> $cmakefile
echo "add_executable("$prefix" "$path_cpp" \${DIR_SRCS} \${HEADER})" >> $cmakefile
echo "target_link_libraries("$prefix" \${Z3_LIBRARY})" >> $cmakefile
echo "target_link_libraries("$prefix" \${GSL_LIBRARIES})" >> $cmakefile
echo -e $green$bold"[DONE]"$normal
##########################################################################
# Build the project
##########################################################################
echo -e $blue"Build the project..."$normal
cd build
#rm -rf *
cmake .. > /dev/null
make $prefix
if [ $? -ne 0 ]; then
echo -e $red$bold"[FAIL]make error, contact developer to fix project source code first..."$normal
cd ..
exit 1
fi
#echo -e $green$bold"[DONE]"$normal
cd ..
exit 0
| lijiaying/ZILUv2 | backup/build.sh | Shell | gpl-2.0 | 3,561 |
#!/bin/bash
##
## Usage: sg-extract.sh file xmltag
## Example: sg-extract.sh polissen.xml kenteken
##
txtrst=$(tput sgr0) # Text reset
txtbld=$(tput bold) # Bold
START=$(date +%s)
STARTPP=$(date +%s)
cat << "EOF"
_ _
_____ _| |_ _ __ __ _ ___| |_
/ _ \ \/ / __| '__/ _` |/ __| __|
| __/> <| |_| | | (_| | (__| |_
\___/_/\_\\__|_| \__,_|\___|\__|
EOF
echo
echo
echo
echo "${txtbld}Extract it!${txtrst}"
echo "-------------------------------"
echo "pretty printing ${txtbld}$file${txtrst}"
echo "afhankelijk van de bestandsgrootte kan dit lang duren..."
xmllint --format $1 > PP_$1
ENDPP=$(date +%s)
DIFF=$(( $ENDPP - $STARTPP ))
echo "klaar met pretty printing na $DIFF seconden"
grep "$2" PP_$1 > "$2"_${1%.*}.txt
sort "$2"_${1%.*}.txt | uniq -dc > meerdere_"$2"_${1%.*}.txt
cat "$2"_${1%.*}.txt | sort > gesorteerd_"$2"_${1%.*}.txt
cat "$2"_${1%.*}.txt | sort > meerdere_gesorteerd_"$2"_${1%.*}.txt
cat "$2"_${1%.*}.txt | (while read LINE; do echo -e "${#LINE}\t$LINE"; done) | sort -rn | cut -f 2- > sort_op_lengte_"$2"_${1%.*}.txt
sed -i -e '1i/The tag <'$2'> filtered from '${1}'\' "$2"_${1%.*}.txt
sed -i -e '1i/The tag <'$2'> filtered from '${1}', preceded by the number of times the tag occurs in the file\' meerdere_"$2"_${1%.*}.txt
sed -i -e '1i/The tag <'$2'> filtered from '${1}', sorted alphabetically\' gesorteerd_"$2"_${1%.*}.txt
sed -i -e '1i/The tag <'$2'> filtered from '${1}'\ that occur more than once, sorted by the number of times the tag occurs in the file' meerdere_gesorteerd_"$2"_${1%.*}.txt
sed -i -e '1i/The tag <'$2'> filtered from '${1}'\ sorted by length' sort_op_lengte_"$2"_${1%.*}.txt
#rm PP_${1}
echo -e "-------------------------------"
echo -e "Klaar met filteren bestand"
echo -e "-------------------------------"
echo -e "Lijst van alle tags genaamd ${txtbld}'$2'${txtrst}"
echo -e "staan in bestand ${txtbld}'$2_${1%.*}.txt'${txtrst}"
echo -e "-------------------------------"
echo -e "Lijst van alle tags genaamd ${txtbld}'$2'${txtrst} die vaker dan één keer voorkomen"
echo -e "staan in bestand ${txtbld}'meerdere_$2_${1%.*}.txt'${txtrst},"
echo -e "met voor de tag het aantal keer dat deze voorkomt"
echo -e "In het bestand ${txtbld}'meerdere_gesorteerd_$2_${1%.*}.txt'${txtrst} staan de tags"
echo -e "gesorteerd op volgorde van aantal in bestand"
echo -e "-------------------------------"
echo -e "-------------------------------"
echo -e "Klaar met verwerken van $COUNTER bestanden"
echo -e "-------------------------------"
| wtrdk/sg-scripts | sg-extract.sh | Shell | gpl-2.0 | 2,583 |
#!/bin/bash -
#===============================================================================
#
# FILE: return-big-number.sh
#
# USAGE: ./return-big-number.sh
#
# DESCRIPTION: Get a large-integer "return value" from a function
#
# OPTIONS: ---
# REQUIREMENTS: ---
# BUGS: ---
# NOTES: ---
# AUTHOR: YOUR NAME (),
# ORGANIZATION:
# CREATED: 2015/05/03 18:00
# REVISION: ---
#===============================================================================
set -o nounset # Treat unset variables as an error
Return_val= # Global variable used to hold an oversized function "return value"
alt_return_test ()
{
fvar=$1
Return_val=$fvar
return # Returns 0 (success).
}
alt_return_test 1
echo $? # 0
echo "return value = $Return_val" # 1
alt_return_test 256
echo "return value = $Return_val" # 256
alt_return_test 257
echo "return value = $Return_val" # 257
alt_return_test 25701
echo "return value = $Return_val" # 25701
exit 0
| yaowenqiang/bash | function/return-big-number.sh | Shell | gpl-2.0 | 1,026 |
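# Capture 1080p30 SDI video and embedded audio from a DeckLink card, encode to H.264/AAC, and push to Twitch over RTMP.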
gst-launch-1.0 -e \
decklinkvideosrc mode=1080p30 connection=sdi device-number=4 \
! video/x-raw, width=1920, height=1080 \
! videoconvert \
! videoscale \
! video/x-raw, width=1920, height=1080 \
! x264enc bframes=0 key-int-max=60 tune=zerolatency pass=cbr bitrate=4000 speed-preset=ultrafast \
! tee name=t \
! queue \
! flvmux streamable=true name=mux \
! rtmpsink location="rtmp://live-lhr.twitch.tv/app/live_49236445_gfLQWmuxEpuo7pWgJlokyBInEB12tJ" \
decklinkaudiosrc connection=embedded device-number=4 \
! audioconvert \
! avenc_aac compliance=1 bitrate=160000 \
! mux. \
# t. \
# ! queue \
# ! avdec_h264 \
# ! videoconvert \
# ! xvimagesink
#! filesink location=test.flv
#! tee name=t \
#! queue \
#! flvdemux \
#! avdec_h264 \
#! xvimagesink
#
# ! vaapipostproc scale-method=hq,
# ! video/x-raw, width=1920, height=1080
# ! vaapih264enc init-qp=23 keyframe-period=120
#! rtmpsink location="rtmp://live-lhr.twitch.tv/app/live_49382179_Zrbr9wIynJZrp7BOt7HzQPtX4XcqaQ?bandwidthtest=true" \
#REAL ONE ! rtmpsink location="rtmp://live-lhr.twitch.tv/app/live_49382179_Zrbr9wIynJZrp7BOt7HzQPtX4XcqaQ" \
| gamernetwork/gutbuster | stream.sh | Shell | gpl-2.0 | 1,366 |
# update desktop entries
if [ -x usr/bin/update-desktop-database ]; then
usr/bin/update-desktop-database 1> /dev/null 2> /dev/null
fi
# update mime databases
if [ -x usr/bin/update-mime-database ]; then
usr/bin/update-mime-database usr/share/mime 1> /dev/null 2> /dev/null
fi
| mmwanga/gsb-build-system | src/go/abiword/doinst.sh | Shell | gpl-2.0 | 281 |
#! /usr/bin/env bash
#
# Copyright (C) 2009 - 2019 Internet Neutral Exchange Association Company Limited By Guarantee.
# All Rights Reserved.
#
# This file is part of IXP Manager.
#
# IXP Manager is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation, version 2.0 of the License.
#
# IXP Manager is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License v2.0
# along with IXP Manager. If not, see:
#
# http://www.gnu.org/licenses/gpl-2.0.html
# Example script for updating TACACS configuration from IXP Manager
#
# **ALTER FOR YOUR OWN ENVIRONMENT**
# See: http://docs.ixpmanager.org/features/tacacs/
# Target configuration file:
DEST="/usr/local/etc/tac_plus.conf"
KEY="your-ixp-manager-api-key"
URL="https://ixp.example.com/api/v4/user/formatted"
USERS="alice,joe,mike,mary"
# paths to utilities / init.d scripts
CURL="/usr/local/bin/curl"
TACACS="/usr/local/sbin/tac_plus"
TACACSRC="/usr/local/etc/rc.d/tac_plus"
# path to this script:
pushd `dirname $0` > /dev/null
SCRIPTPATH=`pwd`
popd > /dev/null
# Header config for your TACACS file:
cat >${DEST}.$$ <<END_CONF
#
# IXP Manager TACACS+ Config file
#
# Generated by ${SCRIPTPATH}/$(basename $0) at $(date)
#
# Your TACACS key:
key = "soopersecret"
accounting file = /var/log/tac_plus/tac_plus.log
END_CONF
# Now pull users from IXP Manager:
cmd="${CURL} --fail -s --data \"bcrypt=2a&users=${USERS}\" -X POST \
-H \"Content-Type: application/x-www-form-urlencoded\" \
-H \"X-IXP-Manager-API-Key: ${KEY}\" ${URL} >>${DEST}.$$"
eval $cmd
if [[ $? -ne 0 ]]; then
echo "ERROR: non-zero return from API call to update TACACS+ config file"
rm -f ${DEST}.$$
exit
fi
# Footer config for your TACACS file:
cat >>${DEST}.$$ <<END_CONF
# config for group 'admin'
group = admin {
default service = permit
service = exec {
priv-lvl=15
}
}
# RANCID user for RANCID / Oxidized scripts:
user = rancid {
# ...
}
END_CONF
# Has the config file changed?
cat ${DEST} | egrep -v '^#.*$' >${DEST}.filtered
cat ${DEST}.$$ | egrep -v '^#.*$' >${DEST}.$$.filtered
diff ${DEST}.filtered ${DEST}.$$.filtered >/dev/null
DIFF=$?
rm -f ${DEST}.filtered ${DEST}.$$.filtered
if [[ $DIFF -eq 0 ]]; then
rm -f ${DEST}.$$
exit 0;
fi
# It has - let's make sure the new one parses okay:
$TACACS -P -C ${DEST}.$$ &>/dev/null
if [[ $? -ne 0 ]]; then
echo "ERROR: non-zero return from config check / parse on TACACS+ config file"
rm -f ${DEST}.$$
exit
fi
mv ${DEST}.$$ ${DEST}
# Parsed okay - get PID for tac_plus and have it reload the config (or else start it):
PID=$( ps -ax | grep tac_plus | grep -v grep | awk '{print $1}' )
if [[ -n $PID ]]; then
# reload the config
kill -s SIGUSR1 $PID
exit 0
fi
# not running?
$TACACSRC stop >/dev/null
$TACACSRC start >/dev/null
| inex/IXP-Manager | tools/runtime/tacacs/update-tacacs.sh | Shell | gpl-2.0 | 3,168 |
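# Cross-compile libvorbis 1.3.5 as a static ARM library for Android using NDK r10e.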
NDK=/Users/shutup/Documents/cocos2dx/android-ndk-r10e
SRC_PATH=/Users/shutup/Documents/player/3rd_libs/libvorbis-1.3.5
PLATFORM=$NDK/platforms/android-8/arch-arm/
PREBUILT=$NDK/toolchains/arm-linux-androideabi-4.9/prebuilt/darwin-x86_64
CROSS_PREFIX=$PREBUILT/bin/arm-linux-androideabi-
CPU=arm
PREFIX=/Users/shutup/Documents/player/3rd_libs/build/android/$CPU
OPTIMIZE_CFLAGS=""
NEED_LIBS_PATH=/Users/shutup/Documents/player/3rd_libs/build/android/arm
export PKG_CONFIG_PATH=$PKG_CONFIG_PATH:$NEED_LIBS_PATH/lib/pkgconfig
cd $SRC_PATH
make uninstall
make clean
./configure --prefix=$PREFIX \
--with-pic \
--host=arm-linux \
--build=i686 \
--with-sysroot=$PLATFORM \
--with-ogg \
--disable-shared \
--enable-static \
CC="${CROSS_PREFIX}gcc --sysroot=$PLATFORM" \
CXX="${CROSS_PREFIX}g++ --sysroot=$PLATFORM" \
RANLIB="${CROSS_PREFIX}ranlib" \
AR="${CROSS_PREFIX}ar" \
STRIP="${CROSS_PREFIX}strip" \
NM="${CROSS_PREFIX}nm" \
CFLAGS="-O3 $OPTIMIZE_CFLAGS --sysroot=$PLATFORM" \
CXXFLAGS="-O3 $OPTIMIZE_CFLAGS --sysroot=$PLATFORM"
make -j 4&& make install
| shutup/ffmpeg-4-android | alone/build_vorbis.sh | Shell | gpl-2.0 | 1,096 |
#!/bin/bash -x
#
# Generated - do not edit!
#
# Macros
TOP=`pwd`
CND_PLATFORM=GNU-Linux-x86
CND_CONF=Release
CND_DISTDIR=dist
CND_BUILDDIR=build
CND_DLIB_EXT=so
NBTMPDIR=${CND_BUILDDIR}/${CND_CONF}/${CND_PLATFORM}/tmp-packaging
TMPDIRNAME=tmp-packaging
OUTPUT_PATH=${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/ejercicio05
OUTPUT_BASENAME=ejercicio05
PACKAGE_TOP_DIR=ejercicio05/
# Functions
function checkReturnCode
{
rc=$?
if [ $rc != 0 ]
then
exit $rc
fi
}
function makeDirectory
# $1 directory path
# $2 permission (optional)
{
mkdir -p "$1"
checkReturnCode
if [ "$2" != "" ]
then
chmod $2 "$1"
checkReturnCode
fi
}
function copyFileToTmpDir
# $1 from-file path
# $2 to-file path
# $3 permission
{
cp "$1" "$2"
checkReturnCode
if [ "$3" != "" ]
then
chmod $3 "$2"
checkReturnCode
fi
}
# Setup
cd "${TOP}"
mkdir -p ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package
rm -rf ${NBTMPDIR}
mkdir -p ${NBTMPDIR}
# Copy files and create directories and links
cd "${TOP}"
makeDirectory "${NBTMPDIR}/ejercicio05/bin"
copyFileToTmpDir "${OUTPUT_PATH}" "${NBTMPDIR}/${PACKAGE_TOP_DIR}bin/${OUTPUT_BASENAME}" 0755
# Generate tar file
cd "${TOP}"
rm -f ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package/ejercicio05.tar
cd ${NBTMPDIR}
tar -vcf ../../../../${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package/ejercicio05.tar *
checkReturnCode
# Cleanup
cd "${TOP}"
rm -rf ${NBTMPDIR}
| pdavila13/Program_C | ejerciciosExamen/ejercicio05/nbproject/Package-Release.bash | Shell | gpl-2.0 | 1,469 |
#! /bin/sh
# Copyright (C) 2010-2017 Free Software Foundation, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# Diagnose if the autoconf input is named configure.in.
# Diagnose if both configure.in and configure.ac are present, prefer
# configure.ac.
. test-init.sh
cat >configure.ac <<EOF
AC_INIT([$me], [1.0])
AM_INIT_AUTOMAKE
AC_CONFIG_FILES([Makefile])
EOF
cat >configure.in <<EOF
AC_INIT([$me], [1.0])
AM_INIT_AUTOMAKE([an-invalid-automake-option])
AC_CONFIG_FILES([Makefile])
EOF
: >Makefile.am
$ACLOCAL 2>stderr && { cat stderr >&2; exit 1; }
cat stderr >&2
grep 'configure\.ac.*configure\.in.*both present' stderr
$ACLOCAL -Wno-error 2>stderr || { cat stderr >&2; exit 1; }
cat stderr >&2
grep 'configure\.ac.*configure\.in.*both present' stderr
grep 'proceeding.*configure\.ac' stderr
# Ensure we really proceed with configure.ac.
AUTOMAKE_fails -Werror
grep 'configure\.ac.*configure\.in.*both present' stderr
grep 'proceeding.*configure\.ac' stderr
AUTOMAKE_run -Wno-error
grep 'configure\.ac.*configure\.in.*both present' stderr
grep 'proceeding.*configure\.ac' stderr
mv -f configure.ac configure.in
AUTOMAKE_fails
grep "autoconf input.*'configure.ac', not 'configure.in'" stderr
:
| pylam/automake | t/configure.sh | Shell | gpl-2.0 | 1,786 |
#!/bin/bash
# Colorize and add text parameters
red=$(tput setaf 1) # red
cya=$(tput setaf 6) # cyan
txtbld=$(tput bold) # Bold
bldred=${txtbld}$(tput setaf 1) # red
bldcya=${txtbld}$(tput setaf 6) # cyan
txtrst=$(tput sgr0) # Reset
version=1.8
DATE_START=$(date +"%s")
##########################################################################
echo -e "${bldcya}Do you want to clean up? ${txtrst} [N/y]"
read cleanup
if [ "$cleanup" == "y" ]; then
echo -e "Complete Clean? [N/y]"
read cleanoption
if [ "$cleanoption" == "n" ] || [ "$cleanoption" == "N" ]; then
echo -e "${bldcya}make clean ${txtrst}"
make clean
fi
if [ "$cleanoption" == "y" ]; then
echo -e "${bldcya}make clean mrproper ${txtrst}"
make clean mrproper
fi
fi
###########################################################################
if [ -e .version ]; then
rm .version
fi
echo -e "${bldcya}Do you want to edit the kernel version? ${txtrst} [N/y]"
read kernelversion
if [ "$kernelversion" == "y" ]; then
echo -e "${bldcya}What version has your kernel? ${txtrst}"
echo "${bldred}NUMBERS ONLY! ${txtrst}"
read number
echo $number >> .version
fi
###########################################################################
make tuna_defconfig
cp arch/arm/configs/gk_tuna_defconfig .config
sed -i s/CONFIG_LOCALVERSION=\".*\"/CONFIG_LOCALVERSION=\"-GraKernel_${version}\"/ .config
###########################################################################
echo -e "${bldcya}This could take a while .... ${txtrst}"
nice -n 10 make modules -j4 ARCH=arm
nice -n 10 make -j4 ARCH=arm
###########################################################################
if [ -e arch/arm/boot/zImage ]; then
if [ -d romswitcher ]; then
cd romswitcher
git pull
cd ..
else
git clone [email protected]:Grarak/RomSwitcher-tuna.git -b master romswitcher
fi
find -name "zImage" -exec cp -vf {} romswitcher/ \;
find -name "*.ko" -exec cp -vf {} romswitcher/boot.img-ramdisk/lib/modules/ \;
cd romswitcher
echo $version > boot.img-ramdisk/sbin/version
./build.sh
echo -e "${bldcya} Finished!! ${txtrst}"
DATE_END=$(date +"%s")
DIFF=$(($DATE_END - $DATE_START))
echo "Build completed in $(($DIFF / 60)) minute(s) and $(($DIFF % 60)) seconds."
date '+%a, %d %b %Y %H:%M:%S'
else
echo "${bldred} KERNEL DID NOT BUILD! ${txtrst}"
fi
exit 0
############################################################################
| Grarak/grakernel-tuna | build.sh | Shell | gpl-2.0 | 2,568 |
#!/bin/sh
#
#
if /usr/bin/test ! "$(id -u)" = "0"
then
printf -- "ERROR: This script must be run as the root user\n"
exit 1
fi
CURDIR=$(cd $(dirname $0) && pwd)
MODULES_PATH="${CURDIR}/modules"
DEFAULT_MANIFEST="${CURDIR}/manifests/site.pp"
PUPPET_OPTS="$@"
if /usr/bin/test ! -f ${DEFAULT_MANIFEST}
then
printf -- "Manifest ${DEFAULT_MANIFEST} file doesn't exist ... exiting ..."
exit 1
fi
if /usr/bin/test -d ${MODULES_PATH}
then
PUPPET_OPTS="${PUPPET_OPTS} --modulepath ${MODULES_PATH}"
fi
/usr/bin/puppet apply ${PUPPET_OPTS} ${DEFAULT_MANIFEST}
| fccagou/puppet-local-apply | puppet-apply.sh | Shell | gpl-2.0 | 576 |
#!/bin/bash
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# make_link rel_path source_dir dest_dir
make_link() {
ln -s $2/$1 $3/$1
}
#########################################
# Bash
#########################################
BASH_IT_DIR="${HOME}/.bash_it"
if [ ! -d $BASH_IT_DIR ]; then
git clone --depth=1 [email protected]:ntwyman/bash-it.git $BASH_IT_DIR
fi
# make_bash_link rel_path
make_bash_link() {
make_link $1 "$DIR/bash_it" $BASH_IT_DIR
}
make_bash_link "aliases/custom.aliases.bash"
make_bash_link "completion/custom.completion.bash"
make_bash_link "custom/local.bash"
make_bash_link "custom/all_elixir_auto_complete.bash"
make_bash_link "plugins/custom.plugins.bash"
#########################################
# Emacs
#########################################
EMACS_D_DIR="$HOME/.emacs.d"
if [ ! -d $EMACS_D_DIR ]; then
git clone git://github.com/bbatsov/prelude.git $EMACS_D_DIR
fi
make_emacs_link() {
make_link $1 "$DIR/emacs.d" $EMACS_D_DIR
}
make_emacs_link "prelude-modules.el"
make_emacs_link "personal/custom.el"
#########################################
# Misc
#########################################
ln -s $DIR/tmux.conf $HOME/.tmux.conf
ln -s $DIR/inputrc $HOME/.inputrc
| ntwyman/config | setup.sh | Shell | gpl-2.0 | 1,233 |
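# Dispatch Sobol-sampled "inv" grids, collate the results with collate.R, then train learn.py on each grid.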
#python3 sobol_dispatcher.py -d ms-ov -n -N 8192 -MS -i 64 -r
#-net "'basic.net'"
#python3 sobol_dispatcher.py -d inv -n -N 1024 -i 32 -r -MS -o 0 0 -oe 0 0 -u 0 0 -ue 0 0 -D 0 0 -g 0 0 -e 0 0
#python3 sobol_dispatcher.py -d inv-ov -n -N 1024 -i 32 -r -MS -o 0.2 0.2 -oe 0 0 -u 0 0 -ue 0 0 -D 0 0 -g 0 0 -e 0 0
#python3 sobol_dispatcher.py -d inv-D -n -N 1024 -i 32 -r -MS -t -o 0 0 -oe 0 0 -u 0 0 -ue 0 0 -D 1 1 -g 1 1 -e 0 0
#python3 sobol_dispatcher.py -d inv-Dov -n -N 1024 -i 32 -r -MS -t -o 0.2 0.2 -oe 0 0 -u 0 0 -ue 0 0 -D 1 1 -g 1 1 -e 0 0
python3 sobol_dispatcher.py -d inv -x -n -N 1024 -i 32 -r -S -o 0 0 -oe 0 0 -u 0 0 -ue 0 0 -D 0 0 -g 0 0 -e 0 0
python3 sobol_dispatcher.py -d inv-ov -x -n -N 1024 -i 32 -r -S -o 0.2 0.2 -oe 0 0 -u 0 0 -ue 0 0 -D 0 0 -g 0 0 -e 0 0
python3 sobol_dispatcher.py -d inv-D -x -n -N 1024 -i 32 -r -S -t -o 0 0 -oe 0 0 -u 0 0 -ue 0 0 -D 1 1 -g 1 1 -e 0 0
python3 sobol_dispatcher.py -d inv-Dov -x -n -N 1024 -i 32 -r -S -t -o 0.2 0.2 -oe 0 0 -u 0 0 -ue 0 0 -D 1 1 -g 1 1 -e 0 0
maybe_sub.sh -p 16 Rscript collate.R inv
maybe_sub.sh -p 16 Rscript collate.R inv-ov
maybe_sub.sh -p 16 Rscript collate.R inv-D
maybe_sub.sh -p 16 Rscript collate.R inv-Dov
maybe_sub.sh -e -p 8 python3 learn.py ../grid/inv.dat inv
maybe_sub.sh -e -p 8 python3 learn.py ../grid/inv-ov.dat inv-ov
maybe_sub.sh -e -p 8 python3 learn.py ../grid/inv-D.dat inv-D
maybe_sub.sh -e -p 8 python3 learn.py ../grid/inv-Dov.dat inv-Dov
#python3 sobol_dispatcher.py -d inv-ov -n -s 21024 -N 7168 -i 32 -r -MS -o 0.2 0.2 -oe 0 0 -u 0 0 -ue 0 0 -D 0 0 -g 0 0 -e 0 0
#python3 sobol_dispatcher.py -d inv-D -n -s 21024 -N 7168 -i 32 -r -MS -t -o 0 0 -oe 0 0 -u 0 0 -ue 0 0 -D 1 1 -g 1 1 -e 0 0
#python3 sobol_dispatcher.py -d inv-Dov -n -s 21024 -N 7168 -i 32 -r -MS -t -o 0.2 0.2 -oe 0 0 -u 0 0 -ue 0 0 -D 1 1 -g 1 1 -e 0 0
#maybe_sub.sh -p 16 ./dispatch.sh -d testseis -M 1.235342 -Y 0.292697
| earlbellinger/asteroseismology | grid/run_inv.sh | Shell | gpl-2.0 | 2,012 |
#!/bin/bash
# Populate git repositories on first run
if [ ! -d /home/patches/patches ]; then
/bin/su - patches -c 'git clone https://github.com/stefanha/patches.git'
fi
if [ ! -d /home/patches/qemu.git ]; then
/bin/su - patches -c 'git clone --mirror git://git.qemu-project.org/qemu.git'
fi
exec /usr/sbin/crond -n
| stefanha/qemu-patches | run.sh | Shell | gpl-2.0 | 319 |
#! /bin/sh -e
# tup - A file-based build system
#
# Copyright (C) 2018 Mike Shal <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# Do a little torture test with various threads accessing files.
. ./tup.sh
cat > Tupfile << HERE
: foreach *.c |> gcc %f -o %o -pthread |> ok.exe
: ok.exe |> ./ok.exe |>
HERE
cat > ok.c << HERE
#include <stdio.h>
#include <unistd.h>
#include <fcntl.h>
#include <pthread.h>
#define NUM_THREADS 32
#define NUM_FILES 200
static void *thread(void *arg)
{
int fd;
int i;
for(i=0; i<NUM_FILES; i++) {
fd = open("foo.txt", O_RDONLY);
if(fd < 0) {
perror("foo.txt");
return (void*)-1;
}
close(fd);
fd = open("bar.txt", O_RDONLY);
if(fd < 0) {
perror("bar.txt");
return (void*)-1;
}
close(fd);
}
return NULL;
}
int main(void)
{
pthread_t pids[NUM_THREADS];
int x;
for(x=0; x<NUM_THREADS; x++) {
if(pthread_create(&pids[x], NULL, thread, NULL) != 0) {
fprintf(stderr, "pthread create error\\n");
return 1;
}
}
for(x=0; x<NUM_THREADS; x++) {
void *res;
if(pthread_join(pids[x], &res) != 0) {
fprintf(stderr, "pthread join error\\n");
return 1;
}
if(res != NULL)
return 1;
}
return 0;
}
HERE
tup touch foo.txt bar.txt
update
eotup
| jonatanolofsson/tup | test/t4203-pthreads.sh | Shell | gpl-2.0 | 1,837 |
#!/bin/bash
#
# Build a tweaked libmagic from file-4.26
# Extra ocfa magic entries embedded in magic.mgc
#
# Store lib in ./libmagic/lib to build file module
# against the static lib libmagic.a
prefix=/usr/local/digiwash
tar zxf file-4.26_ocfa.tar.gz
cd file-4.26_ocfa
./configure --prefix=$prefix --exec-prefix=$prefix --includedir=$prefix/inc
make
sudo make install
cd ..
| DNPA/OcfaModules | extractor/file426/build_libmagic.sh | Shell | gpl-2.0 | 381 |
#
# Copyright (C) 2010 OpenWrt.org
#
. /lib/ramips.sh
PART_NAME=firmware
RAMFS_COPY_DATA=/lib/ramips.sh
platform_check_image() {
local board=$(ramips_board_name)
local magic="$(get_magic_long "$1")"
[ "$#" -gt 1 ] && return 1
case "$board" in
3g150b|\
3g300m|\
a5-v11|\
ai-br100|\
air3gii|\
all0239-3g|\
all0256n|\
all5002|\
all5003|\
ar725w|\
asl26555|\
awapn2403|\
awm002-evb|\
awm003-evb|\
bc2|\
broadway|\
carambola|\
cf-wr800n|\
cs-qr10|\
d105|\
dap-1350|\
db-wrt01|\
dcs-930|\
dcs-930l-b1|\
dir-300-b1|\
dir-300-b7|\
dir-320-b1|\
dir-600-b1|\
dir-600-b2|\
dir-615-d|\
dir-615-h1|\
dir-620-a1|\
dir-620-d1|\
dir-810l|\
duzun-dm06|\
e1700|\
esr-9753|\
ex2700|\
f7c027|\
firewrt|\
fonera20n|\
freestation5|\
gl-mt300a|\
gl-mt300n|\
gl-mt750|\
hc5*61|\
hg255d|\
hlk-rm04|\
hpm|\
ht-tm02|\
hw550-3g|\
ip2202|\
jhr-n805r|\
jhr-n825r|\
jhr-n926r|\
linkits7688|\
linkits7688d|\
m2m|\
m3|\
m4|\
mac1200rv2|\
microwrt|\
miniembplug|\
miniembwifi|\
miwifi-mini|\
miwifi-nano|\
mlw221|\
mlwg2|\
mofi3500-3gn|\
mpr-a1|\
mpr-a2|\
mr-102n|\
mt7628|\
mzk-750dhp|\
mzk-dp150n|\
mzk-ex300np|\
mzk-ex750np|\
mzk-w300nh2|\
mzk-wdpr|\
nbg-419n|\
nbg-419n2|\
newifi-d1|\
nixcore|\
nw718|\
oy-0001|\
pbr-d1|\
pbr-m1|\
psg1208|\
psg1218|\
psr-680w|\
px-4885|\
rb750gr3|\
re6500|\
rp-n53|\
rt5350f-olinuxino|\
rt5350f-olinuxino-evb|\
rt-g32-b1|\
rt-n10-plus|\
rt-n13u|\
rt-n14u|\
rt-n15|\
rt-n56u|\
rut5xx|\
sap-g3200u3|\
sk-wb8|\
sl-r7205|\
tew-691gr|\
tew-692gr|\
tew-714tru|\
timecloud|\
tiny-ac|\
ur-326n4g|\
ur-336un|\
v22rw-2x2|\
vocore|\
vr500|\
w150m|\
w306r-v20|\
w502u|\
wf-2881|\
whr-1166d|\
whr-300hp2|\
whr-600d|\
whr-g300n|\
widora-neo|\
witi|\
wizfi630a|\
wl-330n|\
wl-330n3g|\
wl-341v3|\
wl-351|\
wli-tx4-ag300n|\
wmr-300|\
wnce2001|\
wndr3700v5|\
wr512-3gn|\
wr6202|\
wrh-300cr|\
wrtnode|\
wrtnode2r |\
wrtnode2p |\
wsr-600|\
wt1520|\
wt3020|\
wzr-agl300nh|\
x5|\
x8|\
y1|\
y1s|\
zbt-ape522ii|\
zbt-cpe102|\
zbt-wa05|\
zbt-we826|\
zbt-wg2626|\
zbt-wg3526|\
zbt-wr8305rt|\
zte-q7|\
youku-yk1)
[ "$magic" != "27051956" ] && {
echo "Invalid image type."
return 1
}
return 0
;;
3g-6200n|\
3g-6200nl|\
br-6475nd)
[ "$magic" != "43535953" ] && {
echo "Invalid image type."
return 1
}
return 0
;;
ar670w)
[ "$magic" != "6d000080" ] && {
echo "Invalid image type."
return 1
}
return 0
;;
c20i|\
c50)
[ "$magic" != "03000000" ] && {
echo "Invalid image type."
return 1
}
return 0
;;
cy-swr1100|\
dch-m225|\
dir-610-a1|\
dir-645|\
dir-860l-b1)
[ "$magic" != "5ea3a417" ] && {
echo "Invalid image type."
return 1
}
return 0
;;
wsr-1166)
[ "$magic" != "48445230" ] && {
echo "Invalid image type."
return 1
}
return 0
;;
ubnt-erx)
nand_do_platform_check "$board" "$1"
return $?;
;;
esac
echo "Sysupgrade is not yet supported on $board."
return 1
}
platform_nand_pre_upgrade() {
local board=$(ramips_board_name)
case "$board" in
ubnt-erx)
platform_upgrade_ubnt_erx "$ARGV"
;;
esac
}
platform_pre_upgrade() {
local board=$(ramips_board_name)
case "$board" in
ubnt-erx)
nand_do_upgrade "$ARGV"
;;
esac
}
platform_do_upgrade() {
local board=$(ramips_board_name)
case "$board" in
*)
default_do_upgrade "$ARGV"
;;
esac
}
disable_watchdog() {
killall watchdog
( ps | grep -v 'grep' | grep '/dev/watchdog' ) && {
echo 'Could not disable watchdog'
return 1
}
}
blink_led() {
. /etc/diag.sh; set_state upgrade
}
append sysupgrade_pre_upgrade disable_watchdog
append sysupgrade_pre_upgrade blink_led
| f3flight/lede | target/linux/ramips/base-files/lib/upgrade/platform.sh | Shell | gpl-2.0 | 3,694 |
ffmpeg -i data.mpg -ss 00:08:00 -to 00:11:20 -c copy cut.mp4; #Cuts the data from data.mpg from 8:00 to 11:20 and writes it to cut.mp4
ffmpeg -i cut.mp4 -ab 160k -ac 1 -ar 44100 -vn audio.wav; #Extracts the audio from the video
ffmpeg -i input.jpg -vf scale=320:-1 output_320.png
#https://trac.ffmpeg.org/wiki/Create%20a%20thumbnail%20image%20every%20X%20seconds%20of%20the%20video
ffmpeg -i out50.mpg -vf select="eq(pict_type\,PICT_TYPE_I)" -vsync 0 data/%d.png -loglevel debug 2>&1 | grep select:1 > keyframes.txt #used to display i frames
ffprobe -select_streams v -show_frames out50.mpg; #used to select frame info
ffmpeg -i video.mpg -vf scale=100:100 -acodec copy out.mpg
scp -P 5022 [email protected]:/data/2015-04-28_0635_US_KABC_Jimmy_Kimmel_Live.mpg .
| RedHenLab/Audio | GSoC2015/CommercialDetection/src/ffmpeg_scripts.sh | Shell | gpl-2.0 | 783 |
#!/bin/bash
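# Compile every Fortran source file (.f90, .f, .F90) in this directory with gfortran.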
compiler=gfortran
for file in ./*.f90 ; do
$compiler -c $file
done
for file in ./*.f ; do
$compiler -c $file
done
for file in ./*.F90 ; do
$compiler -c $file
done
# renorm-library.f renorm-main.f90 enorm-renorm-ndelta.f90 renorm-nocore.f90 renorm-phaseshift.f90 renorm-potentials.f renorm-vkrg.f90 renorm-vlowk.f90 -o exe.x
| schuncknf/TALENT | undefined_reference/Project-Lorenzo-Contessi/Morten_Black_Box/compileme.sh | Shell | gpl-2.0 | 338 |
#!/bin/bash
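# Run the given command under valgrind using the suppression file shipped next to this script.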
RUN_FOLDER=$(dirname $0)
SUPP_FILE="${RUN_FOLDER}"/default.supp
valgrind --leak-check=full --show-leak-kinds=definite --suppressions="${SUPP_FILE}" $@
| hexagonal-sun/bic | etc/bic_valgrind.sh | Shell | gpl-2.0 | 165 |
#!/bin/bash
##########################################################################
## ##
## This script converts PSF files into PSI files. ##
## It may need modification depending upon your requirements ##
## ##
## (C) Copyright Henri Shustak 2007 ##
## This Script is Released under the GNU GPL License ##
## Lucid Information Systems ##
## http://www.lucidsystms.org ##
## ##
## v0003 ##
## ##
## ##
##########################################################################
## Version History
# v0003 : added basic support for RAW queues
# v0002 : minor updates to deal with modifications of the format
# v0001 : basic implementation
# Other Settings Which May need Configuration
INPUTDIRECTORY="${1}" # This may need to be changed for your settings
OUTPUTDIRECTORY="${2}" # This may need to be changed for your settings
# The base directory for the Printer PPD and requirements - no trailing slash is required
base_ppd_directory=""
base_requirments_directory=""
file_name="" # This is set to the printer name
printer_name="" # read from csv file (first field)
printer_description="" # read from csv file (second field)
printer_location="" # read from csv file
printer_network_address="" # read from csv file
printer_ppd="" # This will use generic PPD unless a PPD is specified within the csv file
printer_psi_site="My Place is Great"
printer_psi_version="2"
default_printer_ppd=":generic"
function clean_exit {
if [ -f "${tmp_output}" ] ; then
rm "${tmp_output}"
fi
exit $exit_status
}
function generate_printer_system_information_file {
# This function simply generates the PSI file.
# You will most likely want to modify the section below this
# function, where the input file is parsed.
PSIfile="$OUTPUTDIRECTORY""/""$printer_name"
if [ -f "${PSIfile}" ] ; then
echo "WARNING! : printer setup file already exisits"
echo " $printer_name not created"
return 1
else
touch "${PSIfile}"
echo "*% *********" >> "${PSIfile}"
echo "*% Printer Setup Information" >> "${PSIfile}"
echo -e "*% PSIVersion\t\t\t${printer_psi_version}" >> "${PSIfile}"
echo -e "*% PSIName\t\t\t${printer_name}" >> "${PSIfile}"
echo -e "*% PSILocation\t\t\t${printer_location}" >> "${PSIfile}"
echo -e "*% PSIDescription\t\t${printer_description}" >> "${PSIfile}"
echo -e "*% PSIInterface\t\t\t${printer_network_address}" >> "${PSIfile}"
echo -e "*% PSIPackages\t\t\t${printer_requirements}" >> "${PSIfile}"
echo -e "*% PSISite\t\t\t${printer_psi_site}" >> "${PSIfile}"
echo -e "*% PSIPPD\t\t\t${printer_ppd}" >> "${PSIfile}"
echo "*% *********" >> "${PSIfile}"
echo "" >> "${PSIfile}"
fi
return 0
}
# Preflight Check
function pre_flight_check {
if [ "$num_argumnets" -lt "1" ] ; then
echo "ERROR ! : No argument provided. This script will now exit."
echo " Usage : sh-convert-psf-to-psi.bash /path/to/input_directory/ /path/to/output_directory/"
exit_status=-127
clean_exit
fi
# Check that the input directory exists
if ! [ -d "${INPUTDIRECTORY}" ] ; then
echo "ERROR ! : The input directory could not be found."
echo " Usage : sh-convert-psf-to-psi.bash /path/to/input_directory/ /path/to/output_directory/"
exit_status=-127
clean_exit
fi
# Check the output directory exists
if ! [ -d "${OUTPUTDIRECTORY}" ] ; then
echo "ERROR ! : The output dose not exist."
echo " Usage : sh-convert-psf-to-psi.bash /path/to/input_directory/ /path/to/output_directory/"
exit_status=-127
clean_exit
fi
}
function generate_input_file {
find "${INPUTDIRECTORY}"* -maxdepth 1 -mindepth 1 | grep -v ".DS_Store" > "${tmp_output}"
if [ $? != 0 ] ; then
exit_status=-129
clean_exit
fi
}
# Other Variables
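# NOTE: md5(1) below is the macOS digest tool; on Linux substitute md5sum.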
tmp_output=/tmp/convert_psf_printer-setup_`date|md5`
exit_status=0
INPUTFILE="${tmp_output}"
export printer_setup_log=/dev/null
# Locate the PSF_SearchKeys - This loads the current search keys
path_from_root="/ExampleFiles/ImportExport/printer-setup/sh-convert-psf-to-psi.bash"
printer_setup_directory=`echo "${0}" | awk -F "${path_from_root}" '{ print $1 }'`
psf_search_keys_conf_file="${printer_setup_directory}/PrinterSetupUtilities/PSF_SearchKeys_MacOS_104.config"
psf_reserved_options_conf_file="${printer_setup_directory}/PrinterSetupUtilities/PSF_Reserved_Options.config"
psf_parsing_utility="${printer_setup_directory}/PrinterSetupUtilities/ParsePSF.sh"
# load the search keys and reserved psf settings
source "${psf_search_keys_conf_file}"
source "${psf_reserved_options_conf_file}"
# Run Preflight Check
num_argumnets=$#
pre_flight_check
# Generate Input File
generate_input_file
# Load any appropriate data into the output directory's files.
run=0
success=0
exec < "${INPUTFILE}"
a1=start
while [ "$a1" != "" ] ; do
read a1
if [ "$a1" != "" ] ; then
# Configure Varibles
# This is probably the section you will want to modify
# Printer Setup File
file_to_parse="${a1}"
if ! [ -d "$file_to_parse" ] ; then
# Printer Name
current_PSF_search_key="${printer_name_search_key}"
printer_name=`"${psf_parsing_utility}" "${file_to_parse}" "${current_PSF_search_key}"`
# Printer Description
current_PSF_search_key="${printer_description_search_key}"
printer_description=`"${psf_parsing_utility}" "${file_to_parse}" "${current_PSF_search_key}"`
# Printer Location
current_PSF_search_key="${printer_location_search_key}"
printer_location=`"${psf_parsing_utility}" "${file_to_parse}" "${current_PSF_search_key}"`
# Printer Network Address
current_PSF_search_key="${printer_network_address_search_key}"
printer_network_address=`"${psf_parsing_utility}" "${file_to_parse}" "${current_PSF_search_key}"`
# Printer PPD
current_PSF_search_key="${printer_ppd_search_key}"
printer_ppd=`"${psf_parsing_utility}" "${file_to_parse}" "${current_PSF_search_key}"`
if [ "${printer_ppd}" == "" ] ; then
printer_ppd="${default_printer_ppd}"
else
if [ "${printer_ppd}" == "${printer_is_raw_lower_case_reserved}" ] || [ "${printer_ppd}" == "${printer_is_raw_upper_case_reserved}" ] ; then
printer_ppd=""
else
if [ "${base_ppd_directory}" != "" ] ; then
printer_ppd="${base_ppd_directory}/${printer_ppd}"
fi
fi
fi
# Printer Requirements
current_PSF_search_key="${printer_requirements_search_key}"
printer_requirements=`"${psf_parsing_utility}" "${file_to_parse}" "${current_PSF_search_key}"`
if [ "${printer_requirements}" != "" ] && [ "${base_requirments_directory}" != "" ] ; then
printer_requirements="${base_requirments_directory}/${printer_requirements}"
fi
# Generate the printer system information (PSI) file for this entry
generate_printer_system_information_file
if [ $? == 0 ] ; then
(( success++ ))
fi
# increment run
(( run++ ))
fi
fi
done
# Final reporting of the PSF generation
parsed=$run
echo ""
echo "Parsed $parsed PrinterSetupFiles"
echo "Successfully added $success Printers to the PSI File."
echo ""
clean_exit
exit 0
|
henri/printer-setup
|
ExampleFiles/ImportExport/printer-setup/sh-convert-psf-to-psi.bash
|
Shell
|
gpl-3.0
| 8,579 |
#!/bin/sh
BBPATH="$PWD/BusyBee_"
if [ -f "${BBPATH}linux" ]; then
BBPATH="${BBPATH}linux"
elif [ -f "${BBPATH}mac" ]; then
BBPATH="${BBPATH}mac"
fi
while [ ! -f "$BBPATH" ]; do
read -p "Location of BusyBee (FullPath): " BBPATH
done
eval "$BBPATH"
if [ $? -gt 0 ]; then
echo "Passed in Arguments did not allow BusyBee to execute properly. Please check the configuration file run install again."
exit
fi
if crontab -l | grep -q "$BBPATH"; then
crontab -l | grep -v "$BBPATH" | crontab -
fi
(crontab -l 2>/dev/null; echo "*/5 * * * * cd $PWD;$BBPATH") | crontab -
|
dixonwille/busybee
|
UnixInstall.sh
|
Shell
|
gpl-3.0
| 589 |
if [[ "$1" == "" ]] ; then
MS="L2010_20981"
#MS="L2010_08450"
else
MS="$1"
fi
for (( a=1 ; $a<23 ; a++ )) ; do
if (( $a<10 )) ; then
lse="lse00$a"
else
lse="lse0$a"
fi
if [[ "`ssh ${lse} -C ls /data3/|grep ${MS}`" != "" ]] ; then
lsesubs="`ssh ${lse} -C \"find /data3/${MS}/\"|grep SB|grep \\\.MS\/ -v`"
for n in ${lsesubs} ; do
echo ${lse}\:$n
done
fi
done
|
jjdmol/LOFAR
|
CEP/DP3/AOFlagger/scripts/distributed/list.sh
|
Shell
|
gpl-3.0
| 396 |
#!/bin/bash
# Scripts that run during deployment.
# XXX: DELETE ME: THIS IS NOT RUN ANYMORE
echo "Collecting static files..."
python manage.py collectstatic --noinput
echo "Executing database migrations..."
python manage.py migrate
echo "Compiling messages..."
python manage.py compilemessages
echo "Killing gunicorn"
kill -HUP `cat /gunicorn.pid`
|
domino14/Webolith
|
scripts/deploy.sh
|
Shell
|
gpl-3.0
| 348 |
#!/bin/bash
###########################################################
# installs IUS Community YUM Repository
# https://iuscommunity.org/pages/Repos.html
# for access to Python 2.7, 3.2, 3.3 and 3.4 on CentOS as default
# is Python 2.6 for CentOS 6
#
# i.e. python 3.4
# yum -y install python34u python34u-devel python34u-pip python34u-setuptools python34u-tools --enablerepo=ius
# rpm -ql python34u python34u-devel python34u-pip python34u-setuptools python34u-tools python34u-tkinter
#
# rpm -ql python34u python34u-pip | grep bin
# /usr/bin/pydoc3
# /usr/bin/pydoc3.4
# /usr/bin/python3
# /usr/bin/python3.4
# /usr/bin/python3.4m
# /usr/bin/pyvenv
# /usr/bin/pyvenv-3.4
# /usr/bin/pip3
# /usr/bin/pip3.4
#
# https://docs.python.org/3/library/venv.html
###########################################################
DT=$(date +"%d%m%y-%H%M%S")
CENTMINLOGDIR='/root/centminlogs'
DIR_TMP='/svr-setup'
FORCE_IPVFOUR='y' # curl/wget commands through script force IPv4
###########################################################
# set locale temporarily to english
# due to some non-english locale issues
export LC_ALL=en_US.UTF-8
export LANG=en_US.UTF-8
export LANGUAGE=en_US.UTF-8
export LC_CTYPE=en_US.UTF-8
shopt -s expand_aliases
for g in "" e f; do
alias ${g}grep="LC_ALL=C ${g}grep" # speed-up grep, egrep, fgrep
done
CENTOSVER=$(awk '{ print $3 }' /etc/redhat-release)
if [ ! -d "$CENTMINLOGDIR" ]; then
mkdir -p "$CENTMINLOGDIR"
fi
if [ "$CENTOSVER" == 'release' ]; then
CENTOSVER=$(awk '{ print $4 }' /etc/redhat-release | cut -d . -f1,2)
if [[ "$(cat /etc/redhat-release | awk '{ print $4 }' | cut -d . -f1)" = '7' ]]; then
CENTOS_SEVEN='7'
elif [[ "$(cat /etc/redhat-release | awk '{ print $4 }' | cut -d . -f1)" = '8' ]]; then
CENTOS_EIGHT='8'
fi
fi
if [[ "$(cat /etc/redhat-release | awk '{ print $3 }' | cut -d . -f1)" = '6' ]]; then
CENTOS_SIX='6'
fi
# Check for Redhat Enterprise Linux 7.x
if [ "$CENTOSVER" == 'Enterprise' ]; then
CENTOSVER=$(awk '{ print $7 }' /etc/redhat-release)
if [[ "$(awk '{ print $1,$2 }' /etc/redhat-release)" = 'Red Hat' && "$(awk '{ print $7 }' /etc/redhat-release | cut -d . -f1)" = '7' ]]; then
CENTOS_SEVEN='7'
REDHAT_SEVEN='y'
fi
fi
if [[ -f /etc/system-release && "$(awk '{print $1,$2,$3}' /etc/system-release)" = 'Amazon Linux AMI' ]]; then
CENTOS_SIX='6'
fi
###########################################################
# Setup Colours
black='\E[30;40m'
red='\E[31;40m'
green='\E[32;40m'
yellow='\E[33;40m'
blue='\E[34;40m'
magenta='\E[35;40m'
cyan='\E[36;40m'
white='\E[37;40m'
boldblack='\E[1;30;40m'
boldred='\E[1;31;40m'
boldgreen='\E[1;32;40m'
boldyellow='\E[1;33;40m'
boldblue='\E[1;34;40m'
boldmagenta='\E[1;35;40m'
boldcyan='\E[1;36;40m'
boldwhite='\E[1;37;40m'
Reset="tput sgr0" # Reset text attributes to normal
#+ without clearing screen.
cecho () # Coloured-echo.
# Argument $1 = message
# Argument $2 = color
{
message=$1
color=$2
echo -e "$color$message" ; $Reset
return
}
###########################################################
starttime=$(TZ=UTC date +%s.%N)
{
if [[ "$CENTOS_SIX" = '6' ]]; then
rpm --import https://repo.ius.io/RPM-GPG-KEY-IUS-6
yum -y install https://repo.ius.io/ius-release-el6.rpm
elif [[ "$CENTOS_SEVEN" = '7' ]]; then
rpm --import https://repo.ius.io/RPM-GPG-KEY-IUS-7
yum -y install https://repo.ius.io/ius-release-el7.rpm
fi
# disable by default the ius.repo
sed -i 's/enabled=1/enabled=0/' /etc/yum.repos.d/ius.repo
if [ -f /etc/yum.repos.d/ius.repo ]; then
\cp -pf /etc/yum.repos.d/ius.repo /etc/yum.repos.d/ius.OLD
if [ -n "$(grep ^priority /etc/yum.repos.d/ius.repo)" ]
then
#echo priorities already set for ius.repo
PRIOREXISTS=1
else
echo "setting yum priorities for ius.repo"
ex -s /etc/yum.repos.d/ius.repo << EOF
:/\[ius/ , /gpgkey/
:a
priority=98
.
:w
:/\[ius-debuginfo/ , /gpgkey/
:a
priority=98
.
:w
:/\[ius-source/ , /gpgkey/
:a
priority=98
.
:w
:q
EOF
cecho "*************************************************" $boldgreen
cecho "Fixing ius.repo YUM Priorities" $boldgreen
cecho "*************************************************" $boldgreen
echo "cat /etc/yum.repos.d/ius.repo"
cat /etc/yum.repos.d/ius.repo
echo ""
fi
fi # repo file check
cecho "*************************************************" $boldgreen
cecho "Installing Python 3.4" $boldgreen
cecho "*************************************************" $boldgreen
# install Python 3.4 besides system default Python 2.6
yum -y install python34u python34u-devel python34u-pip python34u-setuptools python34u-tools --enablerepo=ius
rpm -ql python34u python34u-devel python34u-pip python34u-setuptools python34u-tools python34u-tkinter | grep bin
# switch in favour of epel python34 version
if [[ "$(rpm -qa python34u)" ]]; then
# remove ius community python34u
yum -y remove python34u python34u-devel python34u-pip python34u-setuptools python34u-tools python34u-libs python34u-tkinter
# install epel python34
yum -y install python34 python34-devel python34-pip python34-setuptools python34-tools python34-libs python34-tkinter
fi
if [[ ! "$(rpm -qa cmake3)" || ! "$(rpm -qa cmake3-data)" ]]; then
# reinstall removed dependencies from above removed ius community packages
yum -y install cmake3 cmake3-data
fi
} 2>&1 | tee ${CENTMINLOGDIR}/python34-install_${DT}.log
endtime=$(TZ=UTC date +%s.%N)
INSTALLTIME=$(echo "scale=2;$endtime - $starttime"|bc )
echo "" >> ${CENTMINLOGDIR}/python34-install_${DT}.log
echo "Python 3.4 Install Time: $INSTALLTIME seconds" >> ${CENTMINLOGDIR}/python34-install_${DT}.log
|
centminmod/centminmod
|
addons/python34_install.sh
|
Shell
|
gpl-3.0
| 5,761 |
cd ~/downloads
#php
yum install php php-devel
#php extensions
yum install php-mysql php-gd php-ldap php-odbc php-pear php-xml php-xmlrpc
yum list installed | grep mysql
yum install mysql
yum install mysql-devel
#yum install mysql-server (cannot be installed this way)
#Method 1
#MariaDB is a community-maintained fork of MySQL released under the GPL. The fork exists partly because, after Oracle acquired MySQL, there was a perceived risk of MySQL going closed source; the community forked it to avoid that risk. MariaDB aims for full MySQL compatibility, including the API and command line, so it can act as a drop-in replacement.
#yum install mariadb-server mariadb
#Method 2 (works)
wget http://dev.mysql.com/get/mysql-community-release-el7-5.noarch.rpm
rpm -ivh mysql-community-release-el7-5.noarch.rpm
yum install mysql-community-server
#systemctl restart mysqld
#log in to mysql: mysql -uroot -p
#set the mysql password: mysqladmin -u root -p password 123456
#(or) update the password directly: update user set password=password('123456') where user='root';
#enable remote access: GRANT ALL PRIVILEGES ON *.* TO root@"%" IDENTIFIED BY "123456";
cd /usr/share/mysql
mysql -u root -p
# At the mysql> prompt run these SQL statements (they are not shell commands):
#   use mysql;
#   update user set password=password('123456') where user='root';
#   update user set host="%" where user='root' and host='localhost';
#   flush privileges;  -- changes only take effect after this refresh
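# Non-interactive sketch of the same changes (assumes the pre-5.7 grant
# tables with a 'password' column, as used above):
mysql -u root -p mysql <<'SQL'
update user set password=password('123456') where user='root';
update user set host='%' where user='root' and host='localhost';
flush privileges;
SQL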
#reset a forgotten mysql root password: add "skip-grant-tables" under [mysqld] in /etc/my.cnf, restart mysqld, then log in:
vi /etc/my.cnf
# skip-grant-tables
mysql -u root -p
|
mikestrange/shell
|
mysql.sh
|
Shell
|
gpl-3.0
| 1,404 |
ifconfig
sockstat -4 -6
freebsd-update fetch
freebsd-update install
pkg update
pkg upgrade
sysrc ifconfig_em1="inet 192.168.30.1/24 up"
service netif restart
pkg install -y gsed bash bash-completion sudo isc-dhcp44-server-4.4.2_1 apache24-2.4.48 bind916-9.16.18 curl php74 mod_php74 php74-mbstring php74-pecl-mcrypt php74-zlib php74-curl php74-gd php74-json mariadb105-server-10.5.10 mariadb105-client-10.5.10 php74-mysqli pure-ftpd openssl
sysrc dhcpd_enable=YES
sysrc named_enable="YES"
sysrc apache24_enable="yes"
sysrc mysql_enable="yes"
sysrc mysql_args="--bind-address=127.0.0.1"
sysrc pureftpd_enable=yes
cat << EOF > /usr/local/etc/dhcpd.conf
authoritative;
log-facility local7;
subnet 192.168.30.0 netmask 255.255.255.0 {
range 192.168.30.51 192.168.30.100;
option routers 192.168.30.1;
option domain-name-servers 8.8.8.8;
option domain-name "ellanoteama.net";
option broadcast-address 192.168.30.255;
default-lease-time 600;
max-lease-time 7200;
}
EOF
sed -i -e '/listen-on/ s/127.0.0.1;/127.0.0.1; 192.168.30.1;/g' /usr/local/etc/namedb/named.conf
gsed -i -e '/listen-on/a\ forwarders { 8.8.8.8; };' /usr/local/etc/namedb/named.conf
cat << EOF >> /usr/local/etc/namedb/named.conf
zone "ellanoteama.net" {
type master;
file "/usr/local/etc/namedb/master/db.ellanoteama.net";
};
zone "30.168.192.in-addr.arpa" {
type master;
file "/usr/local/etc/namedb/master/ellanoteama.net.rev";
};
EOF
cat << EOF > /usr/local/etc/namedb/master/db.ellanoteama.net
\$TTL 604800
@ IN SOA ns1.ellanoteama.net. admin.ellanoteama.net. (
2018101901; Serial
3H; Refresh
15M; Retry
2W; Expiry
1D ); Minimum
; name servers - NS records
IN NS ns1.ellanoteama.net.
; name servers - A records
ns1.ellanoteama.net. IN A 192.168.30.1
; other servers - A records
www.ellanoteama.net. IN A 192.168.30.1
ftp.ellanoteama.net. IN A 192.168.30.1
dns1.ellanoteama.net. IN A 192.168.30.1
mysql.ellanoteama.net. IN A 192.168.30.1
dxtr.ellanoteama.net. IN A 192.168.30.1
EOF
cat << EOF > /usr/local/etc/namedb/master/ellanoteama.net.rev
\$TTL 3h
@ SOA ellanoteama.net. ns1.ellanoteama.net. 42 1d 12h 1w 3h
; Serial, Refresh, Retry, Expire, Neg. cache TTL
NS localhost.
1 PTR www.ellanoteama.net
1 PTR ftp.ellanoteama.net
1 PTR dns1.ellanoteama.net
EOF
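# Optional sanity checks (sketch; both tools ship with BIND):
named-checkconf /usr/local/etc/namedb/named.conf
named-checkzone ellanoteama.net /usr/local/etc/namedb/master/db.ellanoteama.net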
cat << EOF > /etc/resolv.conf
nameserver 127.0.0.1
EOF
cat << EOF > /usr/local/etc/apache24/Includes/php.conf
<IfModule dir_module>
DirectoryIndex index.php index.html
<FilesMatch "\.php\$">
SetHandler application/x-httpd-php
</FilesMatch>
<FilesMatch "\.phps\$">
SetHandler application/x-httpd-php-source
</FilesMatch>
</IfModule>
EOF
cp /usr/local/etc/pure-ftpd.conf.sample /usr/local/etc/pure-ftpd.conf
sed -i -e 's/^VerboseLog.*$/VerboseLog yes/g' /usr/local/etc/pure-ftpd.conf
sed -i -e 's/^# PureDB.*$/PureDB \/usr\/local\/etc\/pureftpd.pdb/g' /usr/local/etc/pure-ftpd.conf
sed -i -e 's/^# CreateHomeDir.*$/CreateHomeDir yes/g' /usr/local/etc/pure-ftpd.conf
service isc-dhcpd start
service named start
service apache24 start
service mysql-server start
/usr/local/bin/mysql_secure_installation
service mysql-server restart
service pure-ftpd start
pw useradd vftp -s /sbin/nologin -w no -d /home/vftp -c "Virtual User Pure-FTPd" -m
pure-pw useradd dxtr -u vftp -g vftp -d /home/vftp/dxtr
pure-pw mkdb
service pure-ftpd restart
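# Optional check (sketch): list the virtual FTP user created above
pure-pw show dxtr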
ifconfig
sockstat -4 -6
nslookup ftp.ellanoteama.net
nslookup www.ellanoteama.net
nslookup 192.168.30.1
curl http://www.ellanoteama.net
ftp ftp.ellanoteama.net
echo '<?php phpinfo(); ?>' | tee -a /usr/local/www/apache24/data/info.php
curl http://www.ellanoteama.net/info.php
echo "Done!"
|
tuxtter/myScripts
|
bsdservices.sh
|
Shell
|
gpl-3.0
| 4,153 |
#!/usr/bin/env bash
# You must have virtualenv installed, and the virtualenv command in your path for this to work.
# Assuming you have python installed, you can install virtualenv using the command below.
# curl -O https://raw.github.com/pypa/virtualenv/master/virtualenv.py
# Run this from the repository root (the directory containing podiobooks/), not from inside the package directory
virtualenv --system-site-packages .env
. ./.env/bin/activate
pip install --upgrade -r ./podiobooks/requirements_dev.txt
|
podiobooks/podiobooks
|
devscripts/virtualenv/setup_dev_env.sh
|
Shell
|
gpl-3.0
| 473 |
#!/bin/bash
#check if given host is reachable by ping
iface="enp4s0f1"
[[ ! -z $2 ]] && iface="$2"
ping -c1 -W1 -q -I "$iface" "$1" &>/dev/null
status=$?
if [[ $status == 0 ]] ; then
#Connection success!
echo 1
exit 0
else
#Connection failure
echo 0
exit 1
fi
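# Usage (sketch):
#   ./check_host_up.sh 192.168.1.1        # ping via the default iface enp4s0f1
#   ./check_host_up.sh example.com eth0   # override the interface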
|
xcy7e/bash_scripts
|
scripts/check_host_up.sh
|
Shell
|
gpl-3.0
| 300 |
#!/bin/sh
nmap -p T:21,22,23,25,32,79,80,110,111,135,137,139,143,389,443,445,855,871,873,993,995,1090,1098,1099, \
1100,1101,1198,1433,1556,1577,1882,1900,3128,3260,3306,3343,3389,3528,3873,4095,4105,4444,4445,4446, \
4447,4448,4457,4712,4713,4750,5001,5353,5666,5701,5900,6400,6410,7900,8000,8001,8005,8009,8080,8083, \
8118,8222,8333,8443,8512,8730,9092,10000,12489,13184,13724,13782,17185,18080,20080,20389,20390,20391, \
20394,20396,20398,20404,20410,20411,20443,22001,22002,22099,27864,36972,44441,44442,44443,44444,47001, \
48000,48001,48004,48005,48006,48007,48008,48009,48010,49152,49153,49154,52752,U:161 \
--script banner,finger,ftp-anon,http-comments-displayer,http-git,http-gitweb-projects-enum,http-headers, \
http-methods,http-open-proxy,http-php-version,http-title,http-trace,http-vmware-path-vuln, \
http-vuln-cve2010-0738,ajp-headers,imap-capabilities,iscsi-enum-targets,iscsi-info,ldap-rootdse, \
ms-sql-info,msrpc-enum,mysql-info,nbstat,nfs-ls,nfs-showmount,nfs-statfs,p2p-conficker,pop3-capabilities, \
realvnc-auth-bypass,rmi-dumpregistry,rpcinfo,rsync-list-modules,smb-check-vulns,smb-enum-shares, \
smb-os-discovery,smb-system-info,snmp-interfaces,snmp-netstat,upnp-info,snmp-sysdescr,ssl-heartbleed, \
vnc-info --script-args=checkconficker=1,safe=1,smbbasic=1 -oA nmapout -sS -Pn -sV -sU --max-retries 2 \
--open scanned_hosts
|
HynekPetrak/nmap2web
|
scan.sh
|
Shell
|
gpl-3.0
| 1,352 |
elm-live Main.elm --output=elm.js
|
surprisetalk/elm-bulma
|
demo/elm-live.sh
|
Shell
|
gpl-3.0
| 34 |
#!/bin/zsh
# NOTE: the $langs[$x] subscripts and 1-based indexing below are zsh array
# semantics; under bash this would need ${langs[$((x-1))]} instead.
langs=(ar cs de fr)
for x in `seq 1 4`
do
i=$langs[$x]
next=`echo $x+1 | bc`
for y in `seq $next 4`
do
j=$langs[$y]
nextnext=`echo $y+1 | bc`
for z in `seq $nextnext 4`
do
k=$langs[$z]
nextnextnext=`echo $z+1 | bc`
for a in `seq $nextnextnext 4`
do
l=$langs[$a]
echo $i $j $k $l
python bleu_computer.py /windroot/raj/corpora_downloads/IWSLT2016/1s1t_"$i"_en/mlnmt.test.tgt.raw /windroot/raj/corpora_downloads/IWSLT2016/5s5t_"$i"_"$j"_"$k"_"$l"_en/"$i"_en_test_beam_12/src-shard.trans.unk_replaced.restored.merged
python bleu_computer.py /windroot/raj/corpora_downloads/IWSLT2016/1s1t_en_"$i"/mlnmt.test.tgt.raw /windroot/raj/corpora_downloads/IWSLT2016/5s5t_"$i"_"$j"_"$k"_"$l"_en/en_"$i"_test_beam_12/src-shard.trans.unk_replaced.restored.merged
python bleu_computer.py /windroot/raj/corpora_downloads/IWSLT2016/1s1t_"$j"_en/mlnmt.test.tgt.raw /windroot/raj/corpora_downloads/IWSLT2016/5s5t_"$i"_"$j"_"$k"_"$l"_en/"$j"_en_test_beam_12/src-shard.trans.unk_replaced.restored.merged
python bleu_computer.py /windroot/raj/corpora_downloads/IWSLT2016/1s1t_en_"$j"/mlnmt.test.tgt.raw /windroot/raj/corpora_downloads/IWSLT2016/5s5t_"$i"_"$j"_"$k"_"$l"_en/en_"$j"_test_beam_12/src-shard.trans.unk_replaced.restored.merged
python bleu_computer.py /windroot/raj/corpora_downloads/IWSLT2016/1s1t_"$k"_en/mlnmt.test.tgt.raw /windroot/raj/corpora_downloads/IWSLT2016/5s5t_"$i"_"$j"_"$k"_"$l"_en/"$k"_en_test_beam_12/src-shard.trans.unk_replaced.restored.merged
python bleu_computer.py /windroot/raj/corpora_downloads/IWSLT2016/1s1t_en_"$k"/mlnmt.test.tgt.raw /windroot/raj/corpora_downloads/IWSLT2016/5s5t_"$i"_"$j"_"$k"_"$l"_en/en_"$k"_test_beam_12/src-shard.trans.unk_replaced.restored.merged
python bleu_computer.py /windroot/raj/corpora_downloads/IWSLT2016/1s1t_"$l"_en/mlnmt.test.tgt.raw /windroot/raj/corpora_downloads/IWSLT2016/5s5t_"$i"_"$j"_"$k"_"$l"_en/"$l"_en_test_beam_12/src-shard.trans.unk_replaced.restored.merged
python bleu_computer.py /windroot/raj/corpora_downloads/IWSLT2016/1s1t_en_"$l"/mlnmt.test.tgt.raw /windroot/raj/corpora_downloads/IWSLT2016/5s5t_"$i"_"$j"_"$k"_"$l"_en/en_"$l"_test_beam_12/src-shard.trans.unk_replaced.restored.merged
done
done
done
done
#for x in `seq 1 4`
#do
#i=$langs[$x]
#next=`echo $x+1 | bc`
#for y in `seq $next 4`
#do
#j=$langs[$y]
#nextnext=`echo $y+1 | bc`
#for z in `seq $nextnext 4`
#do
#k=$langs[$z]
#echo $i $j $k
#python bleu_computer.py /windroot/raj/corpora_downloads/IWSLT2016/4s4t_en_"$i"_"$j"_"$k"/"$i"_en_test_beam_12/src-shard.trans.unk_replaced.restored.merged /windroot/raj/corpora_downloads/IWSLT2016/1s1t_"$i"_en/mlnmt.test.tgt.raw
#python bleu_computer.py /windroot/raj/corpora_downloads/IWSLT2016/4s4t_en_"$i"_"$j"_"$k"/en_"$i"_test_beam_12/src-shard.trans.unk_replaced.restored.merged /windroot/raj/corpora_downloads/IWSLT2016/1s1t_en_"$i"/mlnmt.test.tgt.raw
#python bleu_computer.py /windroot/raj/corpora_downloads/IWSLT2016/4s4t_en_"$i"_"$j"_"$k"/"$j"_en_test_beam_12/src-shard.trans.unk_replaced.restored.merged /windroot/raj/corpora_downloads/IWSLT2016/1s1t_"$j"_en/mlnmt.test.tgt.raw
#python bleu_computer.py /windroot/raj/corpora_downloads/IWSLT2016/4s4t_en_"$i"_"$j"_"$k"/en_"$j"_test_beam_12/src-shard.trans.unk_replaced.restored.merged /windroot/raj/corpora_downloads/IWSLT2016/1s1t_en_"$j"/mlnmt.test.tgt.raw
#python bleu_computer.py /windroot/raj/corpora_downloads/IWSLT2016/4s4t_en_"$i"_"$j"_"$k"/"$k"_en_test_beam_12/src-shard.trans.unk_replaced.restored.merged /windroot/raj/corpora_downloads/IWSLT2016/1s1t_"$k"_en/mlnmt.test.tgt.raw
#python bleu_computer.py /windroot/raj/corpora_downloads/IWSLT2016/4s4t_en_"$i"_"$j"_"$k"/en_"$k"_test_beam_12/src-shard.trans.unk_replaced.restored.merged /windroot/raj/corpora_downloads/IWSLT2016/1s1t_en_"$k"/mlnmt.test.tgt.raw
#done
#done
#done
#for x in `seq 1 4`
#do
#i=$langs[$x]
#next=`echo $x+1 | bc`
#for y in `seq $next 4`
#do
#j=$langs[$y]
#if [[ $i != $j ]]
#then
#echo $i $j
#python bleu_computer.py /windroot/raj/corpora_downloads/IWSLT2016/3s3t_"$i"_"$j"_en/"$i"_en_test_beam_12/src-shard.trans.unk_replaced.restored.merged /windroot/raj/corpora_downloads/IWSLT2016/1s1t_"$i"_en/mlnmt.test.tgt.raw
#python bleu_computer.py /windroot/raj/corpora_downloads/IWSLT2016/3s3t_"$i"_"$j"_en/en_"$i"_test_beam_12/src-shard.trans.unk_replaced.restored.merged /windroot/raj/corpora_downloads/IWSLT2016/1s1t_en_"$i"/mlnmt.test.tgt.raw
#python bleu_computer.py /windroot/raj/corpora_downloads/IWSLT2016/3s3t_"$i"_"$j"_en/"$j"_en_test_beam_12/src-shard.trans.unk_replaced.restored.merged /windroot/raj/corpora_downloads/IWSLT2016/1s1t_"$j"_en/mlnmt.test.tgt.raw
#python bleu_computer.py /windroot/raj/corpora_downloads/IWSLT2016/3s3t_"$i"_"$j"_en/en_"$j"_test_beam_12/src-shard.trans.unk_replaced.restored.merged /windroot/raj/corpora_downloads/IWSLT2016/1s1t_en_"$j"/mlnmt.test.tgt.raw
#fi
#done
#done
for x in `seq 1 4`
do
i=$langs[$x]
echo $i en
python bleu_computer.py /windroot/raj/corpora_downloads/IWSLT2016/1s1t_"$i"_en/mlnmt.test.tgt.raw /windroot/raj/corpora_downloads/IWSLT2016/2s2t_"$i"_en/"$i"_en_test_beam_12/src-shard.trans.unk_replaced.restored.merged
python bleu_computer.py /windroot/raj/corpora_downloads/IWSLT2016/1s1t_en_"$i"/mlnmt.test.tgt.raw /windroot/raj/corpora_downloads/IWSLT2016/2s2t_"$i"_en/en_"$i"_test_beam_12/src-shard.trans.unk_replaced.restored.merged
done
for x in `seq 1 4`
do
i=$langs[$x]
next=`echo $x+1 | bc`
for y in `seq $next 4`
do
j=$langs[$y]
nextnext=`echo $y+1 | bc`
for z in `seq $nextnext 4`
do
k=$langs[$z]
nextnextnext=`echo $z+1 | bc`
for a in `seq $nextnextnext 4`
do
l=$langs[$a]
echo $i $j $k $l
python bleu_computer.py /windroot/raj/corpora_downloads/IWSLT2016/1s1t_en_"$i"/mlnmt.test.tgt.raw /windroot/raj/corpora_downloads/IWSLT2016/1s4t_en_"$i"_"$j"_"$k"_"$l"/en_"$i"_test_beam_12/src-shard.trans.unk_replaced.restored.merged
python bleu_computer.py /windroot/raj/corpora_downloads/IWSLT2016/1s1t_en_"$j"/mlnmt.test.tgt.raw /windroot/raj/corpora_downloads/IWSLT2016/1s4t_en_"$i"_"$j"_"$k"_"$l"/en_"$j"_test_beam_12/src-shard.trans.unk_replaced.restored.merged
python bleu_computer.py /windroot/raj/corpora_downloads/IWSLT2016/1s1t_en_"$k"/mlnmt.test.tgt.raw /windroot/raj/corpora_downloads/IWSLT2016/1s4t_en_"$i"_"$j"_"$k"_"$l"/en_"$k"_test_beam_12/src-shard.trans.unk_replaced.restored.merged
python bleu_computer.py /windroot/raj/corpora_downloads/IWSLT2016/1s1t_en_"$l"/mlnmt.test.tgt.raw /windroot/raj/corpora_downloads/IWSLT2016/1s4t_en_"$i"_"$j"_"$k"_"$l"/en_"$l"_test_beam_12/src-shard.trans.unk_replaced.restored.merged
done
done
done
done
for x in `seq 1 4`
do
i=$langs[$x]
next=`echo $x+1 | bc`
for y in `seq $next 4`
do
j=$langs[$y]
nextnext=`echo $y+1 | bc`
for z in `seq $nextnext 4`
do
k=$langs[$z]
nextnextnext=`echo $z+1 | bc`
for a in `seq $nextnextnext 4`
do
l=$langs[$a]
echo $i $j $k $l
python bleu_computer.py /windroot/raj/corpora_downloads/IWSLT2016/1s1t_"$i"_en/mlnmt.test.tgt.raw /windroot/raj/corpora_downloads/IWSLT2016/4s1t_"$i"_"$j"_"$k"_"$l"_en/"$i"_en_test_beam_12/src-shard.trans.unk_replaced.restored.merged
python bleu_computer.py /windroot/raj/corpora_downloads/IWSLT2016/1s1t_"$j"_en/mlnmt.test.tgt.raw /windroot/raj/corpora_downloads/IWSLT2016/4s1t_"$i"_"$j"_"$k"_"$l"_en/"$j"_en_test_beam_12/src-shard.trans.unk_replaced.restored.merged
python bleu_computer.py /windroot/raj/corpora_downloads/IWSLT2016/1s1t_"$k"_en/mlnmt.test.tgt.raw /windroot/raj/corpora_downloads/IWSLT2016/4s1t_"$i"_"$j"_"$k"_"$l"_en/"$k"_en_test_beam_12/src-shard.trans.unk_replaced.restored.merged
python bleu_computer.py /windroot/raj/corpora_downloads/IWSLT2016/1s1t_"$l"_en/mlnmt.test.tgt.raw /windroot/raj/corpora_downloads/IWSLT2016/4s1t_"$i"_"$j"_"$k"_"$l"_en/"$l"_en_test_beam_12/src-shard.trans.unk_replaced.restored.merged
done
done
done
done
for x in `seq 1 4`
do
i=$langs[$x]
next=`echo $x+1 | bc`
for y in `seq $next 4`
do
j=$langs[$y]
nextnext=`echo $y+1 | bc`
for z in `seq $nextnext 4`
do
k=$langs[$z]
echo $i $j $k
python bleu_computer.py /windroot/raj/corpora_downloads/IWSLT2016/1s1t_en_"$i"/mlnmt.test.tgt.raw /windroot/raj/corpora_downloads/IWSLT2016/1s3t_en_"$i"_"$j"_"$k"/en_"$i"_test_beam_12/src-shard.trans.unk_replaced.restored.merged
python bleu_computer.py /windroot/raj/corpora_downloads/IWSLT2016/1s1t_en_"$j"/mlnmt.test.tgt.raw /windroot/raj/corpora_downloads/IWSLT2016/1s3t_en_"$i"_"$j"_"$k"/en_"$j"_test_beam_12/src-shard.trans.unk_replaced.restored.merged
python bleu_computer.py /windroot/raj/corpora_downloads/IWSLT2016/1s1t_en_"$k"/mlnmt.test.tgt.raw /windroot/raj/corpora_downloads/IWSLT2016/1s3t_en_"$i"_"$j"_"$k"/en_"$k"_test_beam_12/src-shard.trans.unk_replaced.restored.merged
done
done
done
for x in `seq 1 4`
do
i=$langs[$x]
next=`echo $x+1 | bc`
for y in `seq $next 4`
do
j=$langs[$y]
nextnext=`echo $y+1 | bc`
for z in `seq $nextnext 4`
do
k=$langs[$z]
echo $i $j $k
python bleu_computer.py /windroot/raj/corpora_downloads/IWSLT2016/1s1t_"$i"_en/mlnmt.test.tgt.raw /windroot/raj/corpora_downloads/IWSLT2016/3s1t_"$i"_"$j"_"$k"_en/"$i"_en_test_beam_12/src-shard.trans.unk_replaced.restored.merged
python bleu_computer.py /windroot/raj/corpora_downloads/IWSLT2016/1s1t_"$j"_en/mlnmt.test.tgt.raw /windroot/raj/corpora_downloads/IWSLT2016/3s1t_"$i"_"$j"_"$k"_en/"$j"_en_test_beam_12/src-shard.trans.unk_replaced.restored.merged
python bleu_computer.py /windroot/raj/corpora_downloads/IWSLT2016/1s1t_"$k"_en/mlnmt.test.tgt.raw /windroot/raj/corpora_downloads/IWSLT2016/3s1t_"$i"_"$j"_"$k"_en/"$k"_en_test_beam_12/src-shard.trans.unk_replaced.restored.merged
done
done
done
for x in `seq 1 4`
do
i=$langs[$x]
next=`echo $x+1 | bc`
for y in `seq $next 4`
do
j=$langs[$y]
echo $i $j
python bleu_computer.py /windroot/raj/corpora_downloads/IWSLT2016/1s1t_"$i"_en/mlnmt.test.tgt.raw /windroot/raj/corpora_downloads/IWSLT2016/2s1t_"$i"_"$j"_en/"$i"_en_test_beam_12/src-shard.trans.unk_replaced.restored.merged
python bleu_computer.py /windroot/raj/corpora_downloads/IWSLT2016/1s1t_"$j"_en/mlnmt.test.tgt.raw /windroot/raj/corpora_downloads/IWSLT2016/2s1t_"$i"_"$j"_en/"$j"_en_test_beam_12/src-shard.trans.unk_replaced.restored.merged
done
done
for x in `seq 1 4`
do
i=$langs[$x]
next=`echo $x+1 | bc`
for y in `seq $next 4`
do
j=$langs[$y]
echo $i $j
python bleu_computer.py /windroot/raj/corpora_downloads/IWSLT2016/1s1t_en_"$i"/mlnmt.test.tgt.raw /windroot/raj/corpora_downloads/IWSLT2016/1s2t_en_"$i"_"$j"/en_"$i"_test_beam_12/src-shard.trans.unk_replaced.restored.merged
python bleu_computer.py /windroot/raj/corpora_downloads/IWSLT2016/1s1t_en_"$j"/mlnmt.test.tgt.raw /windroot/raj/corpora_downloads/IWSLT2016/1s2t_en_"$i"_"$j"/en_"$j"_test_beam_12/src-shard.trans.unk_replaced.restored.merged
done
done
for x in `seq 1 4`
do
i=$langs[$x]
python bleu_computer.py /windroot/raj/corpora_downloads/IWSLT2016/1s1t_en_"$i"/mlnmt.test.tgt.raw /windroot/raj/corpora_downloads/IWSLT2016/1s1t_en_"$i"/test_beam_12/src-shard.trans.unk_replaced.restored.merged
python bleu_computer.py /windroot/raj/corpora_downloads/IWSLT2016/1s1t_"$i"_en/mlnmt.test.tgt.raw /windroot/raj/corpora_downloads/IWSLT2016/1s1t_"$i"_en/test_beam_12/src-shard.trans.unk_replaced.restored.merged
done
|
prajdabre/knmt
|
nmt_chainer/bleu_all.sh
|
Shell
|
gpl-3.0
| 11,133 |
#!/bin/sh
set -eux
. ./extra/gitlab/env.sh
cmake . -DCMAKE_INCLUDE_PATH=$CACHE_DIR/usr/lib -DENABLE_TESTS=OFF -DENABLE_WERROR=OFF
make VERBOSE=1
|
LawlingBear/uTox
|
extra/gitlab/osx-script.sh
|
Shell
|
gpl-3.0
| 147 |
#!/bin/sh
GEM_PATH=/home/ubuntu/every_word_the_musical/vendor/bundle/ruby/2.0.0:
export GEM_PATH # the assignment alone is not visible to ruby2.0 unless exported
ruby2.0 every_word.rb
|
davidbalbert/every_word_the_musical
|
run.sh
|
Shell
|
gpl-3.0
| 104 |
#!/bin/sh
# Liquid War 6 is a unique multiplayer wargame.
# Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015 Christian Mauduit <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
# Liquid War 6 homepage : http://www.gnu.org/software/liquidwar6/
# Contact author : [email protected]
OPT_EXTRA="opt-extra:"
MAKEFILE_MAIN=perf/Makefile.main
MAKEFILE_EXTRA=perf/Makefile.extra
MAKEFILE_AM=perf/Makefile.am
cp $MAKEFILE_MAIN $MAKEFILE_AM
for i in bot cfg cli cns cnx dat dsp dyn gen gfx glb gui hlp img ker ldr map mat msg net nod p2p pil scm sim snd srv sys tsk vox ; do
OPT_EXTRA="$OPT_EXTRA liquidwar6${i}-test-cpu.jpeg liquidwar6${i}-test-heap.jpeg"
echo >> $MAKEFILE_AM
cat $MAKEFILE_EXTRA | sed "s/xyz/${i}/g" >> $MAKEFILE_AM
done
echo >> $MAKEFILE_AM
echo "$OPT_EXTRA" >> $MAKEFILE_AM
|
lijiaqigreat/liquidwar-web
|
reference/doc/perf-update.sh
|
Shell
|
gpl-3.0
| 1,438 |
#!/bin/bash -x
#
# Generated - do not edit!
#
# Macros
TOP=`pwd`
CND_PLATFORM=Cygwin-Windows
CND_CONF=Release
CND_DISTDIR=dist
CND_BUILDDIR=build
CND_DLIB_EXT=dll
NBTMPDIR=${CND_BUILDDIR}/${CND_CONF}/${CND_PLATFORM}/tmp-packaging
TMPDIRNAME=tmp-packaging
OUTPUT_PATH=${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/libactuador.a
OUTPUT_BASENAME=libactuador.a
PACKAGE_TOP_DIR=Actuador/
# Functions
function checkReturnCode
{
rc=$?
if [ $rc != 0 ]
then
exit $rc
fi
}
function makeDirectory
# $1 directory path
# $2 permission (optional)
{
mkdir -p "$1"
checkReturnCode
if [ "$2" != "" ]
then
chmod $2 "$1"
checkReturnCode
fi
}
function copyFileToTmpDir
# $1 from-file path
# $2 to-file path
# $3 permission
{
cp "$1" "$2"
checkReturnCode
if [ "$3" != "" ]
then
chmod $3 "$2"
checkReturnCode
fi
}
# Setup
cd "${TOP}"
mkdir -p ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package
rm -rf ${NBTMPDIR}
mkdir -p ${NBTMPDIR}
# Copy files and create directories and links
cd "${TOP}"
makeDirectory "${NBTMPDIR}/Actuador/lib"
copyFileToTmpDir "${OUTPUT_PATH}" "${NBTMPDIR}/${PACKAGE_TOP_DIR}lib/${OUTPUT_BASENAME}" 0644
# Generate tar file
cd "${TOP}"
rm -f ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package/Actuador.tar
cd ${NBTMPDIR}
tar -vcf ../../../../${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package/Actuador.tar *
checkReturnCode
# Cleanup
cd "${TOP}"
rm -rf ${NBTMPDIR}
|
gatATAC/motorCtrl
|
nbproject/Package-Release.bash
|
Shell
|
gpl-3.0
| 1,463 |
#!/bin/bash
# VIRT-M
# cd /tmp && yum install wget && wget https://github.com/munishgaurav5/ks/raw/master/ins/virt.sh && chmod 777 virt.sh && ./virt.sh
egrep '(vmx|svm)' /proc/cpuinfo
yum groupinstall "Virtualization Host" -y
yum install -y qemu-kvm qemu-img libvirt virt-install libvirt-python virt-manager virt-install libvirt-client virt-viewer
lsmod |grep kvm
modprobe kvm
lsmod |grep kvm
systemctl start libvirt-guests.service
systemctl start libvirtd
systemctl enable libvirt-guests.service
systemctl enable libvirtd
systemctl status libvirt-guests.service
systemctl status libvirtd
echo ""
echo ""
echo "Reboot manually Now !!"
echo ""
# IN GUI
# virt-manager
# virt-install --name=ArkitRHEL7 --ram=1024 --vcpus=1 --cdrom=/var/lib/libvirt/images/rhel-server-7.3-x86_64-dvd.iso --os-type=linux --os-variant=rhel7 --network bridge=br0 --graphics=spice --disk path=/var/lib/libvirt/images/rhel7.dsk,size=20
|
munishgaurav5/ks
|
ins/virt.sh
|
Shell
|
gpl-3.0
| 924 |
#!/bin/bash
##################################################################
# (C) ZE Computer- Medienservice, Humboldt-Universitaet zu Berlin
# Written by Daniel Rohde <[email protected]>
##################################################################
DEBUG=0
DONTCOMPRESS=0
FORCEMINIFY=0
while getopts "dnf" arg; do
case "${arg}" in
d)
DEBUG=1
;;
n)
DONTCOMPRESS=1
;;
f) FORCEMINIFY=1
;;
esac
done
QUERYSTRING="lib/QueryString/jquery-querystring.js"
MYSANHELPER="lib/MySwissArmyKnife/jquery-myswissarmyknife.js lib/MySwissArmyKnife/myswissarmyknife.css"
MYKEYBOARDEVENTHANDLER="lib/MyKeyboardEventHandler/jquery-mykeyboardeventhandler.js"
MYTOOLTIPLIB="lib/MyTooltip/jquery-mytooltip.js lib/MyTooltip/mytooltip.css"
MYPOPUPLIB="lib/MyPopup/jquery-mypopup.js lib/MyPopup/mypopup.css"
MYINPLACEEDITOR="lib/MyInplaceEditor/jquery-myinplaceeditor.js"
MYTABLEMANAGER="lib/MyTableManager/jquery-mytablemanager.js lib/MyTableManager/mytablemanager.css"
MYCOUNTDOWNTIMER="lib/MyCountdownTimer/jquery-mycountdowntimer.js"
MYFOLDERTREE="lib/MyFolderTree/jquery-myfoldertree.js lib/MyFolderTree/myfoldertree.css"
MYSPLITPANE="lib/MySplitPane/jquery-mysplitpane.js lib/MySplitPane/mysplitpane.css"
MYMAIN="script.js style.css svg/inlinestyle.css"
MYLIBS="${QUERYSTRING} ${MYSANHELPER} ${MYKEYBOARDEVENTHANDLER} ${MYTOOLTIPLIB} ${MYPOPUPLIB} ${MYINPLACEEDITOR} ${MYTABLEMANAGER} ${MYCOUNTDOWNTIMER} ${MYFOLDERTREE} ${MYSPLITPANE} ${MYMAIN}"
COMPLETE="complete"
SPRITE="svg/sprite.svg"
rm -f ${COMPLETE}.min.*
for file in $MYLIBS ; do
dir=$(dirname "${file}")
bfn=$(basename "${file}")
ext=${bfn#*.}
bn=$(basename "${bfn}" ".${ext}")
newfile="${dir}/${bn}.min.${ext}"
complfile="${COMPLETE}.min.${ext}"
if test "${DONTCOMPRESS}" -eq 0 ; then
grep -q '/**INCLUDE(' "${file}"
if test \( $? -eq 0 \) -o \( ${FORCEMINIFY} -eq 1 \) -o \( ! -e ${newfile} \) -o \( "${file}" -nt "${newfile}" \) ; then
test ${DEBUG} -ne 0 && echo "Minify $file to $newfile and concat to $complfile"
perl prepjs.pl "$file" | node_modules/yuglify/bin/yuglify --terminal --type "${ext}" \
| tee -a "${complfile}" \
> "${newfile}"
else
test ${DEBUG} -ne 0 && echo "Nothing to minify for ${file} -> concat only."
perl prepjs.pl "${newfile}" >> "${complfile}"
fi
else
test ${DEBUG} -ne 0 && echo "Copy $file to $newfile and concat to $complfile"
perl prepjs.pl "${file}" | tee -a "${complfile}" > "${newfile}"
fi
done
test ${DEBUG} -ne 0 && echo "(brotli|gzip) ${COMPLETE}.min.*"
for f in ${COMPLETE}.min.* $SPRITE ; do
test ! -e $f && continue
brotli < "${f}" > "${f}.br"
gzip -f "${f}"
done
exit 0
|
DanRohde/webdavcgi
|
htdocs/views/simple/minify.sh
|
Shell
|
gpl-3.0
| 2,853 |
#!/bin/sh
# -- run in the current working (submission) directory --
if test X$PBS_ENVIRONMENT = XPBS_BATCH; then cd $PBS_O_WORKDIR; fi
# load the required modules
export PATH=/appl/schroedingerFreeMaestro/2013-3:${PATH}
para_testapp
|
marcindulak/accts
|
accts/schrodinger/FreeMaestro-2013-3/para_testapp.sh
|
Shell
|
gpl-3.0
| 236 |
#!/bin/bash
wget -O musmus.fa.gz ftp://ftp.ncbi.nlm.nih.gov/genomes/M_musculus/protein/protein.fa.gz
wget -O homsap.fa.gz ftp://ftp.ncbi.nlm.nih.gov/genomes/H_sapiens/protein/protein.fa.gz
wget -O danrer.fa.gz ftp://ftp.ncbi.nlm.nih.gov/genomes/D_rerio/protein/protein.fa.gz
wget -O taegut.fa.gz ftp://ftp.ncbi.nlm.nih.gov/genomes/Taeniopygia_guttata/protein/protein.fa.gz
wget -O galgal.fa.gz ftp://ftp.ncbi.nlm.nih.gov/genomes/Gallus_gallus/protein/protein.fa.gz
wget -O melgal.fa.gz ftp://ftp.ncbi.nlm.nih.gov/genomes/Meleagris_gallopavo/protein/protein.fa.gz
wget -O ficalb.fa.gz ftp://ftp.ncbi.nlm.nih.gov/genomes/Ficedula_albicollis/protein/protein.fa.gz
wget -O strcam.fa.gz ftp://ftp.ncbi.nlm.nih.gov/genomes/Struthio_camelus_australis/protein/protein.fa.gz
wget -O tingut.fa.gz ftp://ftp.ncbi.nlm.nih.gov/genomes/Tinamus_guttatus/protein/protein.fa.gz
wget -O anocar.fa.gz ftp://ftp.ncbi.nlm.nih.gov/genomes/Anolis_carolinensis/protein/protein.fa.gz
cat *.fa.gz > maker_protevi_othersp.fa.gz
gunzip maker_protevi_othersp.fa.gz
rm *.fa.gz
|
tsackton/ratite-genomics
|
02_annotation/maker/run1/get_proteins.sh
|
Shell
|
gpl-3.0
| 1,051 |
#!/bin/bash
## service_check.sh
## version 1.5.1
## Written by Tyler Francis on 2016-09-27 for Jelec USA.
## A list of everyone you want to email when a problem occurs, separated by spaces.
alertEmail="[email protected]"
alertSMS="[email protected]"
## How often do you want to receive emails about the same event? Measured in seconds
frequency=3600
## remove the old log, since it was probably already emailed out.
## the leading slash makes sure no goofy aliases of rm are being used.
\rm -f service_check.email
\rm -f service_check.sms
## identify this script in emails,
## in case you stop liking it in a few years and want to change or disable it.
SCRIPTPATH=`pwd -P`/service_check.sh
hostname=`hostname`
function checkNxfilter {
## Check the machine in question to make sure NxFilter is running
## Do this by SSH-ing in just long enough to ask systemd for a 0 (no error, AKA success) or 1 (error)
## Since NxFilter does some DNS work for us, I can't trust hostname resolution, so I'll use the IP instead.
if ssh $3@$2 -o ConnectTimeout=10 -o BatchMode=yes "systemctl is-active nxfilter.service" ; then
echo "NxFilter seems fine on $1." >> service_check.email
else
## section header for email
echo "Problem found with Nx Filter on $1." >> service_check.email
echo "Problem found with Nx Filter on $1." >> service_check.sms
echo "`\date +%Y/%m/%d\ %H:%M`" >> service_check.email
echo "________" >> service_check.email
## Let's make sure I can actually see the server, and use that to compose the body of this section.
if ping -c 4 $2 | grep ", 0% packet loss" ; then
printf "The server $1 seems to be on, resolving to its expected address of $2, and responding to pings, but either SSH or Nx Filter is not running.\n\nYou should reboot the whole server." >> service_check.email
else
printf "I can't reach $1, so I can't tell if Nx Filter is running; let's assume it isn't.\n\nYou should either reboot the whole server, or SSH in and run\nsudo systemctl start nxfilter.service\n\nYou can also run systemctl status nxfilter.service to see a few log lines and figure out what broke." >> service_check.email
fi
fi
## make some room at the end of a section, to make the text file more human-friendly
echo "" >> service_check.email
echo "" >> service_check.email
echo "" >> service_check.email
echo "" >> service_check.email
echo "" >> service_check.email
echo "" >> service_check.email
echo "" >> service_check.email
}
## now that the function has been defined, run it using the following three arguments each separated by a single space.
## Function # hostname # ip address # username
checkNxfilter mahserver 123.456.7.8 username
checkNxfilter urserver 987.6.54.321 username
## argument $1 $2 $3
echo "If this email is wrong or wasting your attention, feel free to make edits at $SCRIPTPATH on $hostname" >> service_check.email
## now use that text file I was creating during each running of the function as the body of an email.
## but only if an actual error was found.
if grep "Problem found" service_check.email ; then
## To make sure I'm not inundated with email or sms notifications,
## only send a notification at the frequency specified at the beginning of this script.
if test -e "emailFrequency.txt" ; then
## This file exists, meaning this is an ongoing problem.
read first < emailFrequency.txt
now=`\date +%s`
if [ $(($now - $first)) -gt $frequency ] ; then
## if this is true, then the specified time has elapsed, and another email should be sent.
cat service_check.email | mail -s "Error: Some important services aren't running" $alertEmail
cat service_check.sms | mail $alertSMS
## Now reset the counter, so another notification can be sent an hour or whenever from now.
echo `\date +%s` > emailFrequency.txt
else
echo "wait longer"
fi
else
## this is the first occurrence of this issue. Start a counter and email the humans.
echo `\date +%s` > emailFrequency.txt
cat service_check.email | mail -s "Error: Some important services aren't running" $alertEmail
cat service_check.sms | mail $alertSMS
fi
else
## This bit only runs if none of the above servers failed their checks,
## which means either the previous problem was fixed, or there was no previous problem. Let's find out which of those is true.
if test -e "emailFrequency.txt" ; then
## If there is no problem now, but the last time this script ran there was a problem, then a problem has been solved.
echo "Success: all services monitored by service_check.sh have been restored" | mail -s "Success: all services monitored by service_check.sh have been restored" $alertEmail
echo "Success: all services monitored by service_check.sh have been restored" | mail $alertSMS
fi
## Remove the email frequency counter.
\rm -f emailFrequency.txt
fi
## on Debian, I set up the mailer with
## apt install exim4-daemon-light mailutils && dpkg-reconfigure exim4-config
## now I can pipe things to "mail" and it works great.
##
## I configured exim4 to send mail via Gmail's SMTP servers
## I then set up a Google Voice account, which lets me send SMS messages via email.
## Once you set up a Google Voice number, by default, GV will send you an email when you receive an SMS
## Send a text from your cell to your new GV number, and you'll get an email from an address that looks like
## [your gv number].[number that just sent an SMS].[seemingly random characters]@txt.voice.google.com
## any email you send to that address from your Gmail account will be received as an SMS on your cell.
|
Linkz57/r-shell-scripts
|
automation/service_check.sh
|
Shell
|
gpl-3.0
| 5,603 |
:
# $APPASERVER_HOME/utility/graphviz_render.sh
# ---------------------------------------------
# Freely available software: see Appaserver.org
# ---------------------------------------------
if [ "$#" -ne 1 ]
then
echo "Usage: $0 filename_label" 1>&2
exit 1
fi
filename_label=$1
dot -Tps ${filename_label}.gv -o ${filename_label}.ps
#ps2pdf ${filename_label}.ps ${filename_label}.pdf
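# To also emit a PDF, uncomment the ps2pdf line above (ps2pdf ships with Ghostscript).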
exit 0
|
timhriley/appaserver
|
utility/graphviz_render.sh
|
Shell
|
gpl-3.0
| 399 |
# gs -sDEVICE=pdfwrite -dCompatibilityLevel=1.4 -dPDFSETTINGS=/screen -dNOPAUSE -dQUIET -dBATCH -sOutputFile=output.pdf input.pdf
# gs -sDEVICE=pdfwrite -dCompatibilityLevel=1.4 -dPDFSETTINGS=/screen -dNOPAUSE -dQUIET -dBATCH -sOutputFile=capg1_Auxiliar_Administrativo-compressed.pdf capg1_Auxiliar_Administrativo.pdf
# capg1_Auxiliar_Administrativo.pdf
# http://www.ubuntugeek.com/ubuntu-tiphowto-reduce-adobe-acrobat-file-size-from-command-line.html
# convert dragon.gif -resize 120x120\> shrink_dragon.gif
# convert dragon.gif -size 120x120\> shrink_dragon.gif
# online url to pdf: https://docs.zone/web-to-pdf
# http://www.pdfaid.com/html2pdf.aspx
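# NOTE: the loop below recompresses each PDF in place; keep copies elsewhere if the originals matter.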
for pdf in *.pdf
do
echo "Processing $pdf"
mv "$pdf" "orig_$pdf"
gs -sDEVICE=pdfwrite -dCompatibilityLevel=1.4 -dPDFSETTINGS=/screen -dNOPAUSE -dQUIET -dBATCH -sOutputFile="$pdf" "orig_$pdf"
rm "orig_$pdf"
done
|
MTEySS/concursos
|
utils/compress_pdf.sh
|
Shell
|
gpl-3.0
| 878 |
#!/bin/bash
#SBATCH --job-name=fds_cylinder_change_rot_finer_reso
#SBATCH --workdir=/scratch/niangxiu/fds/apps/
#SBATCH --output=out
#SBATCH --error=out
#SBATCH --nodes=4
#SBATCH --ntasks-per-node=16
#export PYTHONPATH=$PYTHONPATH:/master/home/niangxiu/.local/lib/python3.5/site-packages
export PYTHONPATH=/master/home/niangxiu/.local/lib/python3.5/site-packages
python3 charles.py
#_#SBATCH --nodelist=node8
|
qiqi/fds
|
apps/charles_cylinder3D/main/parallel_job.sh
|
Shell
|
gpl-3.0
| 411 |
#!/bin/sh
. ../meta.sh
# parameters
dir="beta error_lfmm register_lfmm U V data_lfmm lfmm"
for i in $dir
do
cd test_$i
echo "$BLUE ###### file: test_$i ######"
sh test_file.sh
cd ..
done
|
frichote/replop
|
CL_code/code/test/LFMM/test_dir.sh
|
Shell
|
gpl-3.0
| 195 |
#!/bin/bash
zypper addrepo -f http://packman.inode.at/suse/openSUSE_Leap_42.1/ packman
zypper addrepo -f http://download.opensuse.org/distribution/leap/42.1/repo/oss/ oss
zypper addrepo -f http://download.opensuse.org/distribution/leap/42.1/repo/non-oss/ non-oss
zypper addrepo -f http://download.opensuse.org/update/leap/42.1/oss/ oss-update
zypper addrepo -f http://download.opensuse.org/update/leap/42.1/non-oss/ non-oss-update
zypper addrepo http://download.opensuse.org/repositories/devel:/languages:/python/openSUSE_Leap_42.1/ devel-python
zypper ref
zypper up
zypper dup --from packman
zypper in --from packman gstreamer-plugins-base gstreamer-plugins-bad gstreamer-plugins-bad-orig-addon gstreamer-plugins-good gstreamer-plugins-ugly gstreamer-plugins-vaapi gstreamer-plugins-libav
zypper in python-regex
rm -rf ~/.cache/gstreamer-1.0
|
jtk1rk/xsubedit
|
scripts/old/openSuSE-install-missing.sh
|
Shell
|
gpl-3.0
| 842 |
#!/bin/bash
# set environmental variables
source ../../bin/setsgenvars.sh
progname=analyse
echo Compiling analyse
echo $SGENFC ${progname}.f90 -o ${progname}.x $SGENFLAG $SGENLIB
$SGENFC ${progname}.f90 -o ${progname}.x $SGENFLAG $SGENLIB
echo Copying executable to surfgen directory
cp ${progname}.x $SGENDIR
echo Done
|
virtualzx-nad/surfgen
|
utilities/analyse/install.sh
|
Shell
|
gpl-3.0
| 325 |
# How do I fix mess created by accidentally untarred files in the current dir?
cd /var/www/html/
# quoting "$(tar ztf ...)" would pass every file name as a single argument;
# GNU xargs removes them one per line instead
tar ztf /path/to/file.tar.gz | xargs -d '\n' /bin/rm -f --
# reference
# http://www.cyberciti.biz/open-source/command-line-hacks/20-unix-command-line-tricks-part-i/
|
cloud-sharknado/scripts
|
system/disk/clear_untar_mess.sh
|
Shell
|
gpl-3.0
| 251 |
#!/bin/sh
# Copyright (C) 2011, 2012, 2013, 2014, 2015 Free Software Foundation, Inc.
#
# This file is part of GNU Inetutils.
#
# GNU Inetutils is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or (at
# your option) any later version.
#
# GNU Inetutils is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see `http://www.gnu.org/licenses/'.
# Written by Simon Josefsson
# Prerequisites:
#
# * Shell: SVR4 Bourne shell, or newer.
#
# * chmod(1), id(1), kill(1), mktemp(1), netstat(8), uname(1).
#
# * Detection of sysctl(8) is made. Availability will
# lead to better test coverage.
#
# * Accessed by launched Inetd:
# /etc/nsswitch.conf, /etc/passwd, /etc/protocols.
#
# OpenBSD uses /etc/services directly, not via /etc/nsswitch.conf.
# FIXME: Better test coverage!
#
# Implemented: anonymous-only in inetd-mode.
#
# Wanted: * standalone-mode
# * underprivileged mode.
#
# FIXME: Resolve the hard coded dependency on anonymous mode.
# Address mapping IPv4-to-IPv6 is not uniform on all platforms,
# thus separately using `tcp4' and `tcp6' for streams in `inetd.conf'.
# FIXME: Once functionality attains, reduce code duplication when
# evaluating partial tests.
set -e
. ./tools.sh
FTP=${FTP:-../ftp/ftp$EXEEXT}
FTPD=${FTPD:-../ftpd/ftpd$EXEEXT}
INETD=${INETD:-../src/inetd$EXEEXT}
TARGET=${TARGET:-127.0.0.1}
TARGET6=${TARGET6:-::1}
TARGET46=${TARGET46:-::ffff:127.0.0.1}
# Extended transmission testing.
# This puts contents into $FTPHOME,
# and is therefore not active by default.
do_transfer=false
test "${TRANSFERTEST+yes}" = "yes" && do_transfer=true
# Portability fix for SVR4
PWD="${PWD:-`pwd`}"
# Acting user and target user
#
USER=`func_id_user`
FTPUSER=${FTPUSER:-ftp}
if [ ! -x $FTP ]; then
echo "No FTP client '$FTP' present. Skipping test" >&2
exit 77
elif [ ! -x $FTPD ]; then
echo "No FTP server '$FTPD' present. Skipping test" >&2
exit 77
elif [ ! -x $INETD ]; then
echo "No inetd superserver '$INETD' present. Skipping test" >&2
exit 77
fi
$need_mktemp || exit_no_mktemp
$need_netstat || exit_no_netstat
# The superserver Inetd puts constraints on any chroot
# when running this script, since it needs to look up
# some basic facts stated in the configuration file.
NSSWITCH=/etc/nsswitch.conf
PASSWD=/etc/passwd
PROTOCOLS=/etc/protocols
# Overrides based on systems.
test `uname -s` = OpenBSD && NSSWITCH=/etc/services
if test ! -r $NSSWITCH || test ! -r $PASSWD \
|| test ! -r $PROTOCOLS; then
cat <<-EOT >&2
The use of the superserver Inetd in this script requires
the availability of "$NSSWITCH", "$PASSWD", and
"$PROTOCOLS". At least one of these is now missing.
Therefore skipping test.
EOT
exit 77
fi
if [ $VERBOSE ]; then
set -x
$FTP --version | $SED '1q'
$FTPD --version | $SED '1q'
$INETD --version | $SED '1q'
fi
if [ `func_id_uid` != 0 ]; then
echo "ftpd needs to run as root" >&2
exit 77
fi
if id "$FTPUSER" > /dev/null; then
:
else
echo "anonymous ftpd needs a '$FTPUSER' user" >&2
exit 77
fi
FTPHOME="`eval echo ~"$FTPUSER"`"
if test ! -d "$FTPHOME"; then
save_IFS="$IFS"
IFS=:
set -- `$GREP "^$FTPUSER:" /etc/passwd` # Existence is known as above.
IFS="$save_IFS"
if test -d "$6"; then
FTPHOME="$6"
elif test -d "$5"; then # In cases where GECOS is empty
FTPHOME="$5"
else
echo "The user '$FTPUSER' must have a home directory." >&2
exit 77
fi
fi
if test -d "$FTPHOME" && test -r "$FTPHOME" && test -x "$FTPHOME"; then
: # We have full access to anonymous' home directory.
else
echo "Insufficient access for $FTPUSER's home directory." >&2
exit 77
fi
# Try common subdirectories for writability.
# Result is in DLDIR, usable in chrooted setting.
# Assigns an empty value when no writable candidate
# was found.
if test -z "$DLDIR"; then
# We depend on detection of a usable subdirectory,
# so reactivate this sub-test only with DLDIR known
# to be functional.
do_transfer=false
for DLDIR in /pub /download /downloads /dl /tmp / ; do
test -d $FTPHOME$DLDIR || continue
set -- `ls -ld $FTPHOME$DLDIR`
# Check owner.
test "$3" = $FTPUSER || continue
# Check for write access.
test `expr $1 : 'drwx'` -eq 4 && do_transfer=true && break
DLDIR= # Reset failed value
done
test x"$DLDIR" = x"/" && DLDIR=
fi
# Exit with a hard error, should transfer test be requested,
# but a suitable subdirectory be missing.
if test "${TRANSFERTEST+yes}" = "yes" && \
test $do_transfer = false
then
cat >&2 <<-END
There is no writable subdirectory for transfer test.
Aborting FTP test completely.
END
exit 99
fi
# Note that inetd changes directory to / when --debug is not given so
# all paths must be absolute for things to work.
TMPDIR=`$MKTEMP -d $PWD/tmp.XXXXXXXXXX` ||
{
echo 'Failed at creating test directory. Aborting.' >&2
exit 1
}
posttesting () {
test -n "$TMPDIR" && test -f "$TMPDIR/inetd.pid" \
&& test -r "$TMPDIR/inetd.pid" \
&& { kill "`cat $TMPDIR/inetd.pid`" \
|| kill -9 "`cat $TMPDIR/inetd.pid`"; }
test -n "$TMPDIR" && test -d "$TMPDIR" && rm -rf "$TMPDIR"
$do_transfer && test -n "$FTPHOME" \
&& test -f "$FTPHOME$DLDIR/$PUTME" && rm -f "$FTPHOME$DLDIR/$PUTME" \
|| true
}
trap posttesting 0 1 2 3 15
# locate_port port
#
# Test for IPv4 as well as for IPv6.
locate_port () {
if [ "`uname -s`" = "SunOS" ]; then
$NETSTAT -na -finet -finet6 -Ptcp |
$GREP "\.$1[^0-9]" >/dev/null 2>&1
else
$NETSTAT -na |
$GREP "^$2[46]\{0,2\}.*[^0-9]$1[^0-9]" >/dev/null 2>&1
fi
}
# Files used in transmission tests.
GETME=`$MKTEMP $TMPDIR/file.XXXXXXXX` || do_transfer=false
test -n "$GETME" && GETME=`expr "$GETME" : "$TMPDIR/\(.*\)"`
PUTME=putme.$GETME
# Find an available port number. There will be some
# room left for a race condition, but we try to be
# flexible enough for running copies of this script.
#
if test -z "$PORT"; then
for PORT in 4711 4713 4717 4725 4741 4773 none; do
test $PORT = none && break
if locate_port $PORT; then
continue
else
break
fi
done
if test "$PORT" = 'none'; then
echo 'Our port allocation failed. Skipping test.' >&2
exit 77
fi
fi
cat <<EOT > "$TMPDIR/inetd.conf"
$PORT stream tcp4 nowait $USER $PWD/$FTPD ftpd -A -l
EOT
test "$TEST_IPV6" = "no" ||
cat <<EOT >> "$TMPDIR/inetd.conf"
$PORT stream tcp6 nowait $USER $PWD/$FTPD ftpd -A -l
EOT
if test $? -ne 0; then
echo 'Failed at writing configuration for Inetd. Skipping test.' >&2
exit 77
fi
cat <<EOT > "$TMPDIR/.netrc"
machine $TARGET login $FTPUSER password foobar
EOT
if test $? -ne 0; then
echo 'Failed at writing access file ".netrc". Skipping test.' >&2
exit 77
fi
if test "$TEST_IPV6" != "no"; then
cat <<-EOT >> "$TMPDIR/.netrc"
machine $TARGET6 login $FTPUSER password foobar
machine $TARGET46 login $FTPUSER password foobar
EOT
fi
chmod 600 "$TMPDIR/.netrc"
# Some simple, but variable content.
ls -l > "$TMPDIR/$GETME"
$INETD --pidfile="$TMPDIR/inetd.pid" "$TMPDIR/inetd.conf" ||
{
echo 'Not able to start Inetd. Skipping test.' >&2
exit 1
}
# Wait for inetd to write pid and open socket
sleep 2
test -r "$TMPDIR/inetd.pid" ||
{
cat <<-EOT >&2
Inetd could not write a PID-file, but did claim a start.
This is a serious problem. Doing an emergency abort,
without possibility of killing the Inetd-process.
EOT
exit 1
}
# Test evaluation helper
#
# test_report errno output_file hint_msg
#
test_report () {
test -z "${VERBOSE}" || cat "$2"
if [ $1 != 0 ]; then
echo "Running '$FTP' failed with errno $1." >&2
exit 1
fi
# Did we get access?
if $GREP 'Login failed' "$2" >/dev/null 2>&1; then
echo "Failed login for access using '$3' FTP client." >&2
exit 1
fi
# Standing control connection?
if $GREP 'FTP server status' "$2" >/dev/null 2>&1; then
:
else
echo "Cannot find server status for '$3' FTP client?" >&2
exit 1
fi
# Was data transfer successful?
if $GREP '226 Transfer complete.' "$2" >/dev/null 2>&1; then
:
else
echo "Cannot find transfer result for '$3' FTP client?" >&2
exit 1
fi
}
# Test a passive connection: PASV and IPv4.
#
echo "PASV to $TARGET (IPv4) using inetd."
cat <<STOP |
rstatus
dir
`$do_transfer && test -n "$DLDIR" && echo "\
cd $DLDIR"`
`$do_transfer && echo "\
lcd $TMPDIR
image
put $GETME $PUTME"`
STOP
HOME=$TMPDIR $FTP "$TARGET" $PORT -4 -v -p -t >$TMPDIR/ftp.stdout 2>&1
test_report $? "$TMPDIR/ftp.stdout" "PASV/$TARGET"
$do_transfer && \
if cmp -s "$TMPDIR/$GETME" "$FTPHOME$DLDIR/$PUTME"; then
test "${VERBOSE+yes}" && echo >&2 'Binary transfer succeeded.'
date "+%s" >> "$TMPDIR/$GETME"
else
echo >&2 'Binary transfer failed.'
exit 1
fi
# Test an active connection: PORT and IPv4.
#
echo "PORT to $TARGET (IPv4) using inetd."
cat <<STOP |
rstatus
dir
`$do_transfer && test -n "$DLDIR" && echo "\
cd $DLDIR"`
`$do_transfer && echo "\
lcd $TMPDIR
image
put $GETME $PUTME"`
STOP
HOME=$TMPDIR $FTP "$TARGET" $PORT -4 -v -t >$TMPDIR/ftp.stdout 2>&1
test_report $? "$TMPDIR/ftp.stdout" "PORT/$TARGET"
$do_transfer && \
if cmp -s "$TMPDIR/$GETME" "$FTPHOME$DLDIR/$PUTME"; then
test "${VERBOSE+yes}" && echo >&2 'Binary transfer succeeded.'
date "+%s" >> "$TMPDIR/$GETME"
else
echo >&2 'Binary transfer failed.'
exit 1
fi
# Test a passive connection: EPSV and IPv4.
#
echo "EPSV to $TARGET (IPv4) using inetd."
cat <<STOP |
rstatus
epsv4
dir
STOP
HOME=$TMPDIR $FTP "$TARGET" $PORT -4 -v -p -t >$TMPDIR/ftp.stdout 2>&1
test_report $? "$TMPDIR/ftp.stdout" "EPSV/$TARGET"
# Test a passive connection: EPSV and IPv4.
#
# Set NETRC in environment to regulate login.
#
echo "EPSV to $TARGET (IPv4) using inetd, setting NETRC."
cat <<STOP |
rstatus
epsv4
dir
`$do_transfer && test -n "$DLDIR" && echo "\
cd $DLDIR"`
`$do_transfer && echo "\
lcd $TMPDIR
image
put $GETME $PUTME"`
STOP
NETRC=$TMPDIR/.netrc \
$FTP "$TARGET" $PORT -4 -v -p -t >$TMPDIR/ftp.stdout 2>&1
test_report $? "$TMPDIR/ftp.stdout" "EPSV/$TARGET with NETRC"
$do_transfer && \
if cmp -s "$TMPDIR/$GETME" "$FTPHOME$DLDIR/$PUTME"; then
test "${VERBOSE+yes}" && echo >&2 'Binary transfer succeeded.'
date "+%s" >> "$TMPDIR/$GETME"
else
echo >&2 'Binary transfer failed.'
exit 1
fi
# Test an active connection: EPRT and IPv4.
#
echo "EPRT to $TARGET (IPv4) using inetd."
cat <<STOP |
rstatus
epsv4
dir
`$do_transfer && test -n "$DLDIR" && echo "\
cd $DLDIR"`
`$do_transfer && echo "\
lcd $TMPDIR
image
put $GETME $PUTME"`
STOP
HOME=$TMPDIR $FTP "$TARGET" $PORT -4 -v -t >$TMPDIR/ftp.stdout 2>&1
test_report $? "$TMPDIR/ftp.stdout" "EPRT/$TARGET"
$do_transfer && \
if cmp -s "$TMPDIR/$GETME" "$FTPHOME$DLDIR/$PUTME"; then
test "${VERBOSE+yes}" && echo >&2 'Binary transfer succeeded.'
date "+%s" >> "$TMPDIR/$GETME"
else
echo >&2 'Binary transfer failed.'
exit 1
fi
# Test an active connection: EPRT and IPv4.
#
# Use `-N' to set location of .netrc file.
#
echo "EPRT to $TARGET (IPv4) using inetd, apply the switch -N."
cat <<STOP |
rstatus
epsv4
dir
STOP
$FTP "$TARGET" $PORT -N"$TMPDIR/.netrc" -4 -v -t >$TMPDIR/ftp.stdout 2>&1
test_report $? "$TMPDIR/ftp.stdout" "EPRT/$TARGET"
if test "$TEST_IPV6" != "no" && test -n "$TARGET6"; then
# Test a passive connection: EPSV and IPv6.
#
echo "EPSV to $TARGET6 (IPv6) using inetd."
cat <<-STOP |
rstatus
dir
STOP
HOME=$TMPDIR $FTP "$TARGET6" $PORT -6 -v -p -t >$TMPDIR/ftp.stdout 2>&1
test_report $? "$TMPDIR/ftp.stdout" "EPSV/$TARGET6"
# Test an active connection: EPRT and IPv6.
#
echo "EPRT to $TARGET6 (IPv6) using inetd."
cat <<-STOP |
rstatus
dir
`$do_transfer && test -n "$DLDIR" && echo "\
cd $DLDIR"`
`$do_transfer && echo "\
lcd $TMPDIR
image
put $GETME $PUTME"`
STOP
HOME=$TMPDIR $FTP "$TARGET6" $PORT -6 -v -t >$TMPDIR/ftp.stdout 2>&1
test_report $? "$TMPDIR/ftp.stdout" "EPRT/$TARGET6"
$do_transfer && \
if cmp -s "$TMPDIR/$GETME" "$FTPHOME$DLDIR/$PUTME"; then
test "${VERBOSE+yes}" && echo >&2 'Binary transfer succeeded.'
date "+%s" >> "$TMPDIR/$GETME"
else
echo >&2 'Binary transfer failed.'
exit 1
fi
fi # TEST_IPV6
# Availability of IPv4-mapped IPv6 addresses.
#
# These are impossible on OpenBSD, so a flexible test
# is implemented using sysctl(1) as the probing tool.
# Helpers to catch the relevant execution path.
have_sysctl=false
have_address_mapping=false
# Known exceptions:
#
# OpenSolaris is known to allow address mapping
test `uname -s` = 'SunOS' && have_address_mapping=true
if $have_address_mapping; then
:
else
# Do we have sysctl(1) available?
if sysctl -a >/dev/null 2>&1; then
have_sysctl=true
else
echo "Warning: Not testing IPv4-mapped addresses." >&2
fi
fi
if $have_address_mapping; then
:
elif $have_sysctl; then
# Extract the present setting of
#
# net.ipv6.bindv6only (Linux)
# or
# net.inet6.ip6.v6only (BSD).
#
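# (Typical lines read "net.ipv6.bindv6only = 0" on Linux and
# "net.inet6.ip6.v6only: 0" on BSD; the sed below strips everything
# up to the "=" or ":" separator.)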
value_v6only=`sysctl -a 2>/dev/null | $GREP v6only`
if test -n "$value_v6only"; then
value_v6only=`echo $value_v6only | $SED 's/^.*[=:] *//'`
if test "$value_v6only" -eq 0; then
# This is the good value. Keep it.
have_address_mapping=true
else
echo "Warning: Address mapping IPv4-to-Ipv6 is disabled." >&2
# Set a non-zero value for later testing.
value_v6only=2
fi
else
# Simulate a non-mapping answer when the "v6only" key is missing.
value_v6only=2
fi
fi
# Test functionality of IPv4-mapped IPv6 addresses.
#
if $have_address_mapping && test -n "$TARGET46" &&
test "$TEST_IPV6" != "no"; then
# Test a passive connection: EPSV and IPv4-mapped-IPv6.
#
echo "EPSV to $TARGET46 (IPv4-as-IPv6) using inetd."
cat <<-STOP |
rstatus
dir
`$do_transfer && test -n "$DLDIR" && echo "\
cd $DLDIR"`
`$do_transfer && echo "\
lcd $TMPDIR
image
put $GETME $PUTME"`
STOP
HOME=$TMPDIR $FTP "$TARGET46" $PORT -6 -v -p -t >$TMPDIR/ftp.stdout 2>&1
test_report $? "$TMPDIR/ftp.stdout" "EPSV/$TARGET46"
$do_transfer && \
if cmp -s "$TMPDIR/$GETME" "$FTPHOME$DLDIR/$PUTME"; then
test "${VERBOSE+yes}" && echo >&2 'Binary transfer succeeded.'
date "+%s" >> "$TMPDIR/$GETME"
else
echo >&2 'Binary transfer failed.'
exit 1
fi
# Test an active connection: EPRT and IPv4-mapped-IPv6.
#
echo "EPRT to $TARGET46 (IPv4-as-IPv6) using inetd."
cat <<-STOP |
rstatus
dir
`$do_transfer && test -n "$DLDIR" && echo "\
cd $DLDIR"`
`$do_transfer && echo "\
lcd $TMPDIR
image
put $GETME $PUTME"`
STOP
HOME=$TMPDIR $FTP "$TARGET46" $PORT -6 -v -t >$TMPDIR/ftp.stdout 2>&1
test_report $? "$TMPDIR/ftp.stdout" "EPRT/$TARGET46"
$do_transfer && \
if cmp -s "$TMPDIR/$GETME" "$FTPHOME$DLDIR/$PUTME"; then
test "${VERBOSE+yes}" && echo >&2 'Binary transfer succeeded.'
else
echo >&2 'Binary transfer failed.'
exit 1
fi
else
# The IPv4-as-IPv6 tests were not performed.
echo 'Skipping two tests of IPv4 mapped as IPv6.'
fi # have_address_mapping && TEST_IPV6
# Test name mapping with PASV and IPv4.
# Needs a writable destination!
#
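# (ftp's nmap command rewrites outbound file names: with a local file
# "a.b", "nmap $1.$2 $2.$1" stores it as "b.a", and
# "nmap $1.$2.$3 [$3,copy].$1.$2" stores it as "copy.a.b", since the
# missing third component falls back to the literal "copy".)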
if $do_transfer; then
echo "Name mapping test at $TARGET (IPv4) using inetd."
cat <<-STOP |
`test -z "$DLDIR" || echo "cd $DLDIR"`
lcd $TMPDIR
image
nmap \$1.\$2 \$2.\$1
put $GETME
nmap \$1.\$2.\$3 [\$3,copy].\$1.\$2
put $GETME
STOP
HOME=$TMPDIR $FTP "$TARGET" $PORT -4 -v -p -t >$TMPDIR/ftp.stdout 2>&1
sIFS=$IFS
IFS=.
set -- $GETME
IFS=$sIFS
# Are the expected file copies present?
if test -s $FTPHOME$DLDIR/$2.$1 && \
test -s $FTPHOME$DLDIR/copy.$GETME
then
test "${VERBOSE+yes}" && echo >&2 'Name mapping succeeded.'
rm -f $FTPHOME$DLDIR/$2.$1 $FTPHOME$DLDIR/copy.$GETME
else
echo >&2 'Binary transfer failed.'
test -s $FTPHOME$DLDIR/$2.$1 || \
echo >&2 'Mapping "nmap $1.$2 $2.$1" failed.'
test -s $FTPHOME$DLDIR/copy.$GETME || \
echo >&2 'Mapping "nmap $1.$2.$3 [$3,copy].$1.$2" failed.'
rm -f $FTPHOME$DLDIR/$2.$1 $FTPHOME$DLDIR/copy.$GETME
exit 1
fi
fi
exit 0
|
CodeResearch/inetutil-syn
|
tests/ftp-localhost.sh
|
Shell
|
gpl-3.0
| 16,504 |
#!/bin/bash
#--
# Create mysql dump. Abort if error.
#
# @param save_path
# @param options
# @global MYSQL_CONN mysql connection string "-h DBHOST -u DBUSER -pDBPASS DBNAME"
# @abort
# shellcheck disable=SC2086
#--
function _mysql_dump {
test -z "$MYSQL_CONN" && _abort "mysql connection string MYSQL_CONN is empty"
echo "mysqldump ... $2 > $1"
SECONDS=0
nice -n 10 ionice -c2 -n 7 mysqldump --single-transaction --quick $MYSQL_CONN $2 > "$1" || _abort "mysqldump ... $2 > $1 failed"
echo "$((SECONDS / 60)) minutes and $((SECONDS % 60)) seconds elapsed."
if ! test -f "$1"; then
_abort "no such dump [$1]"
fi
if test -z "$(tail -1 "$1" | grep "Dump completed")"; then
_abort "invalid mysql dump [$1]"
fi
}
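#--
# Example usage (a sketch; connection string and dump options are
# hypothetical, and _abort is assumed from the surrounding library):
#
#   MYSQL_CONN="-h localhost -u shop -psecret shopdb"
#   _mysql_dump "/backup/shopdb.sql" "--no-data"
#--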
|
RolandKujundzic/rkscript
|
src/mysql_dump.sh
|
Shell
|
gpl-3.0
| 725 |
sudo pip3 install kombu
sudo apt-get install rabbitmq-server
sudo rabbitmqctl add_user user 123456
sudo rabbitmqctl add_vhost host
sudo rabbitmqctl set_permissions -p host user '.*' '.*' '.*'
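# Optional sanity check of the setup above (list_vhosts and
# list_permissions are standard rabbitmqctl subcommands):
sudo rabbitmqctl list_vhosts
sudo rabbitmqctl list_permissions -p host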
|
lexdene/learn-kombu
|
setup.sh
|
Shell
|
gpl-3.0
| 192 |
#!/bin/bash
# This script makes the project and executes the test
#
#
# Dynamic Library PATH Check
#
if [ ! `echo $LD_LIBRARY_PATH | grep MATLAB` ] ; then
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opt/MATLAB/R2017a/bin/glnxa64
fi
# Make the program
make
#
# Part of I/O file name variables
#
# Single run with given CHID
CHID='Facility_008'
indir=/home/tmp/fdscov
outdir=/disk/fdsmat/Facility/
if ! test -d $outdir ; then
mkdir -p $outdir
fi
allfiles=`ls $indir/$CHID/*.bin`
outfilename=$outdir/$CHID.mat
for file in $allfiles
do
infilename=$file
bin/fds2mat << ieof
$infilename
$outfilename
ieof
done
|
accordionwb/Develop
|
matfile/easymake.sh
|
Shell
|
gpl-3.0
| 623 |
#!/bin/bash
set -e
set -u
#trap "echo 'INT signal received'" INT
#trap "echo 'TERM signal received'" TERM
cd `dirname $0`
source ./util.sh
source ./tengine_func.sh
log "PID is $$"
log "CMD is $0 $@"
eval "`parse_argument_and_set_variable config_file virtual_server_names versions tengine_reload dynamic_refresh_post_data dynamic_refresh_url refresh_method func`"
ensure_not_empty func="$func"
if [ "$func"x != "dynamic_refresh_config"x ];then
ensure_not_empty config_file="$config_file" virtual_server_names="$virtual_server_names"
ensure_not_empty versions="$versions" func="$func" tengine_reload="$tengine_reload"
else
ensure_not_empty refresh_method="$refresh_method" dynamic_refresh_post_data="$dynamic_refresh_post_data" dynamic_refresh_url="$dynamic_refresh_url"
fi
$func $@
|
leonindy/camel
|
camel-agent/src/main/resources/script/tengine.sh
|
Shell
|
gpl-3.0
| 791 |
#!/bin/sh
exec /usr/bin/mono --verify-all /usr/lib/mhed/mhed.exe "$@"
|
xvitaly/mhed
|
packaging/linux/mhed.sh
|
Shell
|
gpl-3.0
| 71 |
#!/bin/bash
# Author: Robson Vaamonde
# Site: www.procedimentosemti.com.br
# Facebook: facebook.com/ProcedimentosEmTI
# Facebook: facebook.com/BoraParaPratica
# YouTube: youtube.com/BoraParaPratica
# Created: 30/11/2016
# Updated: 30/11/2016
# Version: 0.5
# Tested and approved on Ubuntu Server 16.04 LTS x64
# Kernel >= 4.4.x
#
# Creating the directory validation variables
LOG="/var/log/samba/log.pesquisa_arquivos"
DIRETORIO="/arquivos/pti.intra/gestao"
cd $DIRETORIO
LOCAL="`pwd`"
# Logical test for locating the files
if [ $DIRETORIO == $LOCAL ]; then
echo -e "Locating improper files in folder: $DIRETORIO at: `date`" > $LOG
echo >> $LOG
find -type f -print0 | xargs -0 file -s | egrep '(Audio file|video|executable)' >> $LOG
echo >> $LOG
echo -e "File search finished successfully!!!" >> $LOG
else
echo -e "Directory $DIRETORIO does not exist; check the environment variable settings, file locations, etc." >> $LOG
fi
|
vaamonde/samba4-l2
|
conf/find_files.sh
|
Shell
|
gpl-3.0
| 1,062 |
#!/bin/bash
source $scpt_path/_loadconf.sh
source $scpt_path/_utils.sh
echo "---------------------------------------------"
echo "DOING THE BACKUP"
echo "---------------------------------------------"
if [ "$repo_name" == "" ] ; then
repo_name="${1}"
fi
if [ "$repo_name" == "" ] ; then
echo "Repository name not informed..."
else
repo_target=$(cat $repo_list |grep "${repo_name}:" |cut -d":" -f2)
if [ "$repo_target" == "" ] ; then
echo "Repository '$repo_name' not configured on '$repo_list' config file. Do it, and try again."
echo " "
echo "Current registered data on config file:"
cat $repo_list
else
if ! [ -e ${repo_path} ]; then
action=$(getOption "yn" "Borg Repository '$repo_path' not found, create? (y/n)")
if [ "$action" == "y" ] ; then
$borg_exe init ${repo_path}
fi
fi
if [ -r "${repo_target}" ] ; then
tag="TAG-$(date '+%Y%m%d%H%M%S')"
$borg_exe create -v --stats --progress ${repo_path}::${tag} "${repo_target}"
else
echo "Target directory '${repo_target}' does not exist or I dont have read access..."
fi
fi
fi
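# To inspect the repository afterwards (list/info are standard borg
# subcommands; repo_path and borg_exe come from the loaded config):
#
#   $borg_exe list ${repo_path}
#   $borg_exe info ${repo_path}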
|
neogodo/borgshell
|
_dobackup.sh
|
Shell
|
gpl-3.0
| 1,092 |
./build-all-infrastructure.sh
./build-all-microservices.sh
|
psachdev6375/microservices-bootcamp
|
build-all.sh
|
Shell
|
gpl-3.0
| 59 |
USER=root
PASS=
DBNAME=ertheia
DBHOST=localhost
|
pantelis60/L2Scripts_Underground
|
dist/authserver/sql/mysql_settings.sh
|
Shell
|
gpl-3.0
| 48 |
#! /bin/sh
# Show additional screen on VGA
xrandr --output DP-1 --mode 1024x768 --left-of eDP-1
# Start presentation with length of 15 minutes and make the last 2 minutes alarming
pdfpc -d 15 -l 2 06_PrimenaMasinskogUcenjaUStatickojVerifikacijiSoftveraMicovicRankovicStegic.pdf
|
uros-stegic/msnr-projekat
|
prezentacija/start.sh
|
Shell
|
gpl-3.0
| 281 |
tests_path="`pwd`/tests"
# get all test cpp files
compile_list=`ls -l $tests_path | grep ^- | awk '{split($0,arr," "); print arr[9];}'`
for x in $compile_list; do
# check test-shell source files
if [ "$x" != "test_shell.cpp" ]; then
is_source_file=`echo $x | grep -E "(cpp|c)$"`
if test -n "$is_source_file"; then
# check to execute about *.dep file
filename=`echo $x | awk '{split($0,arr,"."); print arr[1];}'`
dep_file_path=$tests_path"/"$filename".dep"
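# A *.dep file is assumed to be a shell fragment defining two arrays,
# e.g. HEADERS=("include") and SOURCES=("../src/foo.cpp"); the loops
# below turn them into -I flags and tests-relative source paths.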
if test -e "$dep_file_path"; then
# TODO: apply the parsed contents of the *.dep file here
source $dep_file_path
for i in ${!HEADERS[*]}; do
HEADERS[$i]="-I"$tests_path"/"${HEADERS[$i]}
done
for i in ${!SOURCES[*]}; do
SOURCES[$i]=$tests_path"/"${SOURCES[$i]}
done
# build and execute
build_command="g++ -coverage -O0 -o $tests_path/tests ${HEADERS[@]} ${SOURCES[@]}"
build_command="$build_command $tests_path/test_shell.cpp $tests_path/$x -lgcov -lpthread"
exec_command="$tests_path/tests"
echo $build_command
echo $exec_command
`$build_command`
`$exec_command`
for i in ${!SOURCES[*]}; do
`gcov ${SOURCES[$i]}`
done
#result=$?
#if [ "$result" != "0" ]; then
# echo "$x($result): exit $result"
# exit $result
#fi
else
# TODO: report an error that the *.dep file was not found.
echo "No .dep file found at $dep_file_path"
fi
fi
fi
done
|
IWillFindYou/SecurityBootManager
|
.travis.sh
|
Shell
|
gpl-3.0
| 1,608 |
#!/bin/sh
# Create a virtualenv for working with plainbox.
#
# This ensures that 'plainbox' command exists and is in PATH and that the
# plainbox module is correctly located can be imported.
#
# This is how Zygmunt Krynicki works, feel free to use or adjust to your needs
VENV_PATH=
install_missing=0
# Parse arguments:
while [ -n "$1" ]; do
case "$1" in
--help|-h)
echo "Usage: mk-venv.sh [LOCATION]"
echo ""
echo "Create a virtualenv for working with plainbox in LOCATION"
exit 0
;;
--install-missing)
install_missing=1
shift
;;
*)
if [ -z "$VENV_PATH" ]; then
VENV_PATH="$1"
shift
else
echo "Error: too many arguments: '$1'"
exit 1
fi
;;
esac
done
# Apply defaults to arguments without values
if [ -z "$VENV_PATH" ]; then
# Use sensible defaults for vagrant
if [ "$LOGNAME" = "vagrant" ]; then
VENV_PATH=/tmp/venv
else
VENV_PATH=/ramdisk/venv
fi
fi
# Do a sanity check on lsb_release that is missing on Fedora the last time I
# had a look at it.
if [ "x$(which lsb_release)" = "x" ]; then
echo "This script requires the 'lsb_release' command"
exit 1
fi
# The code below is a mixture of Debian/Ubuntu packages and pypi packages.
# It is designed to work on Ubuntu 12.04 or later.
# There are _some_ differences between how each release is handled.
#
# Non Ubuntu systems are not tested as they don't have the required checkbox
# package. Debian might be supported once we have JobBox and stuff like Fedora
# would need a whole new approach but patches are welcome [CLA required]
if [ "$(lsb_release --short --id)" != "Ubuntu" ] && [ $(lsb_release --short --id --upstream) != "Ubuntu" ]; then
echo "Only Ubuntu is supported by this script."
echo "If you are interested in using it with your distribution"
echo "then please join us in #ubuntu-quality on freenode"
echo
echo "Alternatively you can use vagrant to develop plainbox"
echo "on any operating system, even Windows ;-)"
echo
echo "See: http://www.vagrantup.com/ for details"
exit 1
fi
# From now on we can assume a Debian-like system
# Do some conditional stuff depending on the particular Ubuntu release
enable_system_site=0
install_coverage=0
install_distribute=0
install_pip=0
# We need:
# python3:
# because that's what plainbox is written in
# python3-dev:
# because we may pip-install stuff as well and we want to build native extensions
# python3-pkg-resources:
# because it is used by plainbox to locate files and extension points
# python3-setuptools:
# because it is used by setup.py
# python3-lxml:
# because that's how we validate RelaxNG schemas
# python3-mock:
# because that's what we used to construct some of our tests
# python3-sphinx:
# because that's how we build our documentation
# python-virtualenv:
# because that's how we create the virtualenv to work in
# checkbox:
# because plainbox depends on it as a job provider
required_pkgs_base="python3 python3-dev python3-pkg-resources python3-setuptools python3-lxml python3-mock python3-sphinx python-virtualenv checkbox"
# The defaults: install everything from pip and all the base packages
enable_system_site=1
install_distribute=1
install_pip=1
install_coverage=1
install_requests=1
required_pkgs="$required_pkgs_base"
case "$(lsb_release --short --release)" in
12.04|0.2)
# Ubuntu 12.04, this is the LTS release that we have to support despite
# any difficulties. It has python3.2 and all of our core dependencies
# although some packages are old by 13.04 standards, make sure to be
# careful with testing against older APIs.
;;
12.10)
;;
13.04)
# On Raring we can use the system package for python3-requests
install_distribute=0
install_pip=0
install_requests=0
required_pkgs="$required_pkgs_base python3-requests"
;;
*)
echo "Using this version of Ubuntu for development is not supported"
echo "Unsupported version: $(lsb_release --short --release)"
exit 1
;;
esac
# Check if we can create a virtualenv
if [ ! -d $(dirname $VENV_PATH) ]; then
echo "This script requires $(dirname $VENV_PATH) directory to exist"
echo "You can use different directory by passing it as argument"
echo "For a quick temporary location just pass /tmp/venv"
exit 1
fi
# Check if there's one already there
if [ -d $VENV_PATH ]; then
echo "$VENV_PATH seems to already exist"
exit 1
fi
# Ensure that each required package is installed
for pkg_name in $required_pkgs; do
# Ensure virtualenv is installed
if [ "$(dpkg -s $pkg_name 2>/dev/null | grep '^Status' 2>/dev/null)" != "Status: install ok installed" ]; then
if [ "$install_missing" -eq 1 ]; then
echo "Installing required package: $pkg_name"
sudo apt-get install $pkg_name --yes
else
echo "Required package is not installed: '$pkg_name'"
echo "Either install it manually with:"
echo "$ sudo apt-get install $pkg_name"
echo "Or rerun this script with --install-missing"
exit 1
fi
fi
done
# Create a virtualenv
if [ $enable_system_site -eq 1 ]; then
virtualenv --system-site-packages -p python3 $VENV_PATH
else
virtualenv -p python3 $VENV_PATH
fi
# Activate it to install additional stuff
. $VENV_PATH/bin/activate
# Install / upgrade distribute
if [ $install_distribute -eq 1 ]; then
    # (distribute tarball, exact version assumed; the coverage tarball is handled below)
    pip install --upgrade https://github.com/checkbox/external-tarballs/raw/master/pypi/distribute-0.6.35.tar.gz
fi
# Install / upgrade pip
if [ $install_pip -eq 1 ]; then
pip install --upgrade https://github.com/checkbox/external-tarballs/raw/master/pypi/pip-1.3.1.tar.gz
fi
# Install coverage if required
if [ $install_coverage -eq 1 ]; then
pip install --upgrade https://github.com/checkbox/external-tarballs/raw/master/pypi/coverage-3.6.tar.gz
fi
# Install requests if required
if [ $install_requests -eq 1 ]; then
pip install --upgrade https://github.com/checkbox/external-tarballs/raw/master/pypi/requests-1.1.0.tar.gz
fi
# "develop" plainbox
http_proxy=http://127.0.0.1:9/ python3 setup.py develop
echo "To activate your virtualenv run:"
echo "$ . $VENV_PATH/bin/activate"
|
jds2001/ocp-checkbox
|
plainbox/mk-venv.sh
|
Shell
|
gpl-3.0
| 6,491 |
#!/bin/bash
# 2016.08.20 by M.Tyler
# Create a bunch of partial files
create_part()
{
FILENAME="part_$1_$2"
echo Creating $FILENAME
dd if=$2 of=$FILENAME bs=$1 count=1
}
rm part_*
> empty
ls _*.bmp _*.gif _*.png _*.jpg |
while read line
do
create_part "20" "$line"
create_part "400" "$line"
done
ls -l
|
matyler/mtCellEdit
|
test/mtpixyutils/input/get_part.sh
|
Shell
|
gpl-3.0
| 318 |
#!/bin/bash
# Script to backup a PostgreSQL database running in a Docker container (spawned
# via Docker Compose) using "postgres" image (https://hub.docker.com/_/postgres/).
#
# Usage: backup_dockerized_postgresql_database.sh dockercompose_filepath database_service_name_in_dockercompose_file backup_destination_dirpath
#
# How it works:
# * Give it:
# * The Docker Compose dirpath (i.e. the directory to run the `docker-compose` command from).
# * The name of the Docker Compose service that corresponds to the PostgreSQL server.
# * Where to create backups.
# * And it will:
# * Find the container ID (via `docker-compose ps`).
# * Find user, password and database name from POSTGRES_* environment variables from `docker inspect`.
# * Create, fill and copy to the container a .pgpass file.
# * Run `pg_dump`.
# * Print the path of created backup file.
# * Clean.
#
# Notes:
# * Paths to `docker` and `docker-compose` can be adjusted via DOCKER_CLI_FILEPATH
# and DOCKERCOMPOSE_CLI_FILEPATH environment variables.
set -euo pipefail
readonly PROGNAME="$(basename -- "$0")"
readonly DOCKER_CLI_FILEPATH="$(which docker)"
readonly DOCKERCOMPOSE_CLI_FILEPATH="/usr/local/bin/docker-compose"
# PostgreSQL port
readonly DATABASE_PORT=5432
# Arguments handling
if [ "$#" -lt 3 ] ; then
(>&2 echo "Error: Missing arguments.")
(>&2 echo "Usage: ${PROGNAME} dockercompose_filepath database_service_name_in_dockercompose_file backup_destination_dirpath")
exit 2
fi
DOCKERCOMPOSE_DIRPATH="${1}"
DOCKERCOMPOSE_DATABASE_SERVICENAME="${2}"
BACKUP_DESTINATION_DIRPATH="${3}"
if [ ! -d "${DOCKERCOMPOSE_DIRPATH}" ] ; then
(>&2 echo "Error: Invalid argument: dockercompose_filepath (\"${DOCKERCOMPOSE_DIRPATH}\") is not a directory.")
exit 2
fi
if [ -z "${DOCKERCOMPOSE_DATABASE_SERVICENAME}" ] ; then
(>&2 echo "Error: Invalid argument: database_service_name_in_dockercompose_file is empty")
exit 2
fi
if [ ! -d "${BACKUP_DESTINATION_DIRPATH}" ] ; then
(>&2 echo "Error: Invalid argument: backup_destination_dirpath (\"${BACKUP_DESTINATION_DIRPATH}\") is not a directory.")
exit 2
fi
# /Arguments handling
# Move into Docker Compose directory before running docker-compose commands
cd "${DOCKERCOMPOSE_DIRPATH}"
# Get PostgreSQL's container ID
DOCKER_DATABASE_CONTAINERID="$(${DOCKERCOMPOSE_CLI_FILEPATH} ps -q "${DOCKERCOMPOSE_DATABASE_SERVICENAME}")"
# Get user, password and database name from environment variables passed to the
# container (looking for POSTGRES_USER, POSTGRES_PASSWORD and POSTGRES_DB respectively)
DB_USER="$(${DOCKER_CLI_FILEPATH} inspect --format '{{range .Config.Env}}{{println .}}{{end}}' "${DOCKER_DATABASE_CONTAINERID}" | grep '^POSTGRES_USER=' | cut -d '=' -f 2-)"
DB_PASSWORD="$(${DOCKER_CLI_FILEPATH} inspect --format '{{range .Config.Env}}{{println .}}{{end}}' "${DOCKER_DATABASE_CONTAINERID}" | grep '^POSTGRES_PASSWORD=' | cut -d '=' -f 2-)"
DB_NAME="$(${DOCKER_CLI_FILEPATH} inspect --format '{{range .Config.Env}}{{println .}}{{end}}' "${DOCKER_DATABASE_CONTAINERID}" | grep '^POSTGRES_DB=' | cut -d '=' -f 2-)"
# Create a .pgpass file locally to store the PostgreSQL credentials (which is
# then copied into the container)
LOCAL_POSTGRESQL_PGPASSFILE_FILEPATH="$(mktemp)"
echo "${DOCKERCOMPOSE_DATABASE_SERVICENAME}:${DATABASE_PORT}:${DB_NAME}:${DB_USER}:${DB_PASSWORD}" > "${LOCAL_POSTGRESQL_PGPASSFILE_FILEPATH}"
CONTAINER_POSTGRESQL_PGPASSFILE_FILEPATH="/tmp/.pgpass"
${DOCKER_CLI_FILEPATH} cp "${LOCAL_POSTGRESQL_PGPASSFILE_FILEPATH}" "${DOCKER_DATABASE_CONTAINERID}:${CONTAINER_POSTGRESQL_PGPASSFILE_FILEPATH}"
BACKUP_DATE="$(date --utc +'%F_%H-%M-%S_%Z')"
BACKUP_DESTINATION_FILEPATH="${BACKUP_DESTINATION_DIRPATH}/${DB_NAME}.${BACKUP_DATE}.sql"
# Executes `pg_dump` on database $DB_NAME as $DB_USER using the .pgpass file
# designated by "PGPASSFILE" environment variable (=)
${DOCKERCOMPOSE_CLI_FILEPATH} exec \
-T \
--env "PGPASSFILE=${CONTAINER_POSTGRESQL_PGPASSFILE_FILEPATH}" \
"${DOCKERCOMPOSE_DATABASE_SERVICENAME}" \
pg_dump \
--clean \
--create \
--username="${DB_USER}" \
"${DB_NAME}" \
> "${BACKUP_DESTINATION_FILEPATH}"
# Prints the created dump
if [ $? -eq 0 ] ; then
echo "${BACKUP_DESTINATION_FILEPATH}"
fi
# Move back to previous directory (now that all docker-compose commands were executed)
cd - > /dev/null
# Delete now-useless local .pgpass file
rm "${LOCAL_POSTGRESQL_PGPASSFILE_FILEPATH}"
|
C-Duv/sysadmin-scripts
|
backup-tools/docker/backup_dockerized_postgresql.sh
|
Shell
|
gpl-3.0
| 4,497 |
#!/bin/bash
# for development
# ruby ~/NetBeansProjects/muxie-maker/lib/main.rb "$@"
ruby /usr/share/muxie-maker/lib/main.rb "$@"
|
mad3linux/muxie-maker
|
bin/muxie-maker.sh
|
Shell
|
gpl-3.0
| 130 |
#!/usr/bin/env bash
# Run from directory ABOVE scripts
OUT_FILE="data/poker_var_stay_user.out"
IN_FILE="scripts/poker_var_stay_user.json"
SCRIPT=scripts/meansd.py
RND=$RANDOM
TMPJSONFILE="/tmp/poker_var_stay_$RND.json"
TMPCOSTFILE="/tmp/poker_var_stay_cost_$RND.out"
TMPDAYFILE="/tmp/poker_var_stay_day_$RND.out"
TMPSESSFILE="/tmp/poker_var_stay_sess_$RND.out"
TMPQOSFILE="/tmp/poker_var_stay_qos_$RND.out"
rm -f $OUT_FILE
for users in "1000" "2000" "5000" "10000"; do
sed -e "s/XXXXXX/$users/g" $IN_FILE | sed -e "s%DAYTMPFILE%$TMPDAYFILE%g" | sed -e "s%COSTTMPFILE%$TMPCOSTFILE%g" | sed -e "s%QOSTMPFILE%$TMPQOSFILE%g" | sed -e "s%SESSTMPFILE%$TMPSESSFILE%g"> $TMPJSONFILE
src/streamsim.py $TMPJSONFILE
echo -n $users "" >> $OUT_FILE
cat $TMPCOSTFILE | $SCRIPT | tr -d '\n' >> $OUT_FILE
echo -n " " >> $OUT_FILE
cat $TMPQOSFILE | $SCRIPT | tr -d '\n'>> $OUT_FILE
echo -n " " >> $OUT_FILE
cat $TMPSESSFILE | $SCRIPT | tr -d '\n'>> $OUT_FILE
echo >> $OUT_FILE
rm -f $TMPJSONFILE $TMPCOSTFILE $TMPDAYFILE $TMPQOSFILE $TMPSESSFILE
done
|
richardclegg/multiuservideostream
|
streamsim/scripts/poker_var_stay_user.sh
|
Shell
|
mpl-2.0
| 1,062 |
#!/usr/bin/env bash
set -e
if [ -z "$OS_PASSWORD" ] || [ -z "$OS_TENANT_ID" ]; then
if [ -s $HOME/.openrc ]; then
printf "Loading OpenStack credentials from $HOME/.openrc...\n"
source $HOME/.openrc
fi
fi
if [ -z "$OS_PASSWORD" ] || [ -z "$OS_TENANT_ID" ]; then
printf "Please load your OpenStack credentials file.\n"
exit 1
fi
SOURCE_DIRECTORY="$(git rev-parse --show-toplevel)"
CONTAINER_NAME="bnetdocs"
if [ ! -d "$SOURCE_DIRECTORY/tmp/sql-backups" ]; then
printf "No sql backups to upload.\n"
exit 0
fi
set -x
pushd "$SOURCE_DIRECTORY/tmp"
swift-3 \
--os-auth-url "$OS_AUTH_URL" \
--auth-version 3 \
--os-project-id "$OS_TENANT_ID" \
--os-username "$OS_USERNAME" \
--os-password "$OS_PASSWORD" \
upload "$CONTAINER_NAME" \
sql-backups
popd
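# To verify the upload afterwards ("list" is a standard swift subcommand;
# same credentials as above):
#
#   swift-3 --os-auth-url "$OS_AUTH_URL" --auth-version 3 \
#     --os-project-id "$OS_TENANT_ID" --os-username "$OS_USERNAME" \
#     --os-password "$OS_PASSWORD" list "$CONTAINER_NAME"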
|
BNETDocs/bnetdocs-web
|
bin/upload-backup-databases.sh
|
Shell
|
agpl-3.0
| 787 |
#!/bin/bash
# Create config file using environment variables
sed -e s,MONGODB_SERVER_URL,$MONGODB_SERVER_URL, $CATALINA_HOME/conf/tomee.xml.template >$CATALINA_HOME/conf/tomee.xml
echo ====================================================================
echo Using environment:
echo --------------------------------------------------------------------
echo MONGODB_SERVER_URL=$MONGODB_SERVER_URL
echo CATALINA_HOME=$CATALINA_HOME
echo ====================================================================
echo Using tomee.xml:
echo --------------------------------------------------------------------
cat $CATALINA_HOME/conf/tomee.xml
echo ====================================================================
# Start tomee
$CATALINA_HOME/bin/catalina.sh run
|
mminke/pv-datalogger
|
pvdatavisualizer/src/main/docker/scripts/start.sh
|
Shell
|
agpl-3.0
| 760 |
F=$1
# 64 results in distances of up to 6
# 128 results in distances of up to 4
for SIZE in 64 128 256 512 1024 1536 2048
do
echo $SIZE
convert -resize $SIZE "$F" "`basename "$F" .jpg`-resize${SIZE}.jpg"
done
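# Example invocation (file name hypothetical): ./resize.sh photo.jpg
# produces photo-resize64.jpg through photo-resize2048.jpg alongside it.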
|
CreativeCommons-Seneca/registry
|
searcher/benchmarks/resize.sh
|
Shell
|
agpl-3.0
| 218 |
#!/bin/sh
# Set VERSIONS to the commitishes we want to check out, in order of
# preference; typically this is the version tag. Since a tag may not
# exist before release, fall back to the master branch:
VERSIONS="origin/install-script 0.15 origin/master"
PARENT_SCRIPT_URL=https://github.com/mysociety/commonlib/blob/master/bin/install-site.sh
misuse() {
echo The variable $1 was not defined, and it should be.
echo This script should not be run directly - instead, please run:
echo $PARENT_SCRIPT_URL
exit 1
}
# Strictly speaking we don't need to check all of these, but it might
# catch some errors made when changing install-site.sh
[ -z "$DIRECTORY" ] && misuse DIRECTORY
[ -z "$UNIX_USER" ] && misuse UNIX_USER
[ -z "$REPOSITORY" ] && misuse REPOSITORY
[ -z "$REPOSITORY_URL" ] && misuse REPOSITORY_URL
[ -z "$BRANCH" ] && misuse BRANCH
[ -z "$SITE" ] && misuse SITE
[ -z "$DEFAULT_SERVER" ] && misuse DEFAULT_SERVER
[ -z "$HOST" ] && misuse HOST
[ -z "$DISTRIBUTION" ] && misuse DISTRIBUTION
[ -z "$VERSIONS" ] && misuse VERSIONS
[ -z "$DEVELOPMENT_INSTALL" ] && misuse DEVELOPMENT_INSTALL
[ -z "$BIN_DIRECTORY" ] && misuse BIN_DIRECTORY
update_mysociety_apt_sources
# Debian Squeeze Fixes
if [ x"$DISTRIBUTION" = x"debian" ] && [ x"$DISTVERSION" = x"squeeze" ]
then
# Add wheezy repo to get bundler
cat > /etc/apt/sources.list.d/debian-wheezy.list <<EOF
deb http://the.earth.li/debian/ wheezy main contrib non-free
EOF
# Get bundler from wheezy repo and de-prioritise all other
# wheezy packages
cat >> /etc/apt/preferences <<EOF
Package: bundler
Pin: release n=wheezy
Pin-Priority: 990
Package: *
Pin: release n=wheezy
Pin-Priority: 50
EOF
apt-get -qq update
fi
# Ubuntu Precise Fixes
if [ x"$DISTRIBUTION" = x"ubuntu" ] && [ x"$DISTVERSION" = x"precise" ]
then
cat > /etc/apt/sources.list.d/ubuntu-trusty.list <<EOF
deb http://archive.ubuntu.com/ubuntu/ trusty universe
deb-src http://archive.ubuntu.com/ubuntu/ trusty universe
EOF
cat > /etc/apt/sources.list.d/mysociety-launchpad.list <<EOF
deb http://ppa.launchpad.net/mysociety/alaveteli/ubuntu precise main
deb-src http://ppa.launchpad.net/mysociety/alaveteli/ubuntu precise main
EOF
# Get bundler from trusty and de-prioritise all other
# trusty packages
cat >> /etc/apt/preferences <<EOF
Package: ruby-bundler
Pin: release n=trusty
Pin-Priority: 990
Package: *
Pin: release n=trusty
Pin-Priority: 50
EOF
# Get the key for the mysociety ubuntu alaveteli repo
apt-get install -y python-software-properties
add-apt-repository -y ppa:mysociety/alaveteli
apt-get -qq update
fi
apt-get -y update
if [ ! "$DEVELOPMENT_INSTALL" = true ]; then
install_nginx
add_website_to_nginx
# Check out the first available requested version:
su -l -c "cd '$REPOSITORY' && (for v in $VERSIONS; do git checkout $v && break; done)" \
"$UNIX_USER"
fi
install_postfix
# Now there's quite a bit of Postfix configuration that we need to
# make sure is present:
ensure_line_present \
"^ *alaveteli *unix *" \
"alaveteli unix - n n - 50 pipe flags=R user=$UNIX_USER argv=$REPOSITORY/script/mailin" \
/etc/postfix/master.cf 644
ensure_line_present \
"^ *transport_maps *=" \
"transport_maps = regexp:/etc/postfix/transports" \
/etc/postfix/main.cf 644
ensure_line_present \
"^ *local_recipient_maps *=" \
"local_recipient_maps = proxy:unix:passwd.byname regexp:/etc/postfix/recipients" \
/etc/postfix/main.cf 644
ensure_line_present \
"^ *mydestination *=" \
"mydestination = $HOST, $(hostname --fqdn), localhost.localdomain, localhost" \
/etc/postfix/main.cf 644
ensure_line_present \
"^ *myhostname *=" \
"myhostname = $(hostname --fqdn)" \
/etc/postfix/main.cf 644
ensure_line_present \
"^do-not-reply" \
"do-not-reply-to-this-address: :blackhole:" \
/etc/aliases 644
ensure_line_present \
"^mail" \
"mail.* -/var/log/mail/mail.log" \
/etc/rsyslog.d/50-default.conf 644
cat > /etc/postfix/transports <<EOF
/^foi.*/ alaveteli
EOF
cat > /etc/postfix/recipients <<EOF
/^foi.*/ this-is-ignored
/^postmaster@/ this-is-ignored
/^user-support@/ this-is-ignored
/^team@/ this-is-ignored
EOF
if ! egrep '^ */var/log/mail/mail.log *{' /etc/logrotate.d/rsyslog
then
cat >> /etc/logrotate.d/rsyslog <<EOF
/var/log/mail/mail.log {
rotate 30
daily
dateext
missingok
notifempty
compress
delaycompress
sharedscripts
postrotate
reload rsyslog >/dev/null 2>&1 || true
endscript
}
EOF
fi
/etc/init.d/rsyslog restart
newaliases
postmap /etc/postfix/transports
postmap /etc/postfix/recipients
postfix reload
# (end of the Postfix configuration)
install_website_packages
# Give the unix user membership of the adm group so that they can read the mail log files
usermod -a -G adm "$UNIX_USER"
# Make the PostgreSQL user a superuser to avoid the irritating error:
# PG::Error: ERROR: permission denied: "RI_ConstraintTrigger_16564" is a system trigger
# This is only needed for loading the sample data, so the superuser
# permissions are dropped below.
add_postgresql_user --superuser
# create the template_utf8 template we'll use for our databases
sudo -u postgres createdb -T template0 -E UTF-8 template_utf8
sudo -u postgres psql <<EOF
update pg_database set datistemplate=true, datallowconn=false where datname='template_utf8';
EOF
export DEVELOPMENT_INSTALL
su -l -c "$BIN_DIRECTORY/install-as-user '$UNIX_USER' '$HOST' '$DIRECTORY'" "$UNIX_USER"
# Now that the install-as-user script has loaded the sample data, we
# no longer need the PostgreSQL user to be a superuser:
echo "ALTER USER \"$UNIX_USER\" WITH NOSUPERUSER;" | su -l -c 'psql' postgres
# Set up root's crontab:
cd "$REPOSITORY"
echo -n "Creating /etc/cron.d/alaveteli... "
(su -l -c "cd '$REPOSITORY' && bundle exec rake config_files:convert_crontab DEPLOY_USER='$UNIX_USER' VHOST_DIR='$DIRECTORY' VCSPATH='$SITE' SITE='$SITE' CRONTAB=config/crontab-example" "$UNIX_USER") > /etc/cron.d/alaveteli
# There are some other parts to rewrite, so just do them with sed:
sed -r \
-e "/$SITE-purge-varnish/d" \
-e "s,^(MAILTO=).*,\1root@$HOST," \
-i /etc/cron.d/alaveteli
echo $DONE_MSG
if [ ! "$DEVELOPMENT_INSTALL" = true ]; then
echo -n "Creating /etc/init.d/$SITE... "
(su -l -c "cd '$REPOSITORY' && bundle exec rake config_files:convert_init_script DEPLOY_USER='$UNIX_USER' VHOST_DIR='$DIRECTORY' VCSPATH='$SITE' SITE='$SITE' SCRIPT_FILE=config/sysvinit-thin.example" "$UNIX_USER") > /etc/init.d/"$SITE"
chgrp "$UNIX_USER" /etc/init.d/"$SITE"
chmod 754 /etc/init.d/"$SITE"
echo $DONE_MSG
fi
echo -n "Creating /etc/init.d/$SITE-alert-tracks... "
(su -l -c "cd '$REPOSITORY' && bundle exec rake config_files:convert_init_script DEPLOY_USER='$UNIX_USER' VHOST_DIR='$DIRECTORY' SCRIPT_FILE=config/alert-tracks-debian.example" "$UNIX_USER") > /etc/init.d/"$SITE-alert-tracks"
chgrp "$UNIX_USER" /etc/init.d/"$SITE-alert-tracks"
chmod 754 /etc/init.d/"$SITE-alert-tracks"
echo $DONE_MSG
if [ $DEFAULT_SERVER = true ] && [ x != x$EC2_HOSTNAME ]
then
# If we're setting up as the default on an EC2 instance, make sure
# that the /etc/rc.local is set up to run the install script again
# to update the hostname:
overwrite_rc_local
fi
done_msg "Installation complete"; echo
|
4bic/alaveteli
|
script/site-specific-install.sh
|
Shell
|
agpl-3.0
| 7,495 |
#!/bin/bash
if [ ! -f virtualenv/bin/activate ];then
virtualenv --no-site-packages virtualenv/
fi
source virtualenv/bin/activate
pip -q install -r pip-requirements
pip -q install coverage
coverage erase
if [ ! -f spiff/local_settings.py ];then
echo "SECRET_KEY='foo'" > spiff/local_settings.py
fi
coverage run ./manage.py test $@
ret=$?
if [ $ret -eq 0 ];then
coverage report -m --include=\* --omit=\*/migrations/\*,spiff/settings.py,spiff/local_settings.py,manage.py,*/site-packages/*
fi
exit $ret
|
SYNHAK/spiff
|
run_unit_tests.sh
|
Shell
|
agpl-3.0
| 506 |
#!/bin/bash -ue
# Copyright (C) 2013 Percona Inc
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to the
# Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston
# MA 02110-1301 USA.
# Optional dependencies and options documented here: http://www.percona.com/doc/percona-xtradb-cluster/manual/xtrabackup_sst.html
# Make sure to read that before proceeding!
. $(dirname $0)/wsrep_sst_common
ealgo=""
ekey=""
ekeyfile=""
encrypt=0
nproc=1
ecode=0
XTRABACKUP_PID=""
SST_PORT=""
REMOTEIP=""
tcert=""
tpem=""
sockopt=""
progress=""
ttime=0
totime=0
lsn=""
incremental=0
ecmd=""
rlimit=""
sfmt="tar"
strmcmd=""
tfmt=""
tcmd=""
rebuild=0
rebuildcmd=""
payload=0
pvformat="-F '%N => Rate:%r Avg:%a Elapsed:%t %e Bytes: %b %p' "
pvopts="-f -i 10 -N $WSREP_SST_OPT_ROLE "
uextra=0
if which pv &>/dev/null && pv --help | grep -q FORMAT;then
pvopts+=$pvformat
fi
pcmd="pv $pvopts"
declare -a RC
INNOBACKUPEX_BIN=innobackupex
readonly AUTH=(${WSREP_SST_OPT_AUTH//:/ })
DATA="${WSREP_SST_OPT_DATA}"
INFO_FILE="xtrabackup_galera_info"
IST_FILE="xtrabackup_ist"
MAGIC_FILE="${DATA}/${INFO_FILE}"
# Setting the path for ss and ip
export PATH="/usr/sbin:/sbin:$PATH"
timeit(){
local stage=$1
shift
local cmd="$@"
local x1 x2 took extcode
if [[ $ttime -eq 1 ]];then
x1=$(date +%s)
wsrep_log_info "Evaluating $cmd"
eval "$cmd"
extcode=$?
x2=$(date +%s)
took=$(( x2-x1 ))
wsrep_log_info "NOTE: $stage took $took seconds"
totime=$(( totime+took ))
else
wsrep_log_info "Evaluating $cmd"
eval "$cmd"
extcode=$?
fi
return $extcode
}
get_keys()
{
if [[ $encrypt -eq 2 ]];then
return
fi
if [[ $encrypt -eq 0 ]];then
if my_print_defaults -c $WSREP_SST_OPT_CONF xtrabackup | grep -q encrypt;then
wsrep_log_error "Unexpected option combination. SST may fail. Refer to http://www.percona.com/doc/percona-xtradb-cluster/manual/xtrabackup_sst.html "
fi
return
fi
if [[ $sfmt == 'tar' ]];then
wsrep_log_info "NOTE: Xtrabackup-based encryption - encrypt=1 - cannot be enabled with tar format"
encrypt=0
return
fi
wsrep_log_info "Xtrabackup based encryption enabled in my.cnf - Supported only from Xtrabackup 2.1.4"
if [[ -z $ealgo ]];then
wsrep_log_error "FATAL: Encryption algorithm empty from my.cnf, bailing out"
exit 3
fi
if [[ -z $ekey && ! -r $ekeyfile ]];then
wsrep_log_error "FATAL: Either key or keyfile must be readable"
exit 3
fi
if [[ -z $ekey ]];then
ecmd="xbcrypt --encrypt-algo=$ealgo --encrypt-key-file=$ekeyfile"
else
ecmd="xbcrypt --encrypt-algo=$ealgo --encrypt-key=$ekey"
fi
if [[ "$WSREP_SST_OPT_ROLE" == "joiner" ]];then
ecmd+=" -d"
fi
}
get_transfer()
{
if [[ -z $SST_PORT ]];then
TSST_PORT=4444
else
TSST_PORT=$SST_PORT
fi
if [[ $tfmt == 'nc' ]];then
if [[ ! -x `which nc` ]];then
wsrep_log_error "nc(netcat) not found in path: $PATH"
exit 2
fi
wsrep_log_info "Using netcat as streamer"
if [[ "$WSREP_SST_OPT_ROLE" == "joiner" ]];then
tcmd="nc -dl ${TSST_PORT}"
else
tcmd="nc ${REMOTEIP} ${TSST_PORT}"
fi
else
tfmt='socat'
wsrep_log_info "Using socat as streamer"
if [[ ! -x `which socat` ]];then
wsrep_log_error "socat not found in path: $PATH"
exit 2
fi
if [[ $encrypt -eq 2 ]] && ! socat -V | grep -q OPENSSL;then
wsrep_log_info "NOTE: socat is not openssl enabled, falling back to plain transfer"
encrypt=0
fi
if [[ $encrypt -eq 2 ]];then
wsrep_log_info "Using openssl based encryption with socat"
if [[ -z $tpem || -z $tcert ]];then
wsrep_log_error "Both PEM and CRT files required"
exit 22
fi
if [[ "$WSREP_SST_OPT_ROLE" == "joiner" ]];then
wsrep_log_info "Decrypting with PEM $tpem, CA: $tcert"
tcmd="socat -u openssl-listen:${TSST_PORT},reuseaddr,cert=$tpem,cafile=${tcert}${sockopt} stdio"
else
wsrep_log_info "Encrypting with PEM $tpem, CA: $tcert"
tcmd="socat -u stdio openssl-connect:${REMOTEIP}:${TSST_PORT},cert=$tpem,cafile=${tcert}${sockopt}"
fi
else
if [[ "$WSREP_SST_OPT_ROLE" == "joiner" ]];then
tcmd="socat -u TCP-LISTEN:${TSST_PORT},reuseaddr${sockopt} stdio"
else
tcmd="socat -u stdio TCP:${REMOTEIP}:${TSST_PORT}${sockopt}"
fi
fi
fi
}
parse_cnf()
{
local group=$1
local var=$2
reval=$(my_print_defaults -c $WSREP_SST_OPT_CONF $group | awk -F= '{if ($1 ~ /_/) { gsub(/_/,"-",$1); print $1"="$2 } else { print $0 }}' | grep -- "--$var=" | cut -d= -f2-)
if [[ -z $reval ]];then
[[ -n $3 ]] && reval=$3
fi
echo $reval
}
get_footprint()
{
pushd $WSREP_SST_OPT_DATA 1>/dev/null
payload=$(du --block-size=1 -c **/*.ibd **/*.MYD **/*.MYI ibdata1 | awk 'END { print $1 }')
if my_print_defaults -c $WSREP_SST_OPT_CONF xtrabackup | grep -q -- "--compress";then
# QuickLZ has around 50% compression ratio
# When compression/compaction used, the progress is only an approximate.
payload=$(( payload*1/2 ))
fi
popd 1>/dev/null
pcmd+=" -s $payload"
adjust_progress
}
adjust_progress()
{
if [[ -n $progress && $progress != '1' ]];then
if [[ -e $progress ]];then
pcmd+=" 2>>$progress"
else
pcmd+=" 2>$progress"
fi
elif [[ -z $progress && -n $rlimit ]];then
# When rlimit is non-zero
pcmd="pv -q"
fi
if [[ -n $rlimit && "$WSREP_SST_OPT_ROLE" == "donor" ]];then
wsrep_log_info "Rate-limiting SST to $rlimit"
pcmd+=" -L \$rlimit"
fi
}
read_cnf()
{
sfmt=$(parse_cnf sst streamfmt "tar")
tfmt=$(parse_cnf sst transferfmt "socat")
tcert=$(parse_cnf sst tca "")
tpem=$(parse_cnf sst tcert "")
encrypt=$(parse_cnf sst encrypt 0)
sockopt=$(parse_cnf sst sockopt "")
progress=$(parse_cnf sst progress "")
rebuild=$(parse_cnf sst rebuild 0)
ttime=$(parse_cnf sst time 0)
incremental=$(parse_cnf sst incremental 0)
ealgo=$(parse_cnf xtrabackup encrypt "")
ekey=$(parse_cnf xtrabackup encrypt-key "")
ekeyfile=$(parse_cnf xtrabackup encrypt-key-file "")
# Refer to http://www.percona.com/doc/percona-xtradb-cluster/manual/xtrabackup_sst.html
if [[ -z $ealgo ]];then
ealgo=$(parse_cnf sst encrypt-algo "")
ekey=$(parse_cnf sst encrypt-key "")
ekeyfile=$(parse_cnf sst encrypt-key-file "")
fi
rlimit=$(parse_cnf sst rlimit "")
uextra=$(parse_cnf sst use_extra 0)
}
get_stream()
{
if [[ $sfmt == 'xbstream' ]];then
wsrep_log_info "Streaming with xbstream"
if [[ "$WSREP_SST_OPT_ROLE" == "joiner" ]];then
strmcmd="xbstream -x"
else
strmcmd="xbstream -c \${INFO_FILE} \${IST_FILE}"
fi
else
sfmt="tar"
wsrep_log_info "Streaming with tar"
if [[ "$WSREP_SST_OPT_ROLE" == "joiner" ]];then
strmcmd="tar xfi - --recursive-unlink -h"
else
strmcmd="tar cf - \${INFO_FILE} \${IST_FILE}"
fi
fi
}
get_proc()
{
set +e
nproc=$(grep -c processor /proc/cpuinfo)
[[ -z $nproc || $nproc -eq 0 ]] && nproc=1
set -e
}
sig_joiner_cleanup()
{
wsrep_log_error "Removing $MAGIC_FILE file due to signal"
rm -f "$MAGIC_FILE"
}
cleanup_joiner()
{
# Since this is invoked just after exit NNN
local estatus=$?
if [[ $estatus -ne 0 ]];then
wsrep_log_error "Cleanup after exit with status:$estatus"
fi
if [ "${WSREP_SST_OPT_ROLE}" = "joiner" ];then
wsrep_log_info "Removing the sst_in_progress file"
wsrep_cleanup_progress_file
fi
if [[ -n $progress && -p $progress ]];then
wsrep_log_info "Cleaning up fifo file $progress"
rm $progress
fi
}
check_pid()
{
local pid_file="$1"
[ -r "$pid_file" ] && ps -p $(cat "$pid_file") >/dev/null 2>&1
}
cleanup_donor()
{
# Since this is invoked just after exit NNN
local estatus=$?
if [[ $estatus -ne 0 ]];then
wsrep_log_error "Cleanup after exit with status:$estatus"
fi
if [[ -n $XTRABACKUP_PID ]];then
if check_pid $XTRABACKUP_PID
then
wsrep_log_error "xtrabackup process is still running. Killing... "
kill_xtrabackup
fi
rm -f $XTRABACKUP_PID
fi
rm -f ${DATA}/${IST_FILE}
if [[ -n $progress && -p $progress ]];then
wsrep_log_info "Cleaning up fifo file $progress"
rm $progress
fi
}
kill_xtrabackup()
{
local PID=$(cat $XTRABACKUP_PID)
[ -n "$PID" -a "0" != "$PID" ] && kill $PID && (kill $PID && kill -9 $PID) || :
rm -f "$XTRABACKUP_PID"
}
setup_ports()
{
if [[ "$WSREP_SST_OPT_ROLE" == "donor" ]];then
SST_PORT=$(echo $WSREP_SST_OPT_ADDR | awk -F '[:/]' '{ print $2 }')
REMOTEIP=$(echo $WSREP_SST_OPT_ADDR | awk -F ':' '{ print $1 }')
lsn=$(echo $WSREP_SST_OPT_ADDR | awk -F '[:/]' '{ print $4 }')
else
SST_PORT=$(echo ${WSREP_SST_OPT_ADDR} | awk -F ':' '{ print $2 }')
fi
}
# waits ~10 seconds for nc to open the port and then reports ready
# (regardless of timeout)
wait_for_listen()
{
local PORT=$1
local ADDR=$2
local MODULE=$3
for i in {1..50}
do
ss -p state listening "( sport = :$PORT )" | grep -qE 'socat|nc' && break
sleep 0.2
done
if [[ $incremental -eq 1 ]];then
echo "ready ${ADDR}/${MODULE}/$lsn"
else
echo "ready ${ADDR}/${MODULE}"
fi
}
check_extra()
{
local use_socket=1
if [[ $uextra -eq 1 ]];then
if my_print_defaults -c $WSREP_SST_OPT_CONF mysqld | tr '_' '-' | grep -- "--thread-handling=" | grep -q 'pool-of-threads';then
local eport=$(my_print_defaults -c $WSREP_SST_OPT_CONF mysqld | tr '_' '-' | grep -- "--extra-port=" | cut -d= -f2)
if [[ -n $eport ]];then
# Xtrabackup works only locally.
# Hence, setting host to 127.0.0.1 unconditionally.
wsrep_log_info "SST through extra_port $eport"
INNOEXTRA+=" --host=127.0.0.1 --port=$eport "
use_socket=0
else
wsrep_log_error "Extra port $eport null, failing"
exit 1
fi
else
wsrep_log_info "Thread pool not set, ignore the option use_extra"
fi
fi
if [[ $use_socket -eq 1 ]] && [[ -n "${WSREP_SST_OPT_SOCKET}" ]];then
INNOEXTRA+=" --socket=${WSREP_SST_OPT_SOCKET}"
fi
}
if [[ ! -x `which innobackupex` ]];then
wsrep_log_error "innobackupex not in path: $PATH"
exit 2
fi
rm -f "${MAGIC_FILE}"
if [[ ! ${WSREP_SST_OPT_ROLE} == 'joiner' && ! ${WSREP_SST_OPT_ROLE} == 'donor' ]];then
wsrep_log_error "Invalid role ${WSREP_SST_OPT_ROLE}"
exit 22
fi
read_cnf
setup_ports
get_stream
get_transfer
INNOEXTRA=""
INNOAPPLY="${INNOBACKUPEX_BIN} --defaults-file=${WSREP_SST_OPT_CONF} --apply-log \$rebuildcmd \${DATA} &>\${DATA}/innobackup.prepare.log"
INNOBACKUP="${INNOBACKUPEX_BIN} --defaults-file=${WSREP_SST_OPT_CONF} \$INNOEXTRA --galera-info --stream=\$sfmt \${TMPDIR} 2>\${DATA}/innobackup.backup.log"
if [ "$WSREP_SST_OPT_ROLE" = "donor" ]
then
trap cleanup_donor EXIT
if [ $WSREP_SST_OPT_BYPASS -eq 0 ]
then
TMPDIR="${TMPDIR:-/tmp}"
if [ "${AUTH[0]}" != "(null)" ]; then
INNOEXTRA+=" --user=${AUTH[0]}"
fi
if [ ${#AUTH[*]} -eq 2 ]; then
INNOEXTRA+=" --password=${AUTH[1]}"
elif [ "${AUTH[0]}" != "(null)" ]; then
# Empty password, used for testing, debugging etc.
INNOEXTRA+=" --password="
fi
get_keys
if [[ $encrypt -eq 1 ]];then
if [[ -n $ekey ]];then
INNOEXTRA+=" --encrypt=$ealgo --encrypt-key=$ekey "
else
INNOEXTRA+=" --encrypt=$ealgo --encrypt-key-file=$ekeyfile "
fi
fi
if [[ -n $lsn ]];then
INNOEXTRA+=" --incremental --incremental-lsn=$lsn "
fi
check_extra
wsrep_log_info "Streaming the backup to joiner at ${REMOTEIP} ${SST_PORT}"
if [[ -n $progress ]];then
get_footprint
tcmd="$pcmd | $tcmd"
elif [[ -n $rlimit ]];then
adjust_progress
tcmd="$pcmd | $tcmd"
fi
set +e
timeit "Donor-Transfer" "$INNOBACKUP | $tcmd; RC=( "\${PIPESTATUS[@]}" )"
set -e
if [ ${RC[0]} -ne 0 ]; then
wsrep_log_error "${INNOBACKUPEX_BIN} finished with error: ${RC[0]}. " \
"Check ${DATA}/innobackup.backup.log"
exit 22
elif [[ ${RC[$(( ${#RC[@]}-1 ))]} -eq 1 ]];then
wsrep_log_error "$tcmd finished with error: ${RC[1]}"
exit 22
fi
# innobackupex implicitly writes PID to fixed location in ${TMPDIR}
XTRABACKUP_PID="${TMPDIR}/xtrabackup_pid"
else # BYPASS FOR IST
wsrep_log_info "Bypassing the SST for IST"
STATE="${WSREP_SST_OPT_GTID}"
echo "continue" # now server can resume updating data
echo "${STATE}" > "${MAGIC_FILE}"
echo "1" > "${DATA}/${IST_FILE}"
get_keys
pushd ${DATA} 1>/dev/null
set +e
if [[ $encrypt -eq 1 ]];then
tcmd=" $ecmd | $tcmd"
fi
timeit "Donor-IST-Unencrypted-transfer" "$strmcmd | $tcmd; RC=( "\${PIPESTATUS[@]}" )"
set -e
popd 1>/dev/null
for ecode in "${RC[@]}";do
if [[ $ecode -ne 0 ]];then
wsrep_log_error "Error while streaming data to joiner node: " \
"exit codes: ${RC[@]}"
exit 1
fi
done
fi
echo "done ${WSREP_SST_OPT_GTID}"
wsrep_log_info "Total time on donor: $totime seconds"
elif [ "${WSREP_SST_OPT_ROLE}" = "joiner" ]
then
[[ -e $SST_PROGRESS_FILE ]] && wsrep_log_info "Stale sst_in_progress file: $SST_PROGRESS_FILE"
touch $SST_PROGRESS_FILE
if [[ ! -e ${DATA}/ibdata1 ]];then
incremental=0
fi
if [[ $incremental -eq 1 ]];then
wsrep_log_info "Incremental SST enabled"
#lsn=$(/pxc/bin/mysqld --defaults-file=$WSREP_SST_OPT_CONF --basedir=/pxc --wsrep-recover 2>&1 | grep -o 'log sequence number .*' | cut -d " " -f 4 | head -1)
lsn=$(grep to_lsn xtrabackup_checkpoints | cut -d= -f2 | tr -d ' ')
wsrep_log_info "Recovered LSN: $lsn"
fi
sencrypted=1
nthreads=1
MODULE="xtrabackup_sst"
# May need xtrabackup_checkpoints later on
rm -f ${DATA}/xtrabackup_binary ${DATA}/xtrabackup_galera_info ${DATA}/xtrabackup_logfile
ADDR=${WSREP_SST_OPT_ADDR}
if [ -z "${SST_PORT}" ]
then
SST_PORT=4444
ADDR="$(echo ${WSREP_SST_OPT_ADDR} | awk -F ':' '{ print $1 }'):${SST_PORT}"
fi
wait_for_listen ${SST_PORT} ${ADDR} ${MODULE} &
trap sig_joiner_cleanup HUP PIPE INT TERM
trap cleanup_joiner EXIT
if [[ -n $progress ]];then
adjust_progress
tcmd+=" | $pcmd"
fi
if [[ $incremental -eq 1 ]];then
BDATA=$DATA
DATA=$(mktemp -d)
MAGIC_FILE="${DATA}/${INFO_FILE}"
fi
get_keys
set +e
if [[ $encrypt -eq 1 && $sencrypted -eq 1 ]];then
strmcmd=" $ecmd | $strmcmd"
fi
pushd ${DATA} 1>/dev/null
timeit "Joiner-Recv-Unencrypted" "$tcmd | $strmcmd; RC=( "\${PIPESTATUS[@]}" )"
popd 1>/dev/null
set -e
if [[ $sfmt == 'xbstream' ]];then
# Special handling till lp:1193240 is fixed"
if [[ ${RC[$(( ${#RC[@]}-1 ))]} -eq 1 ]];then
wsrep_log_error "Xbstream failed"
wsrep_log_error "Data directory ${DATA} may not be empty: lp:1193240" \
"Manual intervention required in that case"
exit 32
fi
fi
wait %% # join for wait_for_listen thread
for ecode in "${RC[@]}";do
if [[ $ecode -ne 0 ]];then
wsrep_log_error "Error while getting data from donor node: " \
"exit codes: ${RC[@]}"
exit 32
fi
done
if [ ! -r "${MAGIC_FILE}" ]
then
# this message should cause joiner to abort
wsrep_log_error "xtrabackup process ended without creating '${MAGIC_FILE}'"
wsrep_log_info "Contents of datadir"
wsrep_log_info "$(ls -l ${DATA}/**/*)"
exit 32
fi
if ! ps -p ${WSREP_SST_OPT_PARENT} &>/dev/null
then
wsrep_log_error "Parent mysqld process (PID:${WSREP_SST_OPT_PARENT}) terminated unexpectedly."
exit 32
fi
if [ ! -r "${DATA}/${IST_FILE}" ]
then
wsrep_log_info "Proceeding with SST"
wsrep_log_info "Removing existing ib_logfile files"
if [[ $incremental -ne 1 ]];then
rm -f ${DATA}/ib_logfile*
else
rm -f ${BDATA}/ib_logfile*
fi
get_proc
# Rebuild indexes for compact backups
if grep -q 'compact = 1' ${DATA}/xtrabackup_checkpoints;then
wsrep_log_info "Index compaction detected"
rebuild=1
fi
if [[ $rebuild -eq 1 ]];then
nthreads=$(parse_cnf xtrabackup rebuild-threads $nproc)
wsrep_log_info "Rebuilding during prepare with $nthreads threads"
rebuildcmd="--rebuild-indexes --rebuild-threads=$nthreads"
fi
if test -n "$(find ${DATA} -maxdepth 1 -type f -name '*.qp' -print -quit)";then
wsrep_log_info "Compressed qpress files found"
if [[ ! -x `which qpress` ]];then
wsrep_log_error "qpress not found in path: $PATH"
exit 22
fi
if [[ -n $progress ]] && pv --help | grep -q 'line-mode';then
count=$(find ${DATA} -type f -name '*.qp' | wc -l)
count=$(( count*2 ))
if pv --help | grep -q FORMAT;then
pvopts="-f -s $count -l -N Decompression -F '%N => Rate:%r Elapsed:%t %e Progress: [%b/$count]'"
else
pvopts="-f -s $count -l -N Decompression"
fi
pcmd="pv $pvopts"
adjust_progress
dcmd="$pcmd | xargs -n 2 qpress -T${nproc}d"
else
dcmd="xargs -n 2 qpress -T${nproc}d"
fi
wsrep_log_info "Removing existing ibdata1 file"
rm -f ${DATA}/ibdata1
# Decompress the qpress files
wsrep_log_info "Decompression with $nproc threads"
timeit "Decompression" "find ${DATA} -type f -name '*.qp' -printf '%p\n%h\n' | $dcmd"
extcode=$?
if [[ $extcode -eq 0 ]];then
wsrep_log_info "Removing qpress files after decompression"
find ${DATA} -type f -name '*.qp' -delete
if [[ $? -ne 0 ]];then
wsrep_log_error "Something went wrong with deletion of qpress files. Investigate"
fi
else
wsrep_log_error "Decompression failed. Exit code: $extcode"
exit 22
fi
fi
if [[ $incremental -eq 1 ]];then
# Added --ibbackup=xtrabackup_55 because it fails otherwise citing connection issues.
INNOAPPLY="${INNOBACKUPEX_BIN} --defaults-file=${WSREP_SST_OPT_CONF} \
--ibbackup=xtrabackup_55 --apply-log $rebuildcmd --redo-only $BDATA --incremental-dir=${DATA} &>>${BDATA}/innobackup.prepare.log"
fi
wsrep_log_info "Preparing the backup at ${DATA}"
timeit "Xtrabackup prepare stage" "$INNOAPPLY"
if [[ $incremental -eq 1 ]];then
wsrep_log_info "Cleaning up ${DATA} after incremental SST"
[[ -d ${DATA} ]] && rm -rf ${DATA}
DATA=$BDATA
fi
if [ $? -ne 0 ];
then
wsrep_log_error "${INNOBACKUPEX_BIN} finished with errors. Check ${DATA}/innobackup.prepare.log"
exit 22
fi
else
wsrep_log_info "${IST_FILE} received from donor: Running IST"
fi
if [[ ! -r ${MAGIC_FILE} ]];then
wsrep_log_error "SST magic file ${MAGIC_FILE} not found/readable"
exit 2
fi
cat "${MAGIC_FILE}" # output UUID:seqno
wsrep_log_info "Total time on joiner: $totime seconds"
fi
exit 0
|
allfs/mariadb
|
scripts/wsrep_sst_xtrabackup.sh
|
Shell
|
lgpl-2.1
| 21,464 |
#!/bin/bash
# set PYTHONPATH to OpenTURNS if needed
PYOT=$(echo $HOME/ot/trunk/build/install/lib/python2.7/site-packages)
PYOT=$(echo $PWD/../../../build/install/lib/python2.7/site-packages):$PYOT
export PYTHONPATH=$PYOT:$PYTHONPATH
# start the script
python ot_script.py
|
openturns/otdistfunc
|
python/test/wrapper_python_distributed/start_it.sh
|
Shell
|
lgpl-3.0
| 274 |
#!/bin/sh
# @(#) $Id: get_disk_data.sh,v 6.10.1.1 2013-09-12 16:13:15 ralph Exp $
# -------------------------------------------------------------------------
TMP_PREFIX=/var/tmp/get_disk_data-$$-
TMP_DEV_LUN=${TMP_PREFIX}DEV_LUN
TMP_DEV_VG=${TMP_PREFIX}DEV_VG
function exit_message
{
rm -f ${TMP_PREFIX}*
echo $*
exit
}
function ioscan_1131 {
/usr/sbin/ioscan -m lun|awk '/^disk/ {print "\n"} {printf "%s ", $0} END {print "\n"}'|while read f1 f2 lun rest
do
for dev in $rest
do
[[ $dev = @(/dev/[rcd]*isk/disk[0-9]*) ]] && echo $dev $lun
done
done | sort > $TMP_DEV_LUN
[[ $? != 0 ]] && exit_message "$0: error in ioscan"
/usr/sbin/vgdisplay -v 2>/dev/null | awk '/^VG Name/ {vg=$3} /PV Name/ {print $3, vg}'|sort -u > $TMP_DEV_VG
[[ $? != 0 ]] && exit_message error in vgdisplay
join -a 1 -o 2.2,1.1,1.2 $TMP_DEV_VG $TMP_DEV_LUN | sort
}
function ioscan_11i {
/usr/sbin/ioscan -kfnC disk|awk '/^disk/ {print "\n"} {printf "%s ", $0} END {print "\n"}'|while read f1 f2 lun rest
do
for dev in $rest
do
[[ $dev = @(/dev/[rd]*isk/disk[0-9]*) ]] && echo $dev $lun
done
done | sort > $TMP_DEV_LUN
[[ $? != 0 ]] && exit_message "$0: error in ioscan"
/usr/sbin/vgdisplay -v 2>/dev/null | awk '/^VG Name/ {vg=$3} /PV Name/ {print $3, vg}'|sort -u > $TMP_DEV_VG
[[ $? != 0 ]] && exit_message error in vgdisplay
join -a 1 -o 2.2,1.1,1.2 $TMP_DEV_VG $TMP_DEV_LUN | sort
}
# MAIN
os_rel=$(uname -r)
case $os_rel in
B.11.11|B.11.23) ioscan_11i ;;
B.11.31) ioscan_1131 ;;
esac
# cleanup
rm -f ${TMP_PREFIX}*
|
rossonet/templateAr4k
|
cfg2html-master/hpux/plugins/get_disk_data.sh
|
Shell
|
lgpl-3.0
| 1,566 |
#!/bin/bash
find ../ -name 'mysqli.php' -exec sh -c './checkPostgreSQLOfMySQLi.sh $0' {} \;
|
Devoter/rtQuery
|
translator/createPostgreSQLClassesFromMySQLi.sh
|
Shell
|
lgpl-3.0
| 92 |