code (string, lengths 2–1.05M) | repo_name (string, lengths 5–110) | path (string, lengths 3–922) | language (1 class) | license (15 values) | size (int64, 2–1.05M)
---|---|---|---|---|---
#!/bin/bash
SCRIPT_FILE=$(which "$0")
SCRIPT_ROOT=$(dirname "$(realpath "$SCRIPT_FILE")")
PROJECT_ROOT=$(dirname "${SCRIPT_ROOT}")
HOST_ARCH=$(uname -m)
TARGET_ARCH=mips
BUILD_ROOT=build/openwrt-$TARGET_ARCH-release
OBJECT_ROOT=$BUILD_ROOT/object
OUTPUT_ROOT=$BUILD_ROOT/output
CROSS_FLAGS="--build=${HOST_ARCH}-linux --host=mips-openwrt-linux"
export CC=mips-openwrt-linux-gcc
export CFLAGS="-I${PROJECT_ROOT}/${OUTPUT_ROOT}/include"
export CPPFLAGS="-I${PROJECT_ROOT}/${OUTPUT_ROOT}/include"
export LDFLAGS="-L${PROJECT_ROOT}/${OUTPUT_ROOT}/lib"
pushd "$PROJECT_ROOT"
mkdir -p ${OBJECT_ROOT}
# build curl
if [ -e "${OUTPUT_ROOT}/lib/libcurl.a" ]; then
echo "libcurl already built, skip..."
else
mkdir -p ${OBJECT_ROOT}/curl
pushd ${OBJECT_ROOT}/curl
${PROJECT_ROOT}/3rdparty/curl-7.41.0/configure --prefix=$PROJECT_ROOT/$OUTPUT_ROOT $CROSS_FLAGS || exit 1
make || exit 1
make install || exit 1
popd
fi
# build zlib
if [ -e "${OUTPUT_ROOT}/lib/libz.a" ]; then
echo "libz already built, skip..."
else
mkdir -p ${OBJECT_ROOT}/zlib
pushd ${PROJECT_ROOT}/3rdparty/zlib-1.2.8
./configure --prefix=$PROJECT_ROOT/$OUTPUT_ROOT || exit 1
make || exit 1
make install || exit 1
popd
fi
# build libpng
if [ -e "${OUTPUT_ROOT}/lib/libpng16.a" ]; then
echo "libpng16 already built, skip..."
else
mkdir -p ${OBJECT_ROOT}/libpng16
pushd ${OBJECT_ROOT}/libpng16
echo "${CFLAGS}"
${PROJECT_ROOT}/3rdparty/libpng-1.6.17/configure --prefix=$PROJECT_ROOT/$OUTPUT_ROOT $CROSS_FLAGS || exit 1
make CFLAGS="$CFLAGS" LDFLAGS="$LDFLAGS" || exit 1
make install CFLAGS="$CFLAGS" LDFLAGS="$LDFLAGS" || exit 1
popd
fi
# build freetype2
if [ -e "${OUTPUT_ROOT}/lib/libfreetype.a" ]; then
echo "libfreetype2 already built, skip.."
else
mkdir -p ${OBJECT_ROOT}/freetype2
pushd ${OBJECT_ROOT}/freetype2
${PROJECT_ROOT}/3rdparty/freetype-2.5.5/configure --prefix=$PROJECT_ROOT/$OUTPUT_ROOT --with-png=no --with-harfbuzz=no $CROSS_FLAGS || exit 1
make || exit 1
make install || exit 1
popd
fi
# build libusb
if [ -e "${OUTPUT_ROOT}/lib/libusb-1.0.a" ]; then
echo "libusb-1.0 already built, skip..."
else
mkdir -p ${OBJECT_ROOT}/libusb
pushd ${OBJECT_ROOT}/libusb
${PROJECT_ROOT}/3rdparty/libusb-1.0.19/configure --prefix=$PROJECT_ROOT/$OUTPUT_ROOT --disable-udev $CROSS_FLAGS || exit 1
make || exit 1
make install || exit 1
popd
fi
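# Editorial sketch (not part of the original build flow): each dependency
# above repeats the same configure/make/make install pattern; a helper like
# the illustrative build_dep below could express it once. It is defined but
# not called, and assumes the CROSS_FLAGS/OBJECT_ROOT/OUTPUT_ROOT variables
# set at the top of this script.
build_dep() {
    local name=$1 configure=$2; shift 2
    mkdir -p "${OBJECT_ROOT}/${name}"
    pushd "${OBJECT_ROOT}/${name}" || exit 1
    "${configure}" --prefix="${PROJECT_ROOT}/${OUTPUT_ROOT}" ${CROSS_FLAGS} "$@" || exit 1
    make || exit 1
    make install || exit 1
    popd
}
# Usage (illustrative): build_dep libusb "${PROJECT_ROOT}/3rdparty/libusb-1.0.19/configure" --disable-udev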
# build rpairmon
./onemake/onemake.py concurrent=8 scheme=release target_arch=mips target_platform=openwrt
# all done
popd
| cnwzhjs/rpairmon | scripts/build_mips_binary.sh | Shell | gpl-2.0 | 2,585 |
#!/bin/bash
# Copyright (c) 2015 Igor Pecovnik, igor.pecovnik@gma**.com
#
# This file is licensed under the terms of the GNU General Public
# License version 2. This program is licensed "as is" without any
# warranty of any kind, whether express or implied.
# This file is a part of the Armbian build script
# https://github.com/armbian/build/
# Create board support packages
#
# Functions:
# create_board_package
create_board_package()
{
display_alert "Creating board support package" "$BOARD $BRANCH" "info"
local destination=$SRC/.tmp/${RELEASE}/${CHOSEN_ROOTFS}_${REVISION}_${ARCH}
rm -rf "${destination}"
mkdir -p "${destination}"/DEBIAN
# install copy of boot script & environment file
local bootscript_src=${BOOTSCRIPT%%:*}
local bootscript_dst=${BOOTSCRIPT##*:}
mkdir -p "${destination}"/usr/share/armbian/
cp "${SRC}/config/bootscripts/${bootscript_src}" "${destination}/usr/share/armbian/${bootscript_dst}"
[[ -n $BOOTENV_FILE && -f $SRC/config/bootenv/$BOOTENV_FILE ]] && \
cp "${SRC}/config/bootenv/${BOOTENV_FILE}" "${destination}"/usr/share/armbian/armbianEnv.txt
# add configuration for setting the u-boot environment from userspace with fw_setenv / fw_printenv
if [[ -n $UBOOT_FW_ENV ]]; then
UBOOT_FW_ENV=($(tr ',' ' ' <<< "$UBOOT_FW_ENV"))
mkdir -p "${destination}"/etc
echo "# Device to access offset env size" > "${destination}"/etc/fw_env.config
echo "/dev/mmcblk0 ${UBOOT_FW_ENV[0]} ${UBOOT_FW_ENV[1]}" >> "${destination}"/etc/fw_env.config
fi
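# Illustrative usage (editorial note, not generated by this function): with
# the /etc/fw_env.config written above in place, u-boot-tools can read and
# modify the environment from userspace, e.g.:
#   fw_printenv bootargs
#   fw_setenv bootdelay 3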
# Replaces: base-files is needed to replace /etc/update-motd.d/ files on Xenial
# Replaces: unattended-upgrades may be needed to replace /etc/apt/apt.conf.d/50unattended-upgrades
# (distributions provide good defaults, so this is not needed currently)
# Depends: linux-base is needed for "linux-version" command in initrd cleanup script
cat <<-EOF > "${destination}"/DEBIAN/control
Package: linux-${RELEASE}-root-${DEB_BRANCH}${BOARD}
Version: $REVISION
Architecture: $ARCH
Maintainer: $MAINTAINER <$MAINTAINERMAIL>
Installed-Size: 1
Section: kernel
Priority: optional
Depends: bash, linux-base, u-boot-tools, initramfs-tools, python-apt, lsb-release
Provides: armbian-bsp
Conflicts: armbian-bsp
Suggests: armbian-config
Replaces: zram-config, base-files, armbian-tools-$RELEASE
Recommends: bsdutils, parted, util-linux, toilet
Description: Armbian tweaks for $RELEASE on $BOARD ($BRANCH branch)
EOF
# set up pre install script
cat <<-EOF > "${destination}"/DEBIAN/preinst
#!/bin/sh
# tell people to reboot at next login
[ "\$1" = "upgrade" ] && touch /var/run/.reboot_required
# convert link to file
if [ -L "/etc/network/interfaces" ]; then
cp /etc/network/interfaces /etc/network/interfaces.tmp
rm /etc/network/interfaces
mv /etc/network/interfaces.tmp /etc/network/interfaces
fi
# swap
grep -q vm.swappiness /etc/sysctl.conf
case \$? in
0)
sed -i 's/vm\.swappiness.*/vm.swappiness=100/' /etc/sysctl.conf
;;
*)
echo vm.swappiness=100 >>/etc/sysctl.conf
;;
esac
sysctl -p >/dev/null 2>&1
# disable deprecated services
[ -f "/etc/profile.d/activate_psd_user.sh" ] && rm /etc/profile.d/activate_psd_user.sh
[ -f "/etc/profile.d/check_first_login.sh" ] && rm /etc/profile.d/check_first_login.sh
[ -f "/etc/profile.d/check_first_login_reboot.sh" ] && rm /etc/profile.d/check_first_login_reboot.sh
[ -f "/etc/profile.d/ssh-title.sh" ] && rm /etc/profile.d/ssh-title.sh
[ -f "/etc/update-motd.d/10-header" ] && rm /etc/update-motd.d/10-header
[ -f "/etc/update-motd.d/30-sysinfo" ] && rm /etc/update-motd.d/30-sysinfo
[ -f "/etc/update-motd.d/35-tips" ] && rm /etc/update-motd.d/35-tips
[ -f "/etc/update-motd.d/40-updates" ] && rm /etc/update-motd.d/40-updates
[ -f "/etc/update-motd.d/98-autoreboot-warn" ] && rm /etc/update-motd.d/98-autoreboot-warn
[ -f "/etc/update-motd.d/99-point-to-faq" ] && rm /etc/update-motd.d/99-point-to-faq
[ -f "/etc/update-motd.d/80-esm" ] && rm /etc/update-motd.d/80-esm
[ -f "/etc/update-motd.d/80-livepatch" ] && rm /etc/update-motd.d/80-livepatch
[ -f "/etc/apt/apt.conf.d/50unattended-upgrades" ] && rm /etc/apt/apt.conf.d/50unattended-upgrades
[ -f "/etc/apt/apt.conf.d/02compress-indexes" ] && rm /etc/apt/apt.conf.d/02compress-indexes
[ -f "/etc/apt/apt.conf.d/02periodic" ] && rm /etc/apt/apt.conf.d/02periodic
[ -f "/etc/apt/apt.conf.d/no-languages" ] && rm /etc/apt/apt.conf.d/no-languages
[ -f "/etc/init.d/armhwinfo" ] && rm /etc/init.d/armhwinfo
[ -f "/etc/logrotate.d/armhwinfo" ] && rm /etc/logrotate.d/armhwinfo
[ -f "/etc/init.d/firstrun" ] && rm /etc/init.d/firstrun
[ -f "/etc/init.d/resize2fs" ] && rm /etc/init.d/resize2fs
[ -f "/lib/systemd/system/firstrun-config.service" ] && rm /lib/systemd/system/firstrun-config.service
[ -f "/lib/systemd/system/firstrun.service" ] && rm /lib/systemd/system/firstrun.service
[ -f "/lib/systemd/system/resize2fs.service" ] && rm /lib/systemd/system/resize2fs.service
[ -f "/usr/lib/armbian/apt-updates" ] && rm /usr/lib/armbian/apt-updates
[ -f "/usr/lib/armbian/firstrun-config.sh" ] && rm /usr/lib/armbian/firstrun-config.sh
dpkg-divert --quiet --package linux-${RELEASE}-root-${DEB_BRANCH}${BOARD} --add --rename --divert /etc/mpv/mpv-dist.conf /etc/mpv/mpv.conf
exit 0
EOF
chmod 755 "${destination}"/DEBIAN/preinst
# postrm script
cat <<-EOF > "${destination}"/DEBIAN/postrm
#!/bin/sh
if [ remove = "\$1" ] || [ abort-install = "\$1" ]; then
dpkg-divert --quiet --package linux-${RELEASE}-root-${DEB_BRANCH}${BOARD} --remove --rename --divert /etc/mpv/mpv-dist.conf /etc/mpv/mpv.conf
systemctl disable armbian-hardware-monitor.service armbian-hardware-optimize.service >/dev/null 2>&1
systemctl disable armbian-zram-config.service armbian-ramlog.service >/dev/null 2>&1
fi
exit 0
EOF
chmod 755 "${destination}"/DEBIAN/postrm
# set up post install script
cat <<-EOF > "${destination}"/DEBIAN/postinst
#!/bin/sh
#
# ${BOARD} BSP post installation script
#
systemctl --no-reload enable armbian-ramlog.service
# check if it was disabled in config and disable in new service
if [ -n "\$(grep -w '^ENABLED=false' /etc/default/log2ram 2> /dev/null)" ]; then
sed -i "s/^ENABLED=.*/ENABLED=false/" /etc/default/armbian-ramlog
fi
# fix boot delay "waiting for suspend/resume device"
if [ -f "/etc/initramfs-tools/initramfs.conf" ]; then
if ! grep --quiet "RESUME=none" /etc/initramfs-tools/initramfs.conf; then
echo "RESUME=none" >> /etc/initramfs-tools/initramfs.conf
fi
fi
EOF
# install bootscripts if they are not present. Fix upgrades from old images
if [[ $FORCE_BOOTSCRIPT_UPDATE == yes ]]; then
cat <<-EOF >> "${destination}"/DEBIAN/postinst
if [ true ]; then
# this package recreates boot scripts
EOF
else
cat <<-EOF >> "${destination}"/DEBIAN/postinst
if [ ! -f /boot/$bootscript_dst ]; then
# if the boot script does not exist, it is recreated
EOF
fi
cat <<-EOF >> "${destination}"/DEBIAN/postinst
# move bootscript to /usr/share/armbian
# create a backup
[ -f /etc/armbian-release ] && . /etc/armbian-release
[ -z "\${VERSION}" ] && VERSION=\$(date +%s)
if [ -f /boot/$bootscript_dst ]; then
cp /boot/$bootscript_dst /usr/share/armbian/${bootscript_dst}-\${VERSION} >/dev/null 2>&1
echo "NOTE: You can find previous bootscript versions in /usr/share/armbian !"
fi
# cleanup old bootscript backup
ls /usr/share/armbian/boot.cmd-* 2>/dev/null | head -n -5 | xargs rm -f --
ls /usr/share/armbian/boot.ini-* 2>/dev/null | head -n -5 | xargs rm -f --
echo "Recreating boot script"
cp /usr/share/armbian/$bootscript_dst /boot >/dev/null 2>&1
rootdev=\$(sed -e 's/^.*root=//' -e 's/ .*\$//' < /proc/cmdline)
rootfstype=\$(sed -e 's/^.*rootfstype=//' -e 's/ .*$//' < /proc/cmdline)
# recreate armbianEnv.txt only if it does not exist
if [ ! -f /boot/armbianEnv.txt ]; then
cp /usr/share/armbian/armbianEnv.txt /boot >/dev/null 2>&1
echo "rootdev="\$rootdev >> /boot/armbianEnv.txt
echo "rootfstype="\$rootfstype >> /boot/armbianEnv.txt
fi
[ -f /boot/boot.ini ] && sed -i "s/setenv rootdev.*/setenv rootdev \\"\$rootdev\\"/" /boot/boot.ini
[ -f /boot/boot.ini ] && sed -i "s/setenv rootfstype.*/setenv rootfstype \\"\$rootfstype\\"/" /boot/boot.ini
[ -f /boot/boot.cmd ] && mkimage -C none -A arm -T script -d /boot/boot.cmd /boot/boot.scr >/dev/null 2>&1
fi
[ ! -f "/etc/network/interfaces" ] && cp /etc/network/interfaces.default /etc/network/interfaces
ln -sf /var/run/motd /etc/motd
rm -f /etc/update-motd.d/00-header /etc/update-motd.d/10-help-text
if [ ! -f "/etc/default/armbian-motd" ]; then
mv /etc/default/armbian-motd.dpkg-dist /etc/default/armbian-motd
fi
if [ ! -f "/etc/default/armbian-ramlog" ]; then
mv /etc/default/armbian-ramlog.dpkg-dist /etc/default/armbian-ramlog
fi
if [ ! -f "/etc/default/armbian-zram-config" ]; then
mv /etc/default/armbian-zram-config.dpkg-dist /etc/default/armbian-zram-config
fi
if [ -L "/usr/lib/chromium-browser/master_preferences.dpkg-dist" ]; then
mv /usr/lib/chromium-browser/master_preferences.dpkg-dist /usr/lib/chromium-browser/master_preferences
fi
systemctl --no-reload enable armbian-hardware-monitor.service armbian-hardware-optimize.service armbian-zram-config.service >/dev/null 2>&1
exit 0
EOF
chmod 755 "${destination}"/DEBIAN/postinst
# won't recreate files if they were removed by user
# TODO: Add proper handling for updated conffiles
#cat <<-EOF > "${destination}"/DEBIAN/conffiles
#EOF
# copy common files from a premade directory structure
rsync -a "${SRC}"/packages/bsp/common/* "${destination}"/
# trigger uInitrd creation after installation, to apply
# /etc/initramfs/post-update.d/99-uboot
cat <<-EOF > "${destination}"/DEBIAN/triggers
activate update-initramfs
EOF
# armhwinfo, firstrun, armbianmonitor, etc. config file
cat <<-EOF > "${destination}"/etc/armbian-release
# PLEASE DO NOT EDIT THIS FILE
BOARD=$BOARD
BOARD_NAME="$BOARD_NAME"
BOARDFAMILY=${BOARDFAMILY}
BUILD_REPOSITORY_URL=${BUILD_REPOSITORY_URL}
BUILD_REPOSITORY_COMMIT=${BUILD_REPOSITORY_COMMIT}
DISTRIBUTION_CODENAME=${RELEASE}
DISTRIBUTION_STATUS=${DISTRIBUTION_STATUS}
VERSION=$REVISION
LINUXFAMILY=$LINUXFAMILY
BRANCH=$BRANCH
ARCH=$ARCHITECTURE
IMAGE_TYPE=$IMAGE_TYPE
BOARD_TYPE=$BOARD_TYPE
INITRD_ARCH=$INITRD_ARCH
KERNEL_IMAGE_TYPE=$KERNEL_IMAGE_TYPE
EOF
# this is required for NFS boot to prevent deconfiguring the network on shutdown
sed -i 's/#no-auto-down/no-auto-down/g' "${destination}"/etc/network/interfaces.default
if [[ $LINUXFAMILY == sunxi* ]]; then
# add mpv config for x11 output - slow, but it works compared to no config at all
# TODO: Test which output driver is better with DRM
mkdir -p "${destination}"/etc/mpv/
cp "${SRC}"/packages/bsp/mpv/mpv_mainline.conf "${destination}"/etc/mpv/mpv.conf
fi
# disable power savings on wireless connections by default
mkdir -p "${destination}"/usr/lib/NetworkManager/conf.d/
cp "${SRC}"/packages/bsp/zz-override-wifi-powersave-off.conf "${destination}"/usr/lib/NetworkManager/conf.d/
# execute $LINUXFAMILY-specific tweaks
[[ $(type -t family_tweaks_bsp) == function ]] && family_tweaks_bsp
# add some summary to the image
fingerprint_image "${destination}/etc/armbian.txt"
# fixing permissions (basic), reference: dh_fixperms
find "${destination}" -print0 2>/dev/null | xargs -0r chown --no-dereference 0:0
find "${destination}" ! -type l -print0 2>/dev/null | xargs -0r chmod 'go=rX,u+rw,a-s'
# create board DEB file
display_alert "Building package" "$CHOSEN_ROOTFS" "info"
fakeroot dpkg-deb -b "${destination}" "${destination}.deb" >> "${DEST}"/debug/install.log 2>&1
mkdir -p "${DEB_STORAGE}/${RELEASE}/"
mv "${destination}.deb" "${DEB_STORAGE}/${RELEASE}/"
# cleanup
rm -rf "${destination}"
}
| lipro-armbian/lib | lib/makeboarddeb.sh | Shell | gpl-2.0 | 11,910 |
#!/bin/bash
################################################
# Tool to program a polytouchdemo on Karo TX28 #
# Please send feedback to: #
# [email protected] #
# Dominik Peuker November 2014 #
# Glyn GmbH & Co. KG #
# #
#History #
#----------------------------------------------#
#0.1 - 16.12.2014 - Initial Version #
#1.0 - 13.01.2015 - Override IP - settings in #
# predefined environment #
#1.1 - 13.01.2015 - Change display settings #
#1.2 - 21.01.2015 - New u-boot, new kernel #
#1.3 - 04.02.2015 - Enhanced setting for #
# pixelclock of old and new #
# EDT 7" #
#1.4 - 26.05.2015 - add ETV570 #
################################################
clear
echo "Program Polytouchdemo to TX28"
echo "-----------------------------"
echo
#Presetting
. "$HOME/PycharmProjects/practice/flasher.conf"
#IPH=192.168.15.176 #Host
#IPT=192.168.15.205 #Target
#port=/dev/ttyUSB0 #serial port for console
uboot=u-boot-tx28-41x0.sb #Bootloader
image=setenv_poly_tx28.img #Environment
dtb=imx28-tx28.dtb #Device Tree
kernel=uImage-tx28-m09-raw #Linux kernel
rootfs=mucross-2.0-console-image-tx28.ubi #Terminal-Demo
echo
#preparation
echo "Please check:"
echo "tftp - server running?"
echo "serial cable connected?"
echo "ethernet connected?"
echo "module TX28 (TX28-4130) inserted?"
echo "power supply connected?"
echo "continue (y/n)"
read continue
if [ "$continue" != y ]
then
echo "exiting now!"
exit 0
else
clear
fi
#Keep or set IP addresses / serial port?
echo "IP addresses currently set to:"
echo "Host: "${IPH}
echo "Target: "${IPT}
echo "Serial port is currently set to "${port}
echo
echo "Keep these settings (y) or enter new adresses (n)?"
read settings
if [ "$settings" != y ]
then
#Host
echo "Please enter IP of your host (serverip):"
read IPH
echo
#Target
echo "Please enter IP of your target (ipaddr):"
read IPT
echo
#serial port
echo "Please enter your serial like this: /dev/ttyS0:"
read port
echo
#correct?
echo "Host:"${IPH}
echo "Target:"${IPT}
echo "Port:"${port}
#wait and clear screen
sleep 4
clear
else
#clear screen
clear
fi
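#Editorial sketch (not part of the original flow): the commands below are
#written to ${port} without configuring the serial line first. A helper like
#this could do that; 115200 8N1 raw mode is an assumption, check your
#board's console settings. It is defined but not called.
configure_port()
{
stty -F "${port}" 115200 cs8 -cstopb -parenb raw -echo
}
#invoke it (if wanted) with: configure_port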
#Mainfunction
#cleanup
echo " 1/20 - Clean Partitions"
#delete kernel
echo 'nand erase.part linux' > ${port}
sleep 3
#delete rootfs
echo 'nand erase.part rootfs' > ${port}
sleep 3
echo " 2/20 - Set IP adresses"
echo 'setenv serverip '${IPH} > ${port}
echo 'setenv ipaddr '${IPT} > ${port}
echo " 3/20 - Change autostart / autoload"
echo 'setenv autoload no' > ${port}
echo 'setenv autostart no' > ${port}
echo 'saveenv' > ${port}
echo " 4/20 - Update Bootloader"
sleep 5
echo 'tftp ${loadaddr}' ${uboot} > ${port}
echo " 5/20 - Transfering Bootloader"
sleep 10
echo " 6/20 - Installing Bootloader"
sleep 5
echo 'romupdate ${fileaddr}' > ${port}
sleep 5
echo " 7/20 - Reset"
echo 'reset' > ${port}
sleep 5
echo " 8/20 - Set default environment"
echo 'env default -f -a' > ${port}
echo " 9/20 - Set IP adresses"
sleep 5
echo 'setenv serverip '${IPH} > ${port}
echo 'setenv ipaddr '${IPT} > ${port}
echo "10/20 - Transfer Environment"
#copy and source predefined environment
echo 'tftp ${loadaddr}' ${image} > ${port}
sleep 8
echo 'source ${fileaddr}' > ${port}
sleep 5
#override IP - Settings in predefined Environment
echo 'setenv serverip '${IPH} > ${port}
echo 'setenv ipaddr '${IPT} > ${port}
echo 'saveenv' > ${port}
echo "11/20 - Transfering device tree"
echo 'tftp ${loadaddr}' ${dtb} > ${port}
sleep 8
echo 'nand erase.part dtb' > ${port}
sleep 5
echo "12/20 - Save device tree"
echo 'nand write.jffs2 ${fileaddr} dtb ${filesize}' > ${port}
sleep 5
echo 'saveenv' > ${port}
echo 'reset' > ${port}
sleep 5
echo > ${port}
#copy and install kernel
echo "13/20 - Transfering Linux Kernel"
echo 'tftp ${loadaddr}' ${kernel} > ${port}
sleep 15
echo 'nand erase.part linux' > ${port}
sleep 5
echo "14/20 - Save Linux Kernel"
echo 'nand write.jffs2 ${fileaddr} linux ${filesize}' > ${port}
sleep 5
#copy and install filesystem
echo "15/20 - Transfering Filesystem"
echo 'tftp 0x40000000' ${rootfs} > ${port}
sleep 25
echo 'nand erase.part rootfs' > ${port}
sleep 5
echo "16/20 - Save Filesystem"
echo 'nand write.trimffs 0x40000000 rootfs ${filesize}' > ${port}
sleep 15
echo "17/20 - Reset and Reboot"
echo 'reset' > ${port}
sleep 3
echo > ${port}
echo > ${port}
#backlight is only 50% so far, set it to 100%
echo "18/20 - Set backlight to full brightness"
sleep 6
echo 'fdt set /backlight default-brightness-level <0x01>' > ${port}
sleep 3
echo > ${port}
sleep 3
echo 'nand erase.part dtb' > ${port}
sleep 3
echo "19/20 - Save environment"
sleep 3
echo > ${port}
echo 'nand write.jffs2 ${fdtaddr} dtb' > ${port}
sleep 3
echo "20/20 - Done!"
#ready for start
#change displaysettings
echo "Display currently set to EDT 5,7 (ETV570)"
echo "possible other video modes are:"
echo "1: ET0350 ET0350G0DH6"
echo "2: ET0430 ET0430G0DH6"
echo "3: ET0500 ET0500G0DH6"
echo "4: ETQ570 ETQ570G0DH6 or ETQ570G2DH6"
#add "ETV570 if "y" was entered unintenionally
echo "5: ETV570 ETV570"
echo "6: ET0700 ET0700G0DH6 or ET0700G0BDH6"
echo "7: VGA standard VGA"
echo "change video mode? (y/n)"
read video_decision
if [ "$video_decision" != y ]
then
echo "Video resolution set to ETV570, exiting now!"
exit 0
else
echo "Please enter number of desired video mode (1-6)"
read video_mode
if [ "$video_mode" = 1 ]
then
echo 'setenv video_mode ET0350' > ${port}
echo 'saveenv' > ${port}
sleep 3
echo "Finished!"
exit 0
elif [ "$video_mode" = 2 ]
then
echo 'setenv video_mode ET0430' > ${port}
echo 'saveenv' > ${port}
sleep 3
echo "Finished!"
exit 0
elif [ "$video_mode" = 3 ]
then
echo 'setenv video_mode ET0500' > ${port}
echo 'saveenv' > ${port}
sleep 3
echo "Finished!"
exit 0
elif [ "$video_mode" = 4 ]
then
echo 'setenv video_mode ETQ570' > ${port}
echo 'saveenv' > ${port}
sleep 3
echo "Finished!"
exit 0
elif [ "$video_mode" = 5 ]
then
echo 'setenv video_mode ETV570' > ${port}
echo 'saveenv' > ${port}
sleep 3
echo "Finished!"
exit 0
elif [ "$video_mode" = 6 ]
then
echo 'setenv video_mode ET0700' > ${port}
echo 'saveenv' > ${port}
echo > ${port}
sleep 3
#we need to invert the pixelclock for the newer 7"
#Otherwise the output won't be correct and some pixels will look wrong
echo "For newer EDT 7 inch Displays pixelclock needs to be inverted"
echo "Partnumber is: (G-)ETM0700G0BDH6"
echo "Invert pixelclock? (y/n)"
read invert
if [ "${invert}" = y ]
then
echo 'fdt set display/display-timings/timing4/ pixelclk-active <0>' > ${port}
sleep 3
echo > ${port}
sleep 3
echo 'nand erase.part dtb' > ${port}
echo > ${port}
sleep 3
echo 'nand write.jffs2 ${fdtaddr} dtb' > ${port}
echo > ${port}
sleep 3
echo "Finished!"
exit 0
else
echo "Finished!"
exit 0
fi
else [ "$video_mode" = 7 ]
echo 'setenv video_mode VGA' > ${port}
echo 'saveenv'
sleep 3
echo "Finished!"
exit 0
fi
fi
| jitter77/practice | terminal_tx28s.sh | Shell | gpl-2.0 | 8,367 |
ssh-keygen # to generate the SSH key
# then add the key to your GitHub account.
ssh -T [email protected]
git clone [email protected]:xxx/xxx.git # clone with write and read permissions.
git pull [email protected]:xxx/xxx.git
git push # first cd to the repository and use this to push your updates.
cd /home/domicor/LammpsRelated
git push
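# A fuller key-setup sequence (editorial sketch; the key type, path, and
# comment are illustrative, adjust to taste):
ssh-keygen -t ed25519 -C "you@example.com"   # generate a new key pair
eval "$(ssh-agent -s)"                       # start the agent
ssh-add ~/.ssh/id_ed25519                    # load the key into the agent
# then paste the contents of ~/.ssh/id_ed25519.pub into
# GitHub -> Settings -> SSH and GPG keys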
# References:
# Git User’s Manual (for version 1.5.3 or newer)
#   http://schacon.github.io/git/user-manual.html
# GitHub help
#   https://help.github.com/
# About large files and releases:
#   https://help.github.com/articles/what-is-my-disk-quota
#   https://help.github.com/articles/creating-releases
#   https://help.github.com/articles/linking-to-releases
#   https://help.github.com/articles/distributing-large-binaries
# In Chinese: http://blog.csdn.net/five3/article/details/8904635
| Rareson/LinuxTricks | Github.sh | Shell | gpl-2.0 | 807 |
# Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to the
# Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston
# MA 02110-1301 USA.
##############################################################################
# Some common macro definitions
##############################################################################
# NOTE: "vendor" is used in upgrade/downgrade check, so you can't
# change these, has to be exactly as is.
%global mysql_old_vendor MySQL AB
%global mysql_vendor_2 Sun Microsystems, Inc.
%global mysql_vendor Oracle and/or its affiliates
%global mysql_version @VERSION@
%global mysqld_user mysql
%global mysqld_group mysql
%global mysqldatadir /var/lib/mysql
%global release 1
#
# Macros we use which are not available in all supported versions of RPM
#
# - defined/undefined are missing on RHEL4
#
%if %{expand:%{?defined:0}%{!?defined:1}}
%define defined() %{expand:%%{?%{1}:1}%%{!?%{1}:0}}
%endif
%if %{expand:%{?undefined:0}%{!?undefined:1}}
%define undefined() %{expand:%%{?%{1}:0}%%{!?%{1}:1}}
%endif
# ----------------------------------------------------------------------------
# RPM build tools now automatically detect Perl module dependencies. This
# detection causes problems as it is broken in some versions, and it also
# provides unwanted dependencies from mandatory scripts in our package.
# It might not be possible to disable this in all versions of RPM, but here we
# try anyway. We keep the "AutoReqProv: no" for the "test" sub package, as
# disabling here might fail, and that package has the most problems.
# See:
# http://fedoraproject.org/wiki/Packaging/Perl#Filtering_Requires:_and_Provides
# http://www.wideopen.com/archives/rpm-list/2002-October/msg00343.html
# ----------------------------------------------------------------------------
%undefine __perl_provides
%undefine __perl_requires
##############################################################################
# Command line handling
##############################################################################
#
# To set options:
#
# $ rpmbuild --define="option <x>" ...
#
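#
# Example invocation (editorial note; the values and the spec file name are
# illustrative, and any subset of the options may be given):
#
#   $ rpmbuild --define="commercial 1" --define="feature_set community" \
#              --define="runselftest 0" -ba mysql.spec
#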
# ----------------------------------------------------------------------------
# Commercial builds
# ----------------------------------------------------------------------------
%if %{undefined commercial}
%define commercial 0
%endif
# ----------------------------------------------------------------------------
# Source name
# ----------------------------------------------------------------------------
%if %{undefined src_base}
%define src_base mysql
%endif
%define src_dir %{src_base}-%{mysql_version}
# ----------------------------------------------------------------------------
# Feature set (storage engines, options). Default to community (everything)
# ----------------------------------------------------------------------------
%if %{undefined feature_set}
%define feature_set community
%endif
# ----------------------------------------------------------------------------
# Server comment strings
# ----------------------------------------------------------------------------
%if %{undefined compilation_comment_debug}
%define compilation_comment_debug MySQL Community Server - Debug (GPL)
%endif
%if %{undefined compilation_comment_release}
%define compilation_comment_release MySQL Community Server (GPL)
%endif
# ----------------------------------------------------------------------------
# Product and server suffixes
# ----------------------------------------------------------------------------
%if %{undefined product_suffix}
%if %{defined short_product_tag}
%define product_suffix -%{short_product_tag}
%else
%define product_suffix %{nil}
%endif
%endif
%if %{undefined server_suffix}
%define server_suffix %{nil}
%endif
# ----------------------------------------------------------------------------
# Distribution support
# ----------------------------------------------------------------------------
%if %{undefined distro_specific}
%define distro_specific 0
%endif
%if %{distro_specific}
%if %(test -f /etc/enterprise-release && echo 1 || echo 0)
%define oelver %(rpm -qf --qf '%%{version}\\n' /etc/enterprise-release | sed -e 's/^\\([0-9]*\\).*/\\1/g')
%if "%oelver" == "4"
%define distro_description Oracle Enterprise Linux 4
%define distro_releasetag oel4
%define distro_buildreq gcc-c++ gperf ncurses-devel perl time zlib-devel cmake libaio-devel
%define distro_requires chkconfig coreutils grep procps shadow-utils net-tools
%else
%if "%oelver" == "5"
%define distro_description Oracle Enterprise Linux 5
%define distro_releasetag oel5
%define distro_buildreq gcc-c++ gperf ncurses-devel perl time zlib-devel cmake libaio-devel
%define distro_requires chkconfig coreutils grep procps shadow-utils net-tools
%else
%{error:Oracle Enterprise Linux %{oelver} is unsupported}
%endif
%endif
%else
%if %(test -f /etc/oracle-release && echo 1 || echo 0)
%define elver %(rpm -qf --qf '%%{version}\\n' /etc/oracle-release | sed -e 's/^\\([0-9]*\\).*/\\1/g')
%if "%elver" == "6"
%define distro_description Oracle Linux 6
%define distro_releasetag el6
%define distro_buildreq gcc-c++ ncurses-devel perl time zlib-devel cmake libaio-devel
%define distro_requires chkconfig coreutils grep procps shadow-utils net-tools
%else
%{error:Oracle Linux %{elver} is unsupported}
%endif
%else
%if %(test -f /etc/redhat-release && echo 1 || echo 0)
%define rhelver %(rpm -qf --qf '%%{version}\\n' /etc/redhat-release | sed -e 's/^\\([0-9]*\\).*/\\1/g')
%if "%rhelver" == "4"
%define distro_description Red Hat Enterprise Linux 4
%define distro_releasetag rhel4
%define distro_buildreq gcc-c++ gperf ncurses-devel perl time zlib-devel cmake libaio-devel
%define distro_requires chkconfig coreutils grep procps shadow-utils net-tools
%else
%if "%rhelver" == "5"
%define distro_description Red Hat Enterprise Linux 5
%define distro_releasetag rhel5
%define distro_buildreq gcc-c++ gperf ncurses-devel perl time zlib-devel cmake libaio-devel
%define distro_requires chkconfig coreutils grep procps shadow-utils net-tools
%else
%if "%rhelver" == "6"
%define distro_description Red Hat Enterprise Linux 6
%define distro_releasetag rhel6
%define distro_buildreq gcc-c++ ncurses-devel perl time zlib-devel cmake libaio-devel
%define distro_requires chkconfig coreutils grep procps shadow-utils net-tools
%else
%{error:Red Hat Enterprise Linux %{rhelver} is unsupported}
%endif
%endif
%endif
%else
%if %(test -f /etc/SuSE-release && echo 1 || echo 0)
%define susever %(rpm -qf --qf '%%{version}\\n' /etc/SuSE-release | cut -d. -f1)
%if "%susever" == "10"
%define distro_description SUSE Linux Enterprise Server 10
%define distro_releasetag sles10
%define distro_buildreq gcc-c++ gdbm-devel gperf ncurses-devel openldap2-client zlib-devel cmake libaio-devel
%define distro_requires aaa_base coreutils grep procps pwdutils
%else
%if "%susever" == "11"
%define distro_description SUSE Linux Enterprise Server 11
%define distro_releasetag sles11
%define distro_buildreq gcc-c++ gdbm-devel gperf ncurses-devel openldap2-client procps pwdutils zlib-devel cmake libaio-devel
%define distro_requires aaa_base coreutils grep procps pwdutils
%else
%{error:SuSE %{susever} is unsupported}
%endif
%endif
%else
%{error:Unsupported distribution}
%endif
%endif
%endif
%endif
%else
%define glibc_version %(/lib/libc.so.6 | grep stable | cut -d, -f1 | cut -c38-)
%define distro_description Generic Linux (glibc %{glibc_version})
%define distro_releasetag linux_glibc%{glibc_version}
%define distro_buildreq gcc-c++ gperf ncurses-devel perl time zlib-devel
%define distro_requires coreutils grep procps /sbin/chkconfig /usr/sbin/useradd /usr/sbin/groupadd
%endif
# Avoid debuginfo RPMs, leaves binaries unstripped
%define debug_package %{nil}
# Hack to work around bug in RHEL5 __os_install_post macro, wrong inverted
# test for __debug_package
%define __strip /bin/true
# ----------------------------------------------------------------------------
# Support optional "tcmalloc" library (experimental)
# ----------------------------------------------------------------------------
%if %{defined malloc_lib_target}
%define WITH_TCMALLOC 1
%else
%define WITH_TCMALLOC 0
%endif
##############################################################################
# Configuration based upon above user input, not to be set directly
##############################################################################
%if 0%{?commercial}
%define license_files_server %{src_dir}/LICENSE.mysql
%define license_type Commercial
%else
%define license_files_server %{src_dir}/COPYING %{src_dir}/README
%define license_type GPL
%endif
##############################################################################
# Main spec file section
##############################################################################
Name: MySQL%{product_suffix}
Summary: MySQL: a very fast and reliable SQL database server
Group: Applications/Databases
Version: @MYSQL_RPM_VERSION@
Release: %{release}%{?distro_releasetag:.%{distro_releasetag}}
Distribution: %{distro_description}
License: Copyright (c) 2000, @MYSQL_COPYRIGHT_YEAR@, %{mysql_vendor}. All rights reserved. Under %{license_type} license as shown in the Description field.
Source: http://www.mysql.com/Downloads/MySQL-@MYSQL_BASE_VERSION@/%{src_dir}.tar.gz
URL: http://www.mysql.com/
Packager: MySQL Release Engineering <[email protected]>
Vendor: %{mysql_vendor}
Provides: msqlormysql MySQL-server
BuildRequires: %{distro_buildreq}
# Regression tests may take a long time, override the default to skip them
%{!?runselftest:%global runselftest 1}
# Think about what you use here since the first step is to
# run a rm -rf
BuildRoot: %{_tmppath}/%{name}-%{version}-build
# From the manual
%description
The MySQL(TM) software delivers a very fast, multi-threaded, multi-user,
and robust SQL (Structured Query Language) database server. MySQL Server
is intended for mission-critical, heavy-load production systems as well
as for embedding into mass-deployed software. MySQL is a trademark of
%{mysql_vendor}.
The MySQL software has Dual Licensing, which means you can use the MySQL
software free of charge under the GNU General Public License
(http://www.gnu.org/licenses/). You can also purchase commercial MySQL
licenses from %{mysql_vendor} if you do not wish to be bound by the terms of
the GPL. See the chapter "Licensing and Support" in the manual for
further info.
The MySQL web site (http://www.mysql.com/) provides the latest
news and information about the MySQL software. Also please see the
documentation and the manual for more information.
##############################################################################
# Sub package definition
##############################################################################
%package -n MySQL-server%{product_suffix}
Summary: MySQL: a very fast and reliable SQL database server
Group: Applications/Databases
Requires: %{distro_requires}
%if 0%{?commercial}
Obsoletes: MySQL-server
%else
Obsoletes: MySQL-server-advanced
%endif
Obsoletes: mysql-server mysql-advanced mysql-server-advanced
Obsoletes: MySQL-server-classic MySQL-server-community MySQL-server-enterprise
Obsoletes: MySQL-server-advanced-gpl MySQL-server-enterprise-gpl
%description -n MySQL-server%{product_suffix}
The MySQL(TM) software delivers a very fast, multi-threaded, multi-user,
and robust SQL (Structured Query Language) database server. MySQL Server
is intended for mission-critical, heavy-load production systems as well
as for embedding into mass-deployed software. MySQL is a trademark of
%{mysql_vendor}.
The MySQL software has Dual Licensing, which means you can use the MySQL
software free of charge under the GNU General Public License
(http://www.gnu.org/licenses/). You can also purchase commercial MySQL
licenses from %{mysql_vendor} if you do not wish to be bound by the terms of
the GPL. See the chapter "Licensing and Support" in the manual for
further info.
The MySQL web site (http://www.mysql.com/) provides the latest news and
information about the MySQL software. Also please see the documentation
and the manual for more information.
This package includes the MySQL server binary as well as related utilities
to run and administer a MySQL server.
If you want to access and work with the database, you have to install
package "MySQL-client%{product_suffix}" as well!
# ----------------------------------------------------------------------------
%package -n MySQL-client%{product_suffix}
Summary: MySQL - Client
Group: Applications/Databases
%if 0%{?commercial}
Obsoletes: MySQL-client
%else
Obsoletes: MySQL-client-advanced
%endif
Obsoletes: mysql < %{version}-%{release}
Obsoletes: mysql-advanced < %{version}-%{release}
Obsoletes: MySQL-client-classic MySQL-client-community MySQL-client-enterprise
Obsoletes: MySQL-client-advanced-gpl MySQL-client-enterprise-gpl
Provides: mysql = %{version}-%{release}
%description -n MySQL-client%{product_suffix}
This package contains the standard MySQL clients and administration tools.
For a description of MySQL see the base MySQL RPM or http://www.mysql.com/
# ----------------------------------------------------------------------------
%package -n MySQL-test%{product_suffix}
Summary: MySQL - Test suite
Group: Applications/Databases
%if 0%{?commercial}
Requires: MySQL-client-advanced perl
Obsoletes: MySQL-test
%else
Requires: MySQL-client perl
Obsoletes: MySQL-test-advanced
%endif
Obsoletes: mysql-test mysql-test-advanced
Obsoletes: MySQL-test-classic MySQL-test-community MySQL-test-enterprise
Obsoletes: MySQL-test-advanced-gpl MySQL-test-enterprise-gpl
AutoReqProv: no
%description -n MySQL-test%{product_suffix}
This package contains the MySQL regression test suite.
For a description of MySQL see the base MySQL RPM or http://www.mysql.com/
# ----------------------------------------------------------------------------
%package -n MySQL-devel%{product_suffix}
Summary: MySQL - Development header files and libraries
Group: Applications/Databases
%if 0%{?commercial}
Obsoletes: MySQL-devel
%else
Obsoletes: MySQL-devel-advanced
%endif
Obsoletes: mysql-devel mysql-embedded-devel mysql-devel-advanced mysql-embedded-devel-advanced
Obsoletes: MySQL-devel-classic MySQL-devel-community MySQL-devel-enterprise
Obsoletes: MySQL-devel-advanced-gpl MySQL-devel-enterprise-gpl
%description -n MySQL-devel%{product_suffix}
This package contains the development header files and libraries necessary
to develop MySQL client applications.
For a description of MySQL see the base MySQL RPM or http://www.mysql.com/
# ----------------------------------------------------------------------------
%package -n MySQL-shared%{product_suffix}
Summary: MySQL - Shared libraries
Group: Applications/Databases
%if 0%{?commercial}
Obsoletes: MySQL-shared
%else
Obsoletes: MySQL-shared-advanced
%endif
Obsoletes: MySQL-shared-standard MySQL-shared-pro
Obsoletes: MySQL-shared-pro-cert MySQL-shared-pro-gpl
Obsoletes: MySQL-shared-pro-gpl-cert
Obsoletes: MySQL-shared-classic MySQL-shared-community MySQL-shared-enterprise
Obsoletes: MySQL-shared-advanced-gpl MySQL-shared-enterprise-gpl
%description -n MySQL-shared%{product_suffix}
This package contains the shared libraries (*.so*) which certain languages
and applications need to dynamically load and use MySQL.
# ----------------------------------------------------------------------------
%package -n MySQL-embedded%{product_suffix}
Summary: MySQL - Embedded library
Group: Applications/Databases
%if 0%{?commercial}
Requires: MySQL-devel-advanced
Obsoletes: MySQL-embedded
%else
Requires: MySQL-devel
Obsoletes: MySQL-embedded-advanced
%endif
Obsoletes: mysql-embedded mysql-embedded-advanced
Obsoletes: MySQL-embedded-pro
Obsoletes: MySQL-embedded-classic MySQL-embedded-community MySQL-embedded-enterprise
Obsoletes: MySQL-embedded-advanced-gpl MySQL-embedded-enterprise-gpl
%description -n MySQL-embedded%{product_suffix}
This package contains the MySQL server as an embedded library.
The embedded MySQL server library makes it possible to run a full-featured
MySQL server inside the client application. The main benefits are increased
speed and simpler management for embedded applications.
The API is identical for the embedded MySQL version and the
client/server version.
For a description of MySQL see the base MySQL RPM or http://www.mysql.com/
##############################################################################
%prep
%setup -T -a 0 -c -n %{src_dir}
##############################################################################
%build
# Fail quickly and obviously if user tries to build as root
%if %runselftest
if [ x"`id -u`" = x0 ]; then
echo "The MySQL regression tests may fail if run as root."
echo "If you really need to build the RPM as root, use"
echo "--define='runselftest 0' to skip the regression tests."
exit 1
fi
%endif
# Be strict about variables, bail at earliest opportunity, etc.
set -eu
# Optional package files
touch optional-files-devel
#
# Set environment in order of preference, MYSQL_BUILD_* first, then variable
# name, finally a default. RPM_OPT_FLAGS is assumed to be a part of the
# default RPM build environment.
#
# This is a hack, $RPM_OPT_FLAGS on ia64 hosts contains flags which break
# the compile in cmd-line-utils/libedit - needs investigation, but for now
# we simply unset it and use those specified directly in cmake.
%if "%{_arch}" == "ia64"
RPM_OPT_FLAGS=
%endif
export PATH=${MYSQL_BUILD_PATH:-$PATH}
export CC=${MYSQL_BUILD_CC:-${CC:-gcc}}
export CXX=${MYSQL_BUILD_CXX:-${CXX:-g++}}
export CFLAGS=${MYSQL_BUILD_CFLAGS:-${CFLAGS:-$RPM_OPT_FLAGS}}
export CXXFLAGS=${MYSQL_BUILD_CXXFLAGS:-${CXXFLAGS:-$RPM_OPT_FLAGS -felide-constructors}}
export LDFLAGS=${MYSQL_BUILD_LDFLAGS:-${LDFLAGS:-}}
export CMAKE=${MYSQL_BUILD_CMAKE:-${CMAKE:-cmake}}
export MAKE_JFLAG=${MYSQL_BUILD_MAKE_JFLAG:-}
# By default, a build will include the bundled "yaSSL" library for SSL.
# However, there may be a need to override.
# Protect against undefined variables if there is no override option.
%if %{undefined with_ssl}
%define ssl_option %{nil}
%else
%define ssl_option -DWITH_SSL=%{with_ssl}
%endif
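# Example (editorial note, illustrative): building against the system OpenSSL
# instead of the bundled yaSSL could be requested with
#   $ rpmbuild --define="with_ssl system" ...
# where "system" is one of the values CMake's WITH_SSL option accepts; a path
# to an OpenSSL installation also works.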
# Build debug mysqld and libmysqld.a
mkdir debug
(
cd debug
# Attempt to remove any optimisation flags from the debug build
CFLAGS=`echo " ${CFLAGS} " | \
sed -e 's/ -O[0-9]* / /' \
-e 's/ -unroll2 / /' \
-e 's/ -ip / /' \
-e 's/^ //' \
-e 's/ $//'`
CXXFLAGS=`echo " ${CXXFLAGS} " | \
sed -e 's/ -O[0-9]* / /' \
-e 's/ -unroll2 / /' \
-e 's/ -ip / /' \
-e 's/^ //' \
-e 's/ $//'`
# XXX: MYSQL_UNIX_ADDR should be in cmake/* but mysql_version is included before
# XXX: install_layout so we can't just set it based on INSTALL_LAYOUT=RPM
${CMAKE} ../%{src_dir} -DBUILD_CONFIG=mysql_release -DINSTALL_LAYOUT=RPM \
-DCMAKE_BUILD_TYPE=Debug \
-DMYSQL_UNIX_ADDR="%{mysqldatadir}/mysql.sock" \
-DFEATURE_SET="%{feature_set}" \
%{ssl_option} \
-DCOMPILATION_COMMENT="%{compilation_comment_debug}" \
-DMYSQL_SERVER_SUFFIX="%{server_suffix}"
echo BEGIN_DEBUG_CONFIG ; egrep '^#define' include/config.h ; echo END_DEBUG_CONFIG
make ${MAKE_JFLAG} VERBOSE=1
)
# Build full release
mkdir release
(
cd release
# XXX: MYSQL_UNIX_ADDR should be in cmake/* but mysql_version is included before
# XXX: install_layout so we can't just set it based on INSTALL_LAYOUT=RPM
${CMAKE} ../%{src_dir} -DBUILD_CONFIG=mysql_release -DINSTALL_LAYOUT=RPM \
-DCMAKE_BUILD_TYPE=RelWithDebInfo \
-DMYSQL_UNIX_ADDR="%{mysqldatadir}/mysql.sock" \
-DFEATURE_SET="%{feature_set}" \
%{ssl_option} \
-DCOMPILATION_COMMENT="%{compilation_comment_release}" \
-DMYSQL_SERVER_SUFFIX="%{server_suffix}"
echo BEGIN_NORMAL_CONFIG ; egrep '^#define' include/config.h ; echo END_NORMAL_CONFIG
make ${MAKE_JFLAG} VERBOSE=1
)
%if %runselftest
MTR_BUILD_THREAD=auto
export MTR_BUILD_THREAD
(cd release && make test-bt-fast || true)
%endif
##############################################################################
%install
RBR=$RPM_BUILD_ROOT
MBD=$RPM_BUILD_DIR/%{src_dir}
# Ensure that needed directories exist
install -d $RBR%{_sysconfdir}/{logrotate.d,init.d}
install -d $RBR%{mysqldatadir}/mysql
install -d $RBR%{_datadir}/mysql-test
install -d $RBR%{_datadir}/mysql/SELinux/RHEL4
install -d $RBR%{_includedir}
install -d $RBR%{_libdir}
install -d $RBR%{_mandir}
install -d $RBR%{_sbindir}
# Install all binaries
(
cd $MBD/release
make DESTDIR=$RBR install
)
# FIXME: at some point we should stop doing this and just install everything
# FIXME: directly into %{_libdir}/mysql - perhaps at the same time as renaming
# FIXME: the shared libraries to use libmysql*-$major.$minor.so syntax
mv -v $RBR/%{_libdir}/*.a $RBR/%{_libdir}/mysql/
# Install logrotate and autostart
install -m 644 $MBD/release/support-files/mysql-log-rotate $RBR%{_sysconfdir}/logrotate.d/mysql
install -m 755 $MBD/release/support-files/mysql.server $RBR%{_sysconfdir}/init.d/mysql
# Create a symlink "rcmysql", pointing to the init.script. SuSE users
# will appreciate that, as all services usually offer this.
ln -s %{_sysconfdir}/init.d/mysql $RBR%{_sbindir}/rcmysql
# Touch the place where the my.cnf config file might be located
# Just to make sure it's in the file list and marked as a config file
touch $RBR%{_sysconfdir}/my.cnf
# Install SELinux files in datadir
install -m 600 $MBD/%{src_dir}/support-files/RHEL4-SElinux/mysql.{fc,te} \
$RBR%{_datadir}/mysql/SELinux/RHEL4
%if %{WITH_TCMALLOC}
# Even though this is a shared library, put it under /usr/lib*/mysql, so it
# doesn't conflict with possible shared lib by the same name in /usr/lib*. See
# `mysql_config --variable=pkglibdir` and mysqld_safe for how this is used.
install -m 644 "%{malloc_lib_source}" \
"$RBR%{_libdir}/mysql/%{malloc_lib_target}"
%endif
# Remove man pages we explicitly do not want to package, avoids 'unpackaged
# files' warning.
# This has become obsolete: rm -f $RBR%{_mandir}/man1/make_win_bin_dist.1*
##############################################################################
# Post processing actions, i.e. when installed
##############################################################################
%pre -n MySQL-server%{product_suffix}
# This is the code running at the beginning of a RPM upgrade action,
# before replacing the old files with the new ones.
# ATTENTION: Parts of this are duplicated in the "triggerpostun" !
# There are users who deviate from the default file system layout.
# Check local settings to support them.
if [ -x %{_bindir}/my_print_defaults ]
then
mysql_datadir=`%{_bindir}/my_print_defaults server mysqld | grep '^--datadir=' | sed -n 's/--datadir=//p'`
PID_FILE_PATT=`%{_bindir}/my_print_defaults server mysqld | grep '^--pid-file=' | sed -n 's/--pid-file=//p'`
fi
if [ -z "$mysql_datadir" ]
then
mysql_datadir=%{mysqldatadir}
fi
if [ -z "$PID_FILE_PATT" ]
then
PID_FILE_PATT="$mysql_datadir/*.pid"
fi
# Check if we can safely upgrade. An upgrade is only safe if it's from one
# of our RPMs in the same version family.
# Handle both ways of spelling the capability.
installed=`rpm -q --whatprovides mysql-server 2> /dev/null`
if [ $? -ne 0 -o -z "$installed" ]; then
installed=`rpm -q --whatprovides MySQL-server 2> /dev/null`
fi
if [ $? -eq 0 -a -n "$installed" ]; then
installed=`echo $installed | sed 's/\([^ ]*\) .*/\1/'` # Tests have shown duplicated package names
vendor=`rpm -q --queryformat='%{VENDOR}' "$installed" 2>&1`
version=`rpm -q --queryformat='%{VERSION}' "$installed" 2>&1`
myoldvendor='%{mysql_old_vendor}'
myvendor_2='%{mysql_vendor_2}'
myvendor='%{mysql_vendor}'
myversion='%{mysql_version}'
old_family=`echo $version \
| sed -n -e 's,^\([1-9][0-9]*\.[0-9][0-9]*\)\..*$,\1,p'`
new_family=`echo $myversion \
| sed -n -e 's,^\([1-9][0-9]*\.[0-9][0-9]*\)\..*$,\1,p'`
[ -z "$vendor" ] && vendor='<unknown>'
[ -z "$old_family" ] && old_family="<unrecognized version $version>"
[ -z "$new_family" ] && new_family="<bad package specification: version $myversion>"
error_text=
if [ "$vendor" != "$myoldvendor" \
-a "$vendor" != "$myvendor_2" \
-a "$vendor" != "$myvendor" ]; then
error_text="$error_text
The current MySQL server package is provided by a different
vendor ($vendor) than $myoldvendor, $myvendor_2, or $myvendor.
Some files may be installed to different locations, including log
files and the service startup script in %{_sysconfdir}/init.d/.
"
fi
if [ "$old_family" != "$new_family" ]; then
error_text="$error_text
Upgrading directly from MySQL $old_family to MySQL $new_family may not
be safe in all cases. A manual dump and restore using mysqldump is
recommended. It is important to review the MySQL manual's Upgrading
section for version-specific incompatibilities.
"
fi
if [ -n "$error_text" ]; then
cat <<HERE >&2
******************************************************************
A MySQL server package ($installed) is installed.
$error_text
A manual upgrade is required.
- Ensure that you have a complete, working backup of your data and my.cnf
files
- Shut down the MySQL server cleanly
- Remove the existing MySQL packages. Usually this command will
list the packages you should remove:
rpm -qa | grep -i '^mysql-'
You may choose to use 'rpm --nodeps -ev <package-name>' to remove
the package which contains the mysqlclient shared library. The
library will be reinstalled by the MySQL-shared-compat package.
- Install the new MySQL packages supplied by $myvendor
- Ensure that the MySQL server is started
- Run the 'mysql_upgrade' program
This is a brief description of the upgrade process. Important details
can be found in the MySQL manual, in the Upgrading section.
******************************************************************
HERE
exit 1
fi
fi
# We assume that if there is exactly one ".pid" file,
# it contains the valid PID of a running MySQL server.
NR_PID_FILES=`ls $PID_FILE_PATT 2>/dev/null | wc -l`
case $NR_PID_FILES in
0 ) SERVER_TO_START='' ;; # No "*.pid" file == no running server
1 ) SERVER_TO_START='true' ;;
* ) SERVER_TO_START='' # Situation not clear
SEVERAL_PID_FILES=true ;;
esac
# That logic may be debated: We might check whether it is non-empty,
# contains exactly one number (possibly a PID), and whether "ps" finds it.
# OTOH, if there is no such process, it means a crash without a cleanup -
# is that a reason not to start a new server after upgrade?
STATUS_FILE=$mysql_datadir/RPM_UPGRADE_MARKER
if [ -f $STATUS_FILE ]; then
echo "Some previous upgrade was not finished:"
ls -ld $STATUS_FILE
echo "Please check its status, then do"
echo " rm $STATUS_FILE"
echo "before repeating the MySQL upgrade."
exit 1
elif [ -n "$SEVERAL_PID_FILES" ] ; then
echo "You have more than one PID file:"
ls -ld $PID_FILE_PATT
echo "Please check which one (if any) corresponds to a running server"
echo "and delete all others before repeating the MySQL upgrade."
exit 1
fi
NEW_VERSION=%{mysql_version}-%{release}
# The "pre" section code is also run on a first installation,
# when there is no data directory yet. Protect against error messages.
# Check for the existence of subdirectory "mysql/", the database of system
# tables like "mysql.user".
if [ -d $mysql_datadir/mysql ] ; then
echo "MySQL RPM upgrade to version $NEW_VERSION" > $STATUS_FILE
echo "'pre' step running at `date`" >> $STATUS_FILE
echo >> $STATUS_FILE
fcount=`ls -ltr $mysql_datadir/*.err 2>/dev/null | wc -l`
if [ $fcount -gt 0 ] ; then
echo "ERR file(s):" >> $STATUS_FILE
ls -ltr $mysql_datadir/*.err >> $STATUS_FILE
echo >> $STATUS_FILE
echo "Latest 'Version' line in latest file:" >> $STATUS_FILE
grep '^Version' `ls -tr $mysql_datadir/*.err | tail -1` | \
tail -1 >> $STATUS_FILE
echo >> $STATUS_FILE
fi
if [ -n "$SERVER_TO_START" ] ; then
# There is only one PID file, race possibility ignored
echo "PID file:" >> $STATUS_FILE
ls -l $PID_FILE_PATT >> $STATUS_FILE
cat $PID_FILE_PATT >> $STATUS_FILE
echo >> $STATUS_FILE
echo "Server process:" >> $STATUS_FILE
ps -fp `cat $PID_FILE_PATT` >> $STATUS_FILE
echo >> $STATUS_FILE
echo "SERVER_TO_START=$SERVER_TO_START" >> $STATUS_FILE
else
# Take a note we checked it ...
echo "PID file:" >> $STATUS_FILE
ls -l $PID_FILE_PATT >> $STATUS_FILE 2>&1
fi
fi
# Shut down a previously installed server first
# Note we *could* make that depend on $SERVER_TO_START, but we'd rather not,
# so a "stop" is attempted even if there is no PID file.
# (Maybe the "stop" doesn't work then, but we might fix that in itself.)
if [ -x %{_sysconfdir}/init.d/mysql ] ; then
%{_sysconfdir}/init.d/mysql stop > /dev/null 2>&1
echo "Giving mysqld 5 seconds to exit nicely"
sleep 5
fi
%post -n MySQL-server%{product_suffix}
# This is the code running at the end of a RPM install or upgrade action,
# after the (new) files have been written.
# ATTENTION: Parts of this are duplicated in the "triggerpostun" !
# There are users who deviate from the default file system layout.
# Check local settings to support them.
if [ -x %{_bindir}/my_print_defaults ]
then
mysql_datadir=`%{_bindir}/my_print_defaults server mysqld | grep '^--datadir=' | sed -n 's/--datadir=//p'`
fi
if [ -z "$mysql_datadir" ]
then
mysql_datadir=%{mysqldatadir}
fi
NEW_VERSION=%{mysql_version}-%{release}
STATUS_FILE=$mysql_datadir/RPM_UPGRADE_MARKER
# ----------------------------------------------------------------------
# Create data directory if needed, check whether upgrade or install
# ----------------------------------------------------------------------
if [ ! -d $mysql_datadir ] ; then mkdir -m 755 $mysql_datadir; fi
if [ -f $STATUS_FILE ] ; then
SERVER_TO_START=`grep '^SERVER_TO_START=' $STATUS_FILE | cut -c17-`
else
SERVER_TO_START=''
fi
# echo "Analyzed: SERVER_TO_START=$SERVER_TO_START"
if [ ! -d $mysql_datadir/mysql ] ; then
mkdir $mysql_datadir/mysql $mysql_datadir/test
echo "MySQL RPM installation of version $NEW_VERSION" >> $STATUS_FILE
else
# If the directory exists, we may assume it is an upgrade.
echo "MySQL RPM upgrade to version $NEW_VERSION" >> $STATUS_FILE
fi
# ----------------------------------------------------------------------
# Make MySQL start/shutdown automatically when the machine does it.
# ----------------------------------------------------------------------
# NOTE: This still needs to be debated. Should we check whether these links
# for the other run levels exist(ed) before the upgrade?
# use chkconfig on Enterprise Linux and newer SuSE releases
if [ -x /sbin/chkconfig ] ; then
/sbin/chkconfig --add mysql
# use insserv for older SuSE Linux versions
elif [ -x /sbin/insserv ] ; then
/sbin/insserv %{_sysconfdir}/init.d/mysql
fi
# ----------------------------------------------------------------------
# Create a MySQL user and group. Do not report any problems if it already
# exists.
# ----------------------------------------------------------------------
groupadd -r %{mysqld_group} 2> /dev/null || true
useradd -M -r -d $mysql_datadir -s /bin/bash -c "MySQL server" \
-g %{mysqld_group} %{mysqld_user} 2> /dev/null || true
# The user may already exist, make sure it has the proper group nevertheless
# (BUG#12823)
usermod -g %{mysqld_group} %{mysqld_user} 2> /dev/null || true
# ----------------------------------------------------------------------
# Change permissions so that the user that will run the MySQL daemon
# owns all database files.
# ----------------------------------------------------------------------
chown -R %{mysqld_user}:%{mysqld_group} $mysql_datadir
# ----------------------------------------------------------------------
# Initiate databases if needed
# ----------------------------------------------------------------------
if ! grep '^MySQL RPM upgrade' $STATUS_FILE >/dev/null 2>&1 ; then
# Fix bug#45415: no "mysql_install_db" on an upgrade
# Do this as a negative to err towards more "install" runs
# rather than to miss one.
%{_bindir}/mysql_install_db --rpm --user=%{mysqld_user} --random-passwords
# Attention: Now 'root' is the only database user,
# its password is a random value found in ~/.mysql_secret,
# and the "password expired" flag is set:
# Any client needs that password, and the first command
# executed must be a new "set password"!
fi
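# Editorial note, an illustrative first-login sequence for the warning above
# (the random password is written to ~root/.mysql_secret; the new password
# shown is a placeholder):
#   $ mysql -u root -p
#   mysql> SET PASSWORD = PASSWORD('new_password');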
# ----------------------------------------------------------------------
# Upgrade databases if needed would go here - but it cannot be automated yet
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# Change permissions again to fix any new files.
# ----------------------------------------------------------------------
chown -R %{mysqld_user}:%{mysqld_group} $mysql_datadir
# ----------------------------------------------------------------------
# Fix permissions for the permission database so that only the user
# can read them.
# ----------------------------------------------------------------------
chmod -R og-rw $mysql_datadir/mysql
# ----------------------------------------------------------------------
# install SELinux files - but don't override existing ones
# ----------------------------------------------------------------------
SETARGETDIR=/etc/selinux/targeted/src/policy
SEDOMPROG=$SETARGETDIR/domains/program
SECONPROG=$SETARGETDIR/file_contexts/program
if [ -f /etc/redhat-release ] \
&& (grep -q "Red Hat Enterprise Linux .. release 4" /etc/redhat-release \
|| grep -q "CentOS release 4" /etc/redhat-release) ; then
echo
echo
echo 'Notes regarding SELinux on this platform:'
echo '========================================='
echo
echo 'The default policy might cause server startup to fail because it is'
echo 'not allowed to access critical files. In this case, please update'
echo 'your installation.'
echo
echo 'The default policy might also cause unavailability of SSL-related'
echo 'features because the server is not allowed to access /dev/random'
echo 'and /dev/urandom. If this is a problem, please do the following:'
echo
echo ' 1) install selinux-policy-targeted-sources from your OS vendor'
echo ' 2) add the following two lines to '$SEDOMPROG/mysqld.te':'
echo ' allow mysqld_t random_device_t:chr_file read;'
echo ' allow mysqld_t urandom_device_t:chr_file read;'
echo ' 3) cd to '$SETARGETDIR' and issue the following command:'
echo ' make load'
echo
echo
fi
if [ -x /sbin/restorecon ] ; then
/sbin/restorecon -R /var/lib/mysql
fi
# Was the server running before the upgrade? If so, restart the new one.
if [ "$SERVER_TO_START" = "true" ] ; then
# Restart in the same way that mysqld will be started normally.
if [ -x %{_sysconfdir}/init.d/mysql ] ; then
%{_sysconfdir}/init.d/mysql start
echo "Giving mysqld 5 seconds to start"
sleep 5
fi
fi
# Collect an upgrade history ...
echo "Upgrade/install finished at `date`" >> $STATUS_FILE
echo >> $STATUS_FILE
echo "=====" >> $STATUS_FILE
STATUS_HISTORY=$mysql_datadir/RPM_UPGRADE_HISTORY
cat $STATUS_FILE >> $STATUS_HISTORY
mv -f $STATUS_FILE ${STATUS_FILE}-LAST # for "triggerpostun"
#echo "Thank you for installing the MySQL Community Server! For Production
#systems, we recommend MySQL Enterprise, which contains enterprise-ready
#software, intelligent advisory services, and full production support with
#scheduled service packs and more. Visit www.mysql.com/enterprise for more
#information."
%preun -n MySQL-server%{product_suffix}
# Which '$1' does this refer to? Fedora docs have info:
# " ... a count of the number of versions of the package that are installed.
# Action Count
# Install the first time 1
# Upgrade 2 or higher (depending on the number of versions installed)
# Remove last version of package 0 "
#
# http://docs.fedoraproject.org/en-US/Fedora_Draft_Documentation/0.1/html/RPM_Guide/ch09s04s05.html
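# Illustrative invocations (hypothetical package file names):
#   "rpm -e MySQL-server"           -> this preun scriptlet runs with $1 = 0 (erase)
#   "rpm -U MySQL-server-new.rpm"   -> the old package's preun runs with $1 = 1 (upgrade)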
if [ $1 = 0 ] ; then
# Stop MySQL before uninstalling it
if [ -x %{_sysconfdir}/init.d/mysql ] ; then
%{_sysconfdir}/init.d/mysql stop > /dev/null
# Remove autostart of MySQL
# use chkconfig on Enterprise Linux and newer SuSE releases
if [ -x /sbin/chkconfig ] ; then
/sbin/chkconfig --del mysql
# For older SuSE Linux versions
elif [ -x /sbin/insserv ] ; then
/sbin/insserv -r %{_sysconfdir}/init.d/mysql
fi
fi
fi
# We do not remove the mysql user since it may still own a lot of
# database files.
%triggerpostun -n MySQL-server%{product_suffix} --MySQL-server-community
# Setup: We renamed this package, so any existing "server-community"
# package will be removed when this "server" is installed.
# Problem: RPM will first run the "pre" and "post" sections of this script,
# and only then the "preun" of that old community server.
# But this "preun" includes stopping the server and uninstalling the service,
# "chkconfig --del mysql" which removes the symlinks to the start script.
# Solution: *After* the community server got removed, restart this server
# and re-install the service.
#
# For information about triggers in spec files, see the Fedora docs:
# http://docs.fedoraproject.org/en-US/Fedora_Draft_Documentation/0.1/html/RPM_Guide/ch10s02.html
# For all details of this code, see the "pre" and "post" sections.
# There are users who deviate from the default file system layout.
# Check local settings to support them.
if [ -x %{_bindir}/my_print_defaults ]
then
mysql_datadir=`%{_bindir}/my_print_defaults server mysqld | grep '^--datadir=' | sed -n 's/--datadir=//p'`
fi
if [ -z "$mysql_datadir" ]
then
mysql_datadir=%{mysqldatadir}
fi
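# For reference, "my_print_defaults server mysqld" prints one option per line,
# e.g. (illustrative values):
#   --datadir=/var/lib/mysql
#   --pid-file=/var/lib/mysql/host.pid
# so the grep/sed above picks up a non-default datadir when one is configured.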
NEW_VERSION=%{mysql_version}-%{release}
STATUS_FILE=$mysql_datadir/RPM_UPGRADE_MARKER-LAST # Note the difference!
STATUS_HISTORY=$mysql_datadir/RPM_UPGRADE_HISTORY
if [ -f $STATUS_FILE ] ; then
SERVER_TO_START=`grep '^SERVER_TO_START=' $STATUS_FILE | cut -c17-`
else
# This should never happen, but let's be prepared
SERVER_TO_START=''
fi
echo "Analyzed: SERVER_TO_START=$SERVER_TO_START"
if [ -x /sbin/chkconfig ] ; then
/sbin/chkconfig --add mysql
# use insserv for older SuSE Linux versions
elif [ -x /sbin/insserv ] ; then
/sbin/insserv %{_sysconfdir}/init.d/mysql
fi
# Was the server running before the upgrade? If so, restart the new one.
if [ "$SERVER_TO_START" = "true" ] ; then
# Restart in the same way that mysqld will be started normally.
if [ -x %{_sysconfdir}/init.d/mysql ] ; then
%{_sysconfdir}/init.d/mysql start
echo "Giving mysqld 5 seconds to start"
sleep 5
fi
fi
echo "Trigger 'postun --community' finished at `date`" >> $STATUS_HISTORY
echo >> $STATUS_HISTORY
echo "=====" >> $STATUS_HISTORY
# ----------------------------------------------------------------------
# Clean up the BuildRoot after build is done
# ----------------------------------------------------------------------
%clean
[ "$RPM_BUILD_ROOT" != "/" ] && [ -d $RPM_BUILD_ROOT ] \
&& rm -rf $RPM_BUILD_ROOT;
##############################################################################
# Files section
##############################################################################
%files -n MySQL-server%{product_suffix} -f release/support-files/plugins.files
%defattr(-,root,root,0755)
%if %{defined license_files_server}
%doc %{license_files_server}
%endif
%doc %{src_dir}/Docs/ChangeLog
%doc %{src_dir}/Docs/INFO_SRC*
%doc release/Docs/INFO_BIN*
%doc release/support-files/my-default.cnf
%doc %attr(644, root, root) %{_infodir}/mysql.info*
%doc %attr(644, root, man) %{_mandir}/man1/innochecksum.1*
%doc %attr(644, root, man) %{_mandir}/man1/my_print_defaults.1*
%doc %attr(644, root, man) %{_mandir}/man1/myisam_ftdump.1*
%doc %attr(644, root, man) %{_mandir}/man1/myisamchk.1*
%doc %attr(644, root, man) %{_mandir}/man1/myisamlog.1*
%doc %attr(644, root, man) %{_mandir}/man1/myisampack.1*
%doc %attr(644, root, man) %{_mandir}/man1/mysql_convert_table_format.1*
%doc %attr(644, root, man) %{_mandir}/man1/mysql_fix_extensions.1*
%doc %attr(644, root, man) %{_mandir}/man8/mysqld.8*
%doc %attr(644, root, man) %{_mandir}/man1/mysqld_multi.1*
%doc %attr(644, root, man) %{_mandir}/man1/mysqld_safe.1*
%doc %attr(644, root, man) %{_mandir}/man1/mysqldumpslow.1*
%doc %attr(644, root, man) %{_mandir}/man1/mysql_install_db.1*
%doc %attr(644, root, man) %{_mandir}/man1/mysql_plugin.1*
%doc %attr(644, root, man) %{_mandir}/man1/mysql_secure_installation.1*
%doc %attr(644, root, man) %{_mandir}/man1/mysql_setpermission.1*
%doc %attr(644, root, man) %{_mandir}/man1/mysql_upgrade.1*
%doc %attr(644, root, man) %{_mandir}/man1/mysqlhotcopy.1*
%doc %attr(644, root, man) %{_mandir}/man1/mysqlman.1*
%doc %attr(644, root, man) %{_mandir}/man1/mysql.server.1*
%doc %attr(644, root, man) %{_mandir}/man1/mysqltest.1*
%doc %attr(644, root, man) %{_mandir}/man1/mysql_tzinfo_to_sql.1*
%doc %attr(644, root, man) %{_mandir}/man1/mysql_zap.1*
%doc %attr(644, root, man) %{_mandir}/man1/mysqlbug.1*
%doc %attr(644, root, man) %{_mandir}/man1/perror.1*
%doc %attr(644, root, man) %{_mandir}/man1/replace.1*
%doc %attr(644, root, man) %{_mandir}/man1/resolve_stack_dump.1*
%doc %attr(644, root, man) %{_mandir}/man1/resolveip.1*
%ghost %config(noreplace,missingok) %{_sysconfdir}/my.cnf
%attr(755, root, root) %{_bindir}/innochecksum
%attr(755, root, root) %{_bindir}/my_print_defaults
%attr(755, root, root) %{_bindir}/myisam_ftdump
%attr(755, root, root) %{_bindir}/myisamchk
%attr(755, root, root) %{_bindir}/myisamlog
%attr(755, root, root) %{_bindir}/myisampack
%attr(755, root, root) %{_bindir}/mysql_convert_table_format
%attr(755, root, root) %{_bindir}/mysql_fix_extensions
%attr(755, root, root) %{_bindir}/mysql_install_db
%attr(755, root, root) %{_bindir}/mysql_plugin
%attr(755, root, root) %{_bindir}/mysql_secure_installation
%attr(755, root, root) %{_bindir}/mysql_setpermission
%attr(755, root, root) %{_bindir}/mysql_tzinfo_to_sql
%attr(755, root, root) %{_bindir}/mysql_upgrade
%attr(755, root, root) %{_bindir}/mysql_zap
%attr(755, root, root) %{_bindir}/mysqlbug
%attr(755, root, root) %{_bindir}/mysqld_multi
%attr(755, root, root) %{_bindir}/mysqld_safe
%attr(755, root, root) %{_bindir}/mysqldumpslow
%attr(755, root, root) %{_bindir}/mysqlhotcopy
%attr(755, root, root) %{_bindir}/mysqltest
%attr(755, root, root) %{_bindir}/perror
%attr(755, root, root) %{_bindir}/replace
%attr(755, root, root) %{_bindir}/resolve_stack_dump
%attr(755, root, root) %{_bindir}/resolveip
%attr(755, root, root) %{_sbindir}/mysqld
%attr(755, root, root) %{_sbindir}/mysqld-debug
%attr(755, root, root) %{_sbindir}/rcmysql
%attr(755, root, root) %{_libdir}/mysql/plugin/daemon_example.ini
%if %{WITH_TCMALLOC}
%attr(755, root, root) %{_libdir}/mysql/%{malloc_lib_target}
%endif
%attr(644, root, root) %config(noreplace,missingok) %{_sysconfdir}/logrotate.d/mysql
%attr(755, root, root) %{_sysconfdir}/init.d/mysql
%attr(755, root, root) %{_datadir}/mysql/
# ----------------------------------------------------------------------------
%files -n MySQL-client%{product_suffix}
%defattr(-, root, root, 0755)
%attr(755, root, root) %{_bindir}/msql2mysql
%attr(755, root, root) %{_bindir}/mysql
%attr(755, root, root) %{_bindir}/mysql_find_rows
%attr(755, root, root) %{_bindir}/mysql_waitpid
%attr(755, root, root) %{_bindir}/mysqlaccess
# XXX: This should be moved to %{_sysconfdir}
%attr(644, root, root) %{_bindir}/mysqlaccess.conf
%attr(755, root, root) %{_bindir}/mysqladmin
%attr(755, root, root) %{_bindir}/mysqlbinlog
%attr(755, root, root) %{_bindir}/mysqlcheck
%attr(755, root, root) %{_bindir}/mysqldump
%attr(755, root, root) %{_bindir}/mysqlimport
%attr(755, root, root) %{_bindir}/mysqlshow
%attr(755, root, root) %{_bindir}/mysqlslap
%attr(755, root, root) %{_bindir}/mysql_config_editor
%doc %attr(644, root, man) %{_mandir}/man1/msql2mysql.1*
%doc %attr(644, root, man) %{_mandir}/man1/mysql.1*
%doc %attr(644, root, man) %{_mandir}/man1/mysql_find_rows.1*
%doc %attr(644, root, man) %{_mandir}/man1/mysql_waitpid.1*
%doc %attr(644, root, man) %{_mandir}/man1/mysqlaccess.1*
%doc %attr(644, root, man) %{_mandir}/man1/mysqladmin.1*
%doc %attr(644, root, man) %{_mandir}/man1/mysqlbinlog.1*
%doc %attr(644, root, man) %{_mandir}/man1/mysqlcheck.1*
%doc %attr(644, root, man) %{_mandir}/man1/mysqldump.1*
%doc %attr(644, root, man) %{_mandir}/man1/mysqlimport.1*
%doc %attr(644, root, man) %{_mandir}/man1/mysqlshow.1*
%doc %attr(644, root, man) %{_mandir}/man1/mysqlslap.1*
%doc %attr(644, root, man) %{_mandir}/man1/mysql_config_editor.1*
# ----------------------------------------------------------------------------
%files -n MySQL-devel%{product_suffix} -f optional-files-devel
%defattr(-, root, root, 0755)
%doc %attr(644, root, man) %{_mandir}/man1/comp_err.1*
%doc %attr(644, root, man) %{_mandir}/man1/mysql_config.1*
%attr(755, root, root) %{_bindir}/mysql_config
%dir %attr(755, root, root) %{_includedir}/mysql
%dir %attr(755, root, root) %{_libdir}/mysql
%{_includedir}/mysql/*
%{_datadir}/aclocal/mysql.m4
%{_libdir}/mysql/libmysqlclient.a
%{_libdir}/mysql/libmysqlclient_r.a
%{_libdir}/mysql/libmysqlservices.a
# ----------------------------------------------------------------------------
%files -n MySQL-shared%{product_suffix}
%defattr(-, root, root, 0755)
# Shared libraries (omit for architectures that don't support them)
%{_libdir}/libmysql*.so*
%post -n MySQL-shared%{product_suffix}
/sbin/ldconfig
%postun -n MySQL-shared%{product_suffix}
/sbin/ldconfig
# ----------------------------------------------------------------------------
%files -n MySQL-test%{product_suffix}
%defattr(-, root, root, 0755)
%attr(-, root, root) %{_datadir}/mysql-test
%attr(755, root, root) %{_bindir}/mysql_client_test
%attr(755, root, root) %{_bindir}/mysql_client_test_embedded
%attr(755, root, root) %{_bindir}/mysqltest_embedded
%doc %attr(644, root, man) %{_mandir}/man1/mysql_client_test.1*
%doc %attr(644, root, man) %{_mandir}/man1/mysql-stress-test.pl.1*
%doc %attr(644, root, man) %{_mandir}/man1/mysql-test-run.pl.1*
%doc %attr(644, root, man) %{_mandir}/man1/mysql_client_test_embedded.1*
%doc %attr(644, root, man) %{_mandir}/man1/mysqltest_embedded.1*
# ----------------------------------------------------------------------------
%files -n MySQL-embedded%{product_suffix}
%defattr(-, root, root, 0755)
%attr(755, root, root) %{_bindir}/mysql_embedded
%attr(644, root, root) %{_libdir}/mysql/libmysqld.a
%attr(644, root, root) %{_libdir}/mysql/libmysqld-debug.a
##############################################################################
# The spec file changelog only includes changes made to the spec file
# itself - note that they must be ordered by date (important when
# merging BK trees)
##############################################################################
%changelog
* Wed Jun 26 2013 Balasubramanian Kandasamy <[email protected]>
- Cleaned up spec file to resolve rpm dependencies.
* Mon Nov 05 2012 Joerg Bruehe <[email protected]>
- Allow to override the default to use the bundled yaSSL by an option like
--define="with_ssl /path/to/ssl"
* Wed Oct 10 2012 Bjorn Munch <[email protected]>
- Replace old my-*.cnf config file examples with template my-default.cnf
* Fri Oct 05 2012 Joerg Bruehe <[email protected]>
- Let the installation use the new option "--random-passwords" of "mysql_install_db".
(Bug# 12794345 Ensure root password)
- Fix an inconsistency: "new install" vs "upgrade" are told from the (non)existence
of "$mysql_datadir/mysql" (holding table "mysql.user" and other system stuff).
* Tue Jul 24 2012 Joerg Bruehe <[email protected]>
- Add a macro "runselftest":
if set to 1 (default), the test suite will be run during the RPM build;
this can be oveeridden via the command line by adding
--define "runselftest 0"
Failures of the test suite will NOT make the RPM build fail!
* Mon Jul 16 2012 Joerg Bruehe <[email protected]>
- Add the man page for the "mysql_config_editor".
* Mon Jun 11 2012 Joerg Bruehe <[email protected]>
- Make sure newly added "SPECIFIC-ULN/" directory does not disturb packaging.
* Wed Feb 29 2012 Brajmohan Saxena <[email protected]>
- Removal all traces of the readline library from mysql (BUG 13738013)
* Wed Sep 28 2011 Joerg Bruehe <[email protected]>
- Fix duplicate mentioning of "mysql_plugin" and its manual page,
it is better to keep alphabetic order in the files list (merging!).
* Wed Sep 14 2011 Joerg Bruehe <[email protected]>
- Let the RPM capabilities ("obsoletes" etc) ensure that an upgrade may replace
the RPMs of any configuration (of the current or the preceding release series)
by the new ones. This is done by not using the implicitly generated capabilities
(which include the configuration name) and relying on more generic ones which
just list the function ("server", "client", ...).
The implicit generation cannot be prevented, so all these capabilities must be
explicitly listed in "Obsoletes:"
* Tue Sep 13 2011 Jonathan Perkin <[email protected]>
- Add support for Oracle Linux 6 and Red Hat Enterprise Linux 6. Due to
changes in RPM behaviour ($RPM_BUILD_ROOT is removed prior to install)
this necessitated a move of the libmygcc.a installation to the install
phase, which is probably where it belonged in the first place.
* Tue Sep 13 2011 Joerg Bruehe <[email protected]>
- "make_win_bin_dist" and its manual are dropped, cmake does it different.
* Thu Sep 08 2011 Daniel Fischer <[email protected]>
- Add mysql_plugin man page.
* Tue Aug 30 2011 Tor Didriksen <[email protected]>
- Set CXX=g++ by default to add a dependency on libgcc/libstdc++.
Also, remove the use of the -fno-exceptions and -fno-rtti flags.
TODO: update distro_buildreq/distro_requires
* Tue Aug 30 2011 Joerg Bruehe <[email protected]>
- Add the manual page for "mysql_plugin" to the server package.
* Fri Aug 19 2011 Joerg Bruehe <[email protected]>
- Null-upmerge the fix of bug#37165: This spec file is not affected.
- Replace "/var/lib/mysql" by the spec file variable "%{mysqldatadir}".
* Fri Aug 12 2011 Daniel Fischer <[email protected]>
- Source plugin library files list from cmake-generated file.
* Mon Jul 25 2011 Chuck Bell <[email protected]>
- Added the mysql_plugin client - enables or disables plugins.
* Thu Jul 21 2011 Sunanda Menon <[email protected]>
- Fix bug#12561297: Added the MySQL embedded binary
* Thu Jul 07 2011 Joerg Bruehe <[email protected]>
- Fix bug#45415: "rpm upgrade recreates test database"
Let the creation of the "test" database happen only during a new installation,
not in an RPM upgrade.
This affects both the "mkdir" and the call of "mysql_install_db".
* Thu Feb 09 2011 Joerg Bruehe <[email protected]>
- Fix bug#56581: If an installation deviates from the default file locations
("datadir" and "pid-file"), the mechanism to detect a running server (on upgrade)
should still work, and use these locations.
The problem was that the fix for bug#27072 did not check for local settings.
* Mon Jan 31 2011 Joerg Bruehe <[email protected]>
- Install the new "manifest" files: "INFO_SRC" and "INFO_BIN".
* Tue Nov 23 2010 Jonathan Perkin <[email protected]>
- EXCEPTIONS-CLIENT has been deleted, remove it from here too
- Support MYSQL_BUILD_MAKE_JFLAG environment variable for passing
a '-j' argument to make.
* Mon Nov 1 2010 Georgi Kodinov <[email protected]>
- Added test authentication (WL#1054) plugin binaries
* Wed Oct 6 2010 Georgi Kodinov <[email protected]>
- Added example external authentication (WL#1054) plugin binaries
* Wed Aug 11 2010 Joerg Bruehe <[email protected]>
- With a recent spec file cleanup, names have changed: A "-community" part was dropped.
Reflect that in the "Obsoletes" specifications.
- Add a "triggerpostun" to handle the uninstall of the "-community" server RPM.
- This fixes bug#55015 "MySQL server is not restarted properly after RPM upgrade".
* Tue Jun 15 2010 Joerg Bruehe <[email protected]>
- Change the behaviour on installation and upgrade:
On installation, do not autostart the server.
*Iff* the server was stopped before the upgrade is started, this is taken as a
sign the administrator is handling that manually, and so the new server will
not be started automatically at the end of the upgrade.
The start/stop scripts will still be installed, so the server will be started
on the next machine boot.
This is the 5.5 version of fixing bug#27072 (RPM autostarting the server).
* Tue Jun 1 2010 Jonathan Perkin <[email protected]>
- Implement SELinux checks from distribution-specific spec file.
* Wed May 12 2010 Jonathan Perkin <[email protected]>
- Large number of changes to build using CMake
- Introduce distribution-specific RPMs
- Drop debuginfo, build all binaries with debug/symbols
- Remove __os_install_post, use native macro
- Remove _unpackaged_files_terminate_build, make it an error to have
unpackaged files
- Remove cluster RPMs
* Wed Mar 24 2010 Joerg Bruehe <[email protected]>
- Add "--with-perfschema" to the configure options.
* Mon Mar 22 2010 Joerg Bruehe <[email protected]>
- User "usr/lib*" to allow for both "usr/lib" and "usr/lib64",
mask "rmdir" return code 1.
- Remove "ha_example.*" files from the list, they aren't built.
* Wed Mar 17 2010 Joerg Bruehe <[email protected]>
- Fix a wrong path name in handling the debug plugins.
* Wed Mar 10 2010 Joerg Bruehe <[email protected]>
- Take the result of the debug plugin build and put it into the optimized tree,
so that it becomes part of the final installation;
include the files in the packlist. Part of the fixes for bug#49022.
* Mon Mar 01 2010 Joerg Bruehe <[email protected]>
- Set "Oracle and/or its affiliates" as the vendor and copyright owner,
accept upgrading from packages showing MySQL or Sun as vendor.
* Fri Feb 12 2010 Joerg Bruehe <[email protected]>
- Formatting changes:
Have a consistent structure of separator lines and of indentation
(8 leading blanks => tab).
- Introduce the variable "src_dir".
- Give the environment variables "MYSQL_BUILD_CC(CXX)" precedence
over "CC" ("CXX").
- Drop the old "with_static" argument analysis, this is not supported
in 5.1 since ages.
- Introduce variables to control the handlers individually, as well
as other options.
- Use the new "--with-plugin" notation for the table handlers.
- Drop handling "/etc/rc.d/init.d/mysql", the switch to "/etc/init.d/mysql"
was done back in 2002 already.
- Make "--with-zlib-dir=bundled" the default, add an option to disable it.
- Add missing manual pages to the file list.
- Improve the runtime check for "libgcc.a", protect it against being tried
with the Intel compiler "icc".
* Mon Jan 11 2010 Joerg Bruehe <[email protected]>
- Change RPM file naming:
- Suffix like "-m2", "-rc" becomes part of version as "_m2", "_rc".
- Release counts from 1, not 0.
* Wed Dec 23 2009 Joerg Bruehe <[email protected]>
- The "semisync" plugin file name has lost its introductory "lib",
adapt the file lists for the subpackages.
This is a part missing from the fix for bug#48351.
- Remove the "fix_privilege_tables" manual, it does not exist in 5.5
(and likely, the whole script will go, too).
* Mon Nov 16 2009 Joerg Bruehe <[email protected]>
- Fix some problems with the directives around "tcmalloc" (experimental),
remove erroneous traces of the InnoDB plugin (that is 5.1 only).
* Fri Oct 06 2009 Magnus Blaudd <[email protected]>
- Removed mysql_fix_privilege_tables
* Fri Oct 02 2009 Alexander Nozdrin <[email protected]>
- "mysqlmanager" got removed from version 5.4, all references deleted.
* Fri Aug 28 2009 Joerg Bruehe <[email protected]>
- Merge up from 5.1 to 5.4: Remove handling for the InnoDB plugin.
* Thu Aug 27 2009 Joerg Bruehe <[email protected]>
- This version does not contain the "Instance manager", "mysqlmanager":
Remove it from the spec file so that packaging succeeds.
* Mon Aug 24 2009 Jonathan Perkin <[email protected]>
- Add conditionals for bundled zlib and innodb plugin
* Fri Aug 21 2009 Jonathan Perkin <[email protected]>
- Install plugin libraries in appropriate packages.
- Disable libdaemon_example and ftexample plugins.
* Thu Aug 20 2009 Jonathan Perkin <[email protected]>
- Update variable used for mysql-test suite location to match source.
* Fri Nov 07 2008 Joerg Bruehe <[email protected]>
- Correct yesterday's fix, so that it also works for the last flag,
and fix a wrong quoting: un-quoted quote marks must not be escaped.
* Thu Nov 06 2008 Kent Boortz <[email protected]>
- Removed "mysql_upgrade_shell"
- Removed some copy/paste between debug and normal build
* Thu Nov 06 2008 Joerg Bruehe <[email protected]>
- Modify CFLAGS and CXXFLAGS such that a debug build is not optimized.
This should cover both gcc and icc flags. Fixes bug#40546.
* Fri Aug 29 2008 Kent Boortz <[email protected]>
- Removed the "Federated" storage engine option, and enabled in all
* Tue Aug 26 2008 Joerg Bruehe <[email protected]>
- Get rid of the "warning: Installed (but unpackaged) file(s) found:"
Some generated files aren't needed in RPMs:
- the "sql-bench/" subdirectory
Some files were missing:
- /usr/share/aclocal/mysql.m4 ("devel" subpackage)
- Manual "mysqlbug" ("server" subpackage)
- Program "innochecksum" and its manual ("server" subpackage)
- Manual "mysql_find_rows" ("client" subpackage)
- Script "mysql_upgrade_shell" ("client" subpackage)
- Program "ndb_cpcd" and its manual ("ndb-extra" subpackage)
- Manuals "ndb_mgm" + "ndb_restore" ("ndb-tools" subpackage)
* Mon Mar 31 2008 Kent Boortz <[email protected]>
- Made the "Federated" storage engine an option
- Made the "Cluster" storage engine and sub packages an option
* Wed Mar 19 2008 Joerg Bruehe <[email protected]>
- Add the man pages for "ndbd" and "ndb_mgmd".
* Mon Feb 18 2008 Timothy Smith <[email protected]>
- Require a manual upgrade if the already-installed mysql-server is
from another vendor, or is of a different major version.
* Wed May 02 2007 Joerg Bruehe <[email protected]>
- "ndb_size.tmpl" is not needed any more,
"man1/mysql_install_db.1" lacked the trailing '*'.
* Sat Apr 07 2007 Kent Boortz <[email protected]>
- Removed man page for "mysql_create_system_tables"
* Wed Mar 21 2007 Daniel Fischer <[email protected]>
- Add debug server.
* Mon Mar 19 2007 Daniel Fischer <[email protected]>
- Remove Max RPMs; the server RPMs contain a mysqld compiled with all
features that previously only were built into Max.
* Fri Mar 02 2007 Joerg Bruehe <[email protected]>
- Add several man pages for NDB which are now created.
* Fri Jan 05 2007 Kent Boortz <[email protected]>
- Put back "libmygcc.a", found no real reason it was removed.
- Add CFLAGS to gcc call with --print-libgcc-file, to make sure the
correct "libgcc.a" path is returned for the 32/64 bit architecture.
* Mon Dec 18 2006 Joerg Bruehe <[email protected]>
- Fix the move of "mysqlmanager" to section 8: Directory name was wrong.
* Thu Dec 14 2006 Joerg Bruehe <[email protected]>
- Include the new man pages for "my_print_defaults" and "mysql_tzinfo_to_sql"
in the server RPM.
- The "mysqlmanager" man page got moved from section 1 to 8.
* Thu Nov 30 2006 Joerg Bruehe <[email protected]>
- Call "make install" using "benchdir_root=%{_datadir}",
because that is affecting the regression test suite as well.
* Thu Nov 16 2006 Joerg Bruehe <[email protected]>
- Explicitly note that the "MySQL-shared" RPMs (as built by MySQL AB)
replace "mysql-shared" (as distributed by SuSE) to allow easy upgrading
(bug#22081).
* Mon Nov 13 2006 Joerg Bruehe <[email protected]>
- Add "--with-partition" to all server builds.
- Use "--report-features" in one test run per server build.
* Tue Aug 15 2006 Joerg Bruehe <[email protected]>
- The "max" server is removed from packages, effective from 5.1.12-beta.
Delete all steps to build, package, or install it.
* Mon Jul 10 2006 Joerg Bruehe <[email protected]>
- Fix a typing error in the "make" target for the Perl script to run the tests.
* Tue Jul 04 2006 Joerg Bruehe <[email protected]>
- Use the Perl script to run the tests, because it will automatically check
whether the server is configured with SSL.
* Tue Jun 27 2006 Joerg Bruehe <[email protected]>
- move "mysqldumpslow" from the client RPM to the server RPM (bug#20216)
- Revert all previous attempts to call "mysql_upgrade" during RPM upgrade,
there are some more aspects which need to be solved before this is possible.
For now, just ensure the binary "mysql_upgrade" is delivered and installed.
* Thu Jun 22 2006 Joerg Bruehe <[email protected]>
- Close a gap of the previous version by explicitly using
a newly created temporary directory for the socket to be used
in the "mysql_upgrade" operation, overriding any local setting.
* Tue Jun 20 2006 Joerg Bruehe <[email protected]>
- To run "mysql_upgrade", we need a running server;
start it in isolation and skip password checks.
* Sat May 20 2006 Kent Boortz <[email protected]>
- Always compile for PIC, position independent code.
* Wed May 10 2006 Kent Boortz <[email protected]>
- Use character set "all" when compiling with Cluster, to make Cluster
nodes independent on the character set directory, and the problem
that two RPM sub packages both wants to install this directory.
* Mon May 01 2006 Kent Boortz <[email protected]>
- Use "./libtool --mode=execute" instead of searching for the
executable in current directory and ".libs".
* Fri Apr 28 2006 Kent Boortz <[email protected]>
- Install and run "mysql_upgrade"
* Wed Apr 12 2006 Jim Winstead <[email protected]>
- Remove sql-bench, and MySQL-bench RPM (will be built as an independent
project from the mysql-bench repository)
* Tue Apr 11 2006 Jim Winstead <[email protected]>
- Remove old mysqltestmanager and related programs
* Sat Apr 01 2006 Kent Boortz <[email protected]>
- Set $LDFLAGS from $MYSQL_BUILD_LDFLAGS
* Wed Mar 07 2006 Kent Boortz <[email protected]>
- Changed product name from "Community Edition" to "Community Server"
* Mon Mar 06 2006 Kent Boortz <[email protected]>
- Fast mutexes is now disabled by default, but should be
used in Linux builds.
* Mon Feb 20 2006 Kent Boortz <[email protected]>
- Reintroduced a max build
- Limited testing of 'debug' and 'max' servers
- Berkeley DB only in 'max'
* Mon Feb 13 2006 Joerg Bruehe <[email protected]>
- Use "-i" on "make test-force";
this is essential for later evaluation of this log file.
* Thu Feb 09 2006 Kent Boortz <[email protected]>
- Pass '-static' to libtool, link static with our own libraries, dynamic
with system libraries. Link with the bundled zlib.
* Wed Feb 08 2006 Kristian Nielsen <[email protected]>
- Modified RPM spec to match new 5.1 debug+max combined community packaging.
* Sun Dec 18 2005 Kent Boortz <[email protected]>
- Added "client/mysqlslap"
* Mon Dec 12 2005 Rodrigo Novo <[email protected]>
- Added zlib to the list of (static) libraries installed
- Added check against libtool weirdness (WRT: sql/mysqld || sql/.libs/mysqld)
- Compile MySQL with bundled zlib
- Fixed %packager name to "MySQL Production Engineering Team"
* Mon Dec 05 2005 Joerg Bruehe <[email protected]>
- Avoid using the "bundled" zlib on "shared" builds:
As it is not installed (on the build system), this gives dependency
problems with "libtool" causing the build to fail.
(Change was done on Nov 11, but left uncommented.)
* Tue Nov 22 2005 Joerg Bruehe <[email protected]>
- Extend the file existence check for "init.d/mysql" on un-install
to also guard the call to "insserv"/"chkconfig".
* Thu Oct 27 2005 Lenz Grimmer <[email protected]>
- added more man pages
* Wed Oct 19 2005 Kent Boortz <[email protected]>
- Made yaSSL support an option (off by default)
* Wed Oct 19 2005 Kent Boortz <[email protected]>
- Enabled yaSSL support
* Sat Oct 15 2005 Kent Boortz <[email protected]>
- Give mode arguments the same way in all places
- Moved copy of mysqld.a to "standard" build, but
disabled it as we don't do embedded yet in 5.0
* Fri Oct 14 2005 Kent Boortz <[email protected]>
- For 5.x, always compile with --with-big-tables
- Copy the config.log file to location outside
the build tree
* Fri Oct 14 2005 Kent Boortz <[email protected]>
- Removed unneeded/obsolete configure options
- Added archive engine to standard server
- Removed the embedded server from experimental server
- Changed suffix "-Max" => "-max"
- Changed comment string "Max" => "Experimental"
* Thu Oct 13 2005 Lenz Grimmer <[email protected]>
- added a usermod call to assign a potentially existing mysql user to the
correct user group (BUG#12823)
- Save the perror binary built during Max build so it supports the NDB
error codes (BUG#13740)
- added a separate macro "mysqld_group" to be able to define the
user group of the mysql user separately, if desired.
* Thu Sep 29 2005 Lenz Grimmer <[email protected]>
- fixed the removing of the RPM_BUILD_ROOT in the %clean section (the
$RBR variable did not get expanded, thus leaving old build roots behind)
* Thu Aug 04 2005 Lenz Grimmer <[email protected]>
- Fixed the creation of the mysql user group account in the postinstall
section (BUG 12348)
- Fixed enabling the Archive storage engine in the Max binary
* Tue Aug 02 2005 Lenz Grimmer <[email protected]>
- Fixed the Requires: tag for the server RPM (BUG 12233)
* Fri Jul 15 2005 Lenz Grimmer <[email protected]>
- create a "mysql" user group and assign the mysql user account to that group
in the server postinstall section. (BUG 10984)
* Tue Jun 14 2005 Lenz Grimmer <[email protected]>
- Do not build statically on i386 by default, only when adding either "--with
static" or "--define '_with_static 1'" to the RPM build options. Static
linking really only makes sense when linking against the specially patched
glibc 2.2.5.
* Mon Jun 06 2005 Lenz Grimmer <[email protected]>
- added mysql_client_test to the "bench" subpackage (BUG 10676)
- added the libndbclient static and shared libraries (BUG 10676)
* Wed Jun 01 2005 Lenz Grimmer <[email protected]>
- use "mysqldatadir" variable instead of hard-coding the path multiple times
- use the "mysqld_user" variable on all occasions a user name is referenced
- removed (incomplete) Brazilian translations
- removed redundant release tags from the subpackage descriptions
* Wed May 25 2005 Joerg Bruehe <[email protected]>
- Added a "make clean" between separate calls to "BuildMySQL".
* Thu May 12 2005 Guilhem Bichot <[email protected]>
- Removed the mysql_tableinfo script made obsolete by the information schema
* Wed Apr 20 2005 Lenz Grimmer <[email protected]>
- Enabled the "blackhole" storage engine for the Max RPM
* Wed Apr 13 2005 Lenz Grimmer <[email protected]>
- removed the MySQL manual files (html/ps/texi) - they have been removed
from the MySQL sources and are now available separately.
* Mon Apr 4 2005 Petr Chardin <[email protected]>
- old mysqlmanager, mysqlmanagerc and mysqlmanager-pwger renamed into
mysqltestmanager, mysqltestmanager and mysqltestmanager-pwgen respectively
* Fri Mar 18 2005 Lenz Grimmer <[email protected]>
- Disabled RAID in the Max binaries once and for all (it has finally been
removed from the source tree)
* Sun Feb 20 2005 Petr Chardin <[email protected]>
- Install MySQL Instance Manager together with mysqld, touch mysqlmanager
password file
* Mon Feb 14 2005 Lenz Grimmer <[email protected]>
- Fixed the compilation comments and moved them into the separate build sections
for Max and Standard
* Mon Feb 7 2005 Tomas Ulin <[email protected]>
- enabled the "Ndbcluster" storage engine for the max binary
- added extra make install in ndb subdir after Max build to get ndb binaries
- added packages for ndbcluster storage engine
* Fri Jan 14 2005 Lenz Grimmer <[email protected]>
- replaced obsoleted "BuildPrereq" with "BuildRequires" instead
* Thu Jan 13 2005 Lenz Grimmer <[email protected]>
- enabled the "Federated" storage engine for the max binary
* Tue Jan 04 2005 Petr Chardin <[email protected]>
- ISAM and merge storage engines were purged, along with the corresponding
tools and manpages (isamchk and isamlog)
* Thu Dec 31 2004 Lenz Grimmer <[email protected]>
- enabled the "Archive" storage engine for the max binary
- enabled the "CSV" storage engine for the max binary
- enabled the "Example" storage engine for the max binary
* Thu Aug 26 2004 Lenz Grimmer <[email protected]>
- MySQL-Max now requires MySQL-server instead of MySQL (BUG 3860)
* Fri Aug 20 2004 Lenz Grimmer <[email protected]>
- do not link statically on IA64/AMD64 as these systems do not have
a patched glibc installed
* Tue Aug 10 2004 Lenz Grimmer <[email protected]>
- Added libmygcc.a to the devel subpackage (required to link applications
against the embedded server libmysqld.a) (BUG 4921)
* Mon Aug 09 2004 Lenz Grimmer <[email protected]>
- Added EXCEPTIONS-CLIENT to the "devel" package
* Thu Jul 29 2004 Lenz Grimmer <[email protected]>
- disabled OpenSSL in the Max binaries again (the RPM packages were the
only exception to this anyway) (BUG 1043)
* Wed Jun 30 2004 Lenz Grimmer <[email protected]>
- fixed server postinstall (mysql_install_db was called with the wrong
parameter)
* Thu Jun 24 2004 Lenz Grimmer <[email protected]>
- added mysql_tzinfo_to_sql to the server subpackage
- run "make clean" instead of "make distclean"
* Mon Apr 05 2004 Lenz Grimmer <[email protected]>
- added ncurses-devel to the build prerequisites (BUG 3377)
* Thu Feb 12 2004 Lenz Grimmer <[email protected]>
- when using gcc, _always_ use CXX=gcc
- replaced Copyright with License field (Copyright is obsolete)
* Tue Feb 03 2004 Lenz Grimmer <[email protected]>
- added myisam_ftdump to the Server package
* Tue Jan 13 2004 Lenz Grimmer <[email protected]>
- link the mysql client against libreadline instead of libedit (BUG 2289)
* Mon Dec 22 2003 Lenz Grimmer <[email protected]>
- marked /etc/logrotate.d/mysql as a config file (BUG 2156)
* Fri Dec 13 2003 Lenz Grimmer <[email protected]>
- fixed file permissions (BUG 1672)
* Thu Dec 11 2003 Lenz Grimmer <[email protected]>
- made testing for gcc3 a bit more robust
* Fri Dec 05 2003 Lenz Grimmer <[email protected]>
- added missing file mysql_create_system_tables to the server subpackage
* Fri Nov 21 2003 Lenz Grimmer <[email protected]>
- removed dependency on MySQL-client from the MySQL-devel subpackage
as it is not really required. (BUG 1610)
* Fri Aug 29 2003 Lenz Grimmer <[email protected]>
- Fixed BUG 1162 (removed macro names from the changelog)
- Really fixed BUG 998 (disable the checking for installed but
unpackaged files)
* Tue Aug 05 2003 Lenz Grimmer <[email protected]>
- Fixed BUG 959 (libmysqld not being compiled properly)
- Fixed BUG 998 (RPM build errors): added missing files to the
distribution (mysql_fix_extensions, mysql_tableinfo, mysqldumpslow,
mysql_fix_privilege_tables.1), removed "-n" from install section.
* Wed Jul 09 2003 Lenz Grimmer <[email protected]>
- removed the GIF Icon (file was not included in the sources anyway)
- removed unused variable shared_lib_version
- do not run automake before building the standard binary
(should not be necessary)
- add server suffix '-standard' to standard binary (to be in line
with the binary tarball distributions)
- Use more RPM macros (_exec_prefix, _sbindir, _libdir, _sysconfdir,
_datadir, _includedir) throughout the spec file.
- allow overriding CC and CXX (required when building with other compilers)
* Fri May 16 2003 Lenz Grimmer <[email protected]>
- re-enabled RAID again
* Wed Apr 30 2003 Lenz Grimmer <[email protected]>
- disabled MyISAM RAID (--with-raid) - it throws an assertion which
needs to be investigated first.
* Mon Mar 10 2003 Lenz Grimmer <[email protected]>
- added missing file mysql_secure_installation to server subpackage
(BUG 141)
* Tue Feb 11 2003 Lenz Grimmer <[email protected]>
- re-added missing pre- and post(un)install scripts to server subpackage
- added config file /etc/my.cnf to the file list (just for completeness)
- make sure to create the datadir with 755 permissions
* Mon Jan 27 2003 Lenz Grimmer <[email protected]>
- removed unused CC and CXX variables
- CFLAGS and CXXFLAGS should honor RPM_OPT_FLAGS
* Fri Jan 24 2003 Lenz Grimmer <[email protected]>
- renamed package "MySQL" to "MySQL-server"
- fixed Copyright tag
- added mysql_waitpid to client subpackage (required for mysql-test-run)
* Wed Nov 27 2002 Lenz Grimmer <[email protected]>
- moved init script from /etc/rc.d/init.d to /etc/init.d (the majority of
Linux distributions now support this scheme as proposed by the LSB either
directly or via a compatibility symlink)
- Use new "restart" init script action instead of starting and stopping
separately
- Be more flexible in activating the automatic bootup - use insserv (on
older SuSE versions) or chkconfig (Red Hat, newer SuSE versions and
others) to create the respective symlinks
* Wed Sep 25 2002 Lenz Grimmer <[email protected]>
- MySQL-Max now requires MySQL >= 4.0 to avoid version mismatches
(mixing 3.23 and 4.0 packages)
* Fri Aug 09 2002 Lenz Grimmer <[email protected]>
- Turn off OpenSSL in MySQL-Max for now until it works properly again
- enable RAID for the Max binary instead
- added compatibility link: safe_mysqld -> mysqld_safe to ease the
transition from 3.23
* Thu Jul 18 2002 Lenz Grimmer <[email protected]>
- Reworked the build steps a little bit: the Max binary is supposed
to include OpenSSL, which cannot be linked statically, thus trying
to statically link against a special glibc is futile anyway
- because of this, it is not required to make yet another build run
just to compile the shared libs (saves a lot of time)
- updated package description of the Max subpackage
- clean up the BuildRoot directory afterwards
* Mon Jul 15 2002 Lenz Grimmer <[email protected]>
- Updated Packager information
- Fixed the build options: the regular package is supposed to
include InnoDB and linked statically, while the Max package
should include BDB and SSL support
* Fri May 03 2002 Lenz Grimmer <[email protected]>
- Use more RPM macros (e.g. infodir, mandir) to make the spec
file more portable
- reorganized the installation of documentation files: let RPM
take care of this
- reorganized the file list: actually install man pages along
with the binaries of the respective subpackage
- do not include libmysqld.a in the devel subpackage as well, if we
have a special "embedded" subpackage
- reworked the package descriptions
* Mon Oct 8 2001 Monty
- Added embedded server as a separate RPM
* Fri Apr 13 2001 Monty
- Added mysqld-max to the distribution
* Tue Jan 2 2001 Monty
- Added mysql-test to the bench package
* Fri Aug 18 2000 Tim Smith <[email protected]>
- Added separate libmysql_r directory; now both a threaded
and non-threaded library is shipped.
* Wed Sep 28 1999 David Axmark <[email protected]>
- Added the support-files/my-example.cnf to the docs directory.
- Removed devel dependency on base since it is about client
development.
* Wed Sep 8 1999 David Axmark <[email protected]>
- Cleaned up some for 3.23.
* Thu Jul 1 1999 David Axmark <[email protected]>
- Added support for shared libraries in a separate sub
package. Original fix by David Fox ([email protected])
- The --enable-assembler switch is now automatically disabled on
platforms where assembler code is unavailable. This should allow
building this RPM on non-i386 systems.
* Mon Feb 22 1999 David Axmark <[email protected]>
- Removed unportable cc switches from the spec file. The defaults can
now be overridden with environment variables. This feature is used
to compile the official RPM with optimal (but compiler version
specific) switches.
- Removed the repetitive description parts for the sub rpms. Maybe add
again if RPM gets a multiline macro capability.
- Added support for a pt_BR translation. Translation contributed by
Jorge Godoy <[email protected]>.
* Wed Nov 4 1998 David Axmark <[email protected]>
- A lot of changes in all the rpm and install scripts. This may even
be a working RPM :-)
* Sun Aug 16 1998 David Axmark <[email protected]>
- A developers' changelog for MySQL is available in the source RPM, and
there is a history of major user-visible changes in the Reference
Manual. Only RPM-specific changes will be documented here.
|
wy182000/mysql
|
support-files/mysql.spec.sh
|
Shell
|
gpl-2.0
| 79,885 |
html2pdf ggplot_scales_y_log10_notes.html ggplot_scales_y_log10_notes.pdf
|
pamag/CODEmiscellanea
|
ggplot2_notes/_convert_to_pdf.sh
|
Shell
|
gpl-2.0
| 74 |
#!/bin/bash
######################################################################
#
# idFORGE Framework - Manage identity manuals in community
# Copyright © 2015 The CentOS Artwork SIG
#
# idFORGE Framework is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# idFORGE Framework is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with idFORGE Framework; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
# Alain Reguera Delgado <[email protected]>
# 39 Street No. 4426 Cienfuegos, Cuba.
#
######################################################################
function command_line {
idforge_checkFiles -ex ${QATEST_FLAG_COMMAND}
idforge_printMessage "${QATEST_FLAG_COMMAND} ${@}" --as-processing-line
local COMMAND_TIMESTAMP=$(date '+%s.%N')
eval ${QATEST_FLAG_COMMAND} ${@} > /dev/null
local COMMAND_EXIT=${?}
COMMAND_TIMESTAMP=$(check_printTimestamp ${COMMAND_TIMESTAMP})
check_setCommandStatus "${COMMAND_EXIT}" "`eval_gettext "in \\\$COMMAND_TIMESTAMP seconds"`"
}
|
areguera/idforge
|
Library/Modules/Qatest/Modules/Run/Modules/Check/Modules/Command/command_line.sh
|
Shell
|
gpl-2.0
| 1,528 |
#!/bin/bash
# this script runs on the backup side
# it performs an incremental backup of the databases
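# rsync flags: -h human-readable, -a archive mode, -z compress in transit,
# -i itemize changes; --rsh runs the transfer over ssh on port 4000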
rsync -haz -i --rsh='ssh -p4000' SOURCE_KOMP:/putanja_do_mssql_baza/ /DESTINATION/putanja_do_backupa/
chown mssql:mssql -R /DESTINATION/putanja_do_backupa/
|
dbrankov/skripte
|
rsync_MSSQLincremental.sh
|
Shell
|
gpl-2.0
| 238 |
#!/bin/bash -x
#
# Generated - do not edit!
#
# Macros
TOP=`pwd`
CND_PLATFORM=GNU-Linux-x86
CND_CONF=Release
CND_DISTDIR=dist
CND_BUILDDIR=build
CND_DLIB_EXT=so
NBTMPDIR=${CND_BUILDDIR}/${CND_CONF}/${CND_PLATFORM}/tmp-packaging
TMPDIRNAME=tmp-packaging
OUTPUT_PATH=${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/auth
OUTPUT_BASENAME=auth
PACKAGE_TOP_DIR=auth/
# Functions
function checkReturnCode
{
rc=$?
if [ $rc != 0 ]
then
exit $rc
fi
}
function makeDirectory
# $1 directory path
# $2 permission (optional)
{
mkdir -p "$1"
checkReturnCode
if [ "$2" != "" ]
then
chmod $2 "$1"
checkReturnCode
fi
}
function copyFileToTmpDir
# $1 from-file path
# $2 to-file path
# $3 permission
{
cp "$1" "$2"
checkReturnCode
if [ "$3" != "" ]
then
chmod $3 "$2"
checkReturnCode
fi
}
# Setup
cd "${TOP}"
mkdir -p ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package
rm -rf ${NBTMPDIR}
mkdir -p ${NBTMPDIR}
# Copy files and create directories and links
cd "${TOP}"
makeDirectory "${NBTMPDIR}/auth/bin"
copyFileToTmpDir "${OUTPUT_PATH}" "${NBTMPDIR}/${PACKAGE_TOP_DIR}bin/${OUTPUT_BASENAME}" 0755
# Generate tar file
cd "${TOP}"
rm -f ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package/auth.tar
cd ${NBTMPDIR}
tar -vcf ../../../../${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package/auth.tar *
checkReturnCode
# Cleanup
cd "${TOP}"
rm -rf ${NBTMPDIR}
|
yvanvds/yATools
|
auth/nbproject/Package-Release.bash
|
Shell
|
gpl-2.0
| 1,427 |
#!/bin/bash
SCRIPT_DIR=`dirname $0`
source ${SCRIPT_DIR}/quiz_common.sh
INPUT_FILES=`ls ${QUIZ_DIR}/omr_input/ 2> /dev/null | grep "^[^(jpg)]*jpg$"`
if [ "${INPUT_FILES}" = "" ]
then
echo "Error: No image in omr input directory."
exit 1
fi
WHOAMI=`whoami`
if [ "$WHOAMI" = "apache" ]
then
export HOME=/home/apache
export PATH=/usr/java/latest/bin/:$PATH
fi
OMR_ROTATE_LOG=${QUIZ_DIR}/omr_rotate.log
TMP_OMR_ROTATE_LOG=/tmp/${QUIZ_NAME}.omr_rotate.log
touch ${OMR_ROTATE_LOG}
ERRORS="false"
for file in ${INPUT_FILES}
do
echo -n "${file}"
file=${QUIZ_DIR}/omr_input/${file}
# arguments: file to test, corrected output image, threshold, reference-mark position
java -jar -Xmn128M -Xms256M -Xmx256M -Xss4096k -Djava.awt.headless=true ${SCRIPT_DIR}/omr_rotate.jar ${file} ${file}.rotated_temp.jpg 220 hg > ${TMP_OMR_ROTATE_LOG}
if [ $? -ne 0 ]
then
ERRORS="true"
echo " [ERROR]"
continue
fi
ROTATION=$(grep 'alpha=' ${TMP_OMR_ROTATE_LOG} | cut -f 2 -d = )
ORIENTATION=$(grep 'orientation=' ${TMP_OMR_ROTATE_LOG} | cut -f 2 -d = )
# for the crop we use the image dimensions from before the rotation, so the initial orientation must be taken into account
if [ "${ORIENTATION}" == "1" -o "${ORIENTATION}" == "3" ]
then
ORIGINAL_WIDTH=$(identify -format '%h' ${file})
ORIGINAL_HEIGHT=$(identify -format '%w' ${file})
else
ORIGINAL_WIDTH=$(identify -format '%w' ${file})
ORIGINAL_HEIGHT=$(identify -format '%h' ${file})
fi
printf " (angle: %.4f) " ${ROTATION}
# convert ${file}.rotated_temp.jpg -rotate ${ROTATION} -crop ${WIDTH}x${HEIGHT} ${file}.rotated.jpg
convert ${file} -rotate ${ROTATION} ${file}.rotated_temp.jpg
ROTATED_WIDTH=$(identify -format '%w' ${file}.rotated_temp.jpg)
ROTATED_HEIGHT=$(identify -format '%h' ${file}.rotated_temp.jpg)
convert ${file}.rotated_temp.jpg -crop ${ORIGINAL_WIDTH}x${ORIGINAL_HEIGHT}+$(( (${ROTATED_WIDTH}-${ORIGINAL_WIDTH})/2 ))+$(( (${ROTATED_HEIGHT}-${ORIGINAL_HEIGHT})/2 )) ${file}.rotated.jpg
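# Worked example (illustrative numbers): a 2480x3504 scan rotated by 1.2 degrees
# grows to roughly 2553x3555, so the crop above uses offsets (2553-2480)/2=36
# and (3555-3504)/2=25 to cut the original page size back out of the center.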
rm ${file}.rotated_temp.jpg 2> /dev/null
cat ${TMP_OMR_ROTATE_LOG} >> ${OMR_ROTATE_LOG}
echo "[OK]"
done
rm ${TMP_OMR_ROTATE_LOG}
echo ""
if [ "${ERRORS}" = "true" ]
then
echo "There were errors but you can continue with OMR."
exit 2
else
echo "There were no error."
exit 0
fi
|
CedricDinont/PaperQuiz
|
bin/correct_images_rotation.sh
|
Shell
|
gpl-2.0
| 2,305 |
#!/bin/bash
set -ev
export PATH=/opt/qt59/bin:$PATH
qmake -recursive
make --silent -j3 || make
exit
|
gcoco/GoldenCheetah
|
travis/linux/script.sh
|
Shell
|
gpl-2.0
| 100 |
#!/usr/bin/env bash
# Author : Gaurav Kumar, Johns Hopkins University
# Creates OpenFST lattices from Kaldi lattices
# This script needs to be run from one level above this directory
. path.sh
if [ $# -lt 3 ]; then
echo "Enter the latdir (where the lattices will be put), the decode dir containing lattices and the acoustic scale"
exit 1
fi
prunebeam=2
latdir=$1
decode_dir=$2
acoustic_scale=$3
#latdir="latjosh-2-callhome"
#decode_dir=exp/tri5a/decode_$partition
#acoustic_scale=0.077
stage=0
if [ -d $decode_dir ]
then
# TODO:Add scaling factor for weights, how?
rawLatDir="lattices"
compiledLatDir="lattices-bin"
preplfLatDir="lattices-pushed"
mkdir -p $latdir
mkdir -p $latdir/$rawLatDir
mkdir -p $latdir/$compiledLatDir
mkdir -p $latdir/$preplfLatDir
for l in $decode_dir/lat.*.gz
do
(
# Extract file name and unzip the file first
bname=${l##*/}
bname="$latdir/${bname%.gz}"
gunzip -c $l > "$bname.bin"
if [ $stage -le 0 ]; then
# Now copy into ark format
$KALDI_ROOT/src/latbin/lattice-copy ark:$bname.bin ark,t:- > "$bname.raw"
# Prune lattices
$KALDI_ROOT/src/latbin/lattice-prune --acoustic-scale=$acoustic_scale --beam=$prunebeam ark:"$bname.raw" ark:"$bname.pruned"
# Convert to an openfst compatible format
$KALDI_ROOT/src/latbin/lattice-to-fst --lm-scale=1.0 --acoustic-scale=$acoustic_scale ark:$bname.pruned ark,t:$bname.ark.fst
fi
if [ $stage -le 1 ]; then
fileName=""
fileLine=0
while read line; do
if [ $fileLine = 0 ]; then
fileName="$line"
fileLine=1
continue
fi
if [ -z "$line" ]; then
fileLine=0
continue
fi
# Replace laugh, unk, oov, noise with eps
echo "$line" | awk '{if ($3 == 2038 || $3 == 2039 || $3 == 2040) {$3 = 0; $4 = 0} print}' >> "$latdir/$rawLatDir/$fileName.lat"
done < $bname.ark.fst
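# For reference, the awk above rewrites arcs like "0 1 2039 2039 4.5" into
# "0 1 0 0 4.5": input/output labels become epsilon (0), weights are kept.
# The symbol ids 2038-2040 are specific to this corpus' symbol table.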
echo "Done isolating lattices"
fi
) &
done
wait
rm $latdir/*.bin
rm $latdir/*.pruned
if [ $stage -le 2 ]; then
#Compile lattices
for l in $latdir/$rawLatDir/*.lat
do
(
# Arc type needs to be log
bname=${l##*/}
fstcompile --arc_type=log $latdir/$rawLatDir/$bname $latdir/$compiledLatDir/$bname
) &
done
wait
echo "Done compiling lattices."
fi
if [ $stage -le 3 ]; then
#Sanjeev's Recipe for creating valid PLF compatible FSTs"
# Create a dummy FST with one state and no arcs first
echo 0 | fstcompile --arc_type=log - $latdir/$preplfLatDir/dummy.fst
# Push Lattice weights towards initial state
for l in $latdir/$compiledLatDir/*.lat
do
(
bname=${l##*/}
fstrmepsilon $latdir/$compiledLatDir/$bname | \
fstpush --push_weights --remove_total_weight - | \
# Do not topo sort here, do it before converting into PLF
# Sanjeev's Recipe : Concatenate with dummy FST
fstconcat - $latdir/$preplfLatDir/dummy.fst | \
fstreverse - | \
fstrmepsilon - | \
fstreverse - $latdir/$preplfLatDir/$bname
) &
done
wait
# Let's take a moment to thank the dummy FST for playing its
# part in this process. However, it has to go now.
rm $latdir/$preplfLatDir/dummy.fst
echo "Done performing fst push (initial state)"
fi
else
echo "Complete training and decoding first"
fi
|
michellemorales/OpenMM
|
kaldi/egs/fisher_callhome_spanish/s5/local/latconvert.sh
|
Shell
|
gpl-2.0
| 3,417 |
#! /bin/sh
# Copyright (C) 2002-2017 Free Software Foundation, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Check that info files are normally built in $(srcdir),
# not in $(builddir).
required='makeinfo tex texi2dvi'
. test-init.sh
cat >> configure.ac << 'END'
AC_OUTPUT
END
cat > Makefile.am << 'END'
info_TEXINFOS = main.texi
END
cat > main.texi << 'END'
\input texinfo
@setfilename main.info
@settitle main
@node Top
Hello walls.
@include version.texi
@bye
END
$ACLOCAL
$AUTOMAKE --add-missing
$AUTOCONF
mkdir build
cd build
../configure
$MAKE
test -f ../main.info
test ! -e main.info
test -f ../stamp-vti
test ! -e stamp-vti
test -f ../version.texi
test ! -e version.texi
cd ..
rm -rf build make.info* stamp-vti version.texi
./configure
$MAKE
test -f main.info
# Make sure stamp-vti is newer than version.texi.
# (A common situation in a real tree.)
# This is needed to test the "subtle" issue described below.
test -f stamp-vti
test -f version.texi
$sleep
touch stamp-vti
$MAKE distclean
test -f stamp-vti
test -f version.texi
mkdir build
cd build
../configure
$MAKE
# main.info should not be rebuilt in the current directory, since
# it's up-to-date in $(srcdir).
# This can be caused by a subtle issue related to VPATH handling
# of version.texi (see also the comment in texi-vers.am): because
# stamp-vti is newer than version.texi, the 'version.texi: stamp-vti'
# rule is always triggered. Still that's not a reason for 'make'
# to think 'version.texi' has been created...
test ! -e main.info
$MAKE dvi
test -f main.dvi
$MAKE distcheck
:
|
Starlink/automake
|
t/txinfo-info-in-srcdir.sh
|
Shell
|
gpl-2.0
| 2,145 |
#!/bin/sh
cat ./Output.log | grep '^STATISTICS\|^AGENT' > Output-Stats.log
|
benelot/GVG-AI-2015
|
statistics/Clean-output.sh
|
Shell
|
gpl-2.0
| 75 |
#!/bin/sh
LOGFILE=run150102.log
#opp_run -r 0 -u Cmdenv -c OLSR5x5 -n ../src:../../inet-2.3/examples:../../inet-2.3/src -l ../src/CPVPlant -l ../../inet-2.3/src/inet WirelessScaling.ini >> ${LOGFILE}
#opp_run -r 0 -u Cmdenv -c BATMAN5x5 -n ../src:../../inet-2.3/examples:../../inet-2.3/src -l ../src/CPVPlant -l ../../inet-2.3/src/inet WirelessScaling.ini >> ${LOGFILE}
#opp_run -r 0 -u Cmdenv -c OLSR5x5 -n ../../src:../../../inet-2.5/examples:../../../inet-2.6/src -l ../../src/CPVPlant -l ../../../inet-2.6/src/inet WirelessScaling.ini >> ${LOGFILE}
#opp_run -r 0 -u Cmdenv -c DYMO5x5 -n ../src:../../inet-2.3/examples:../../inet-2.3/src -l ../src/CPVPlant -l ../../inet-2.3/src/inet WirelessScaling.ini >> ${LOGFILE}
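# opp_run flags: -r run number, -u user interface (Cmdenv), -c config name,
# -n NED source path, -l shared library to load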
opp_run -r 0 -u Cmdenv -c OLSR5x5 -n ../../src:../../../inet-2.5/examples:../../../inet-2.5/src -l ../../src/CPVPlant -l ../../../inet-2.5/src/inet WirelessScaling.ini >> ${LOGFILE}
|
PascalBen/CPVPlant
|
simulations/scaling/run.sh
|
Shell
|
gpl-2.0
| 915 |
#!/bin/sh
import_pootle()
{
po_dir=~/git/translations/po
if [ -d $po_dir ]; then
: > lib/libpacman/po/LINGUAS
: > src/pacman-g2/po/LINGUAS
for i in $(/bin/ls $po_dir/pacman)
do
if [ -e $po_dir/pacman/$i/libpacman.po ]; then
cp $po_dir/pacman/$i/libpacman.po lib/libpacman/po/$i.po
if msgfmt -c --statistics -o lib/libpacman/po/$i.gmo lib/libpacman/po/$i.po; then
echo $i >> lib/libpacman/po/LINGUAS
else
echo "WARNING: lib/libpacman/po/$i.po would break your build!"
fi
fi
if [ -e $po_dir/pacman/$i/pacman-g2.po ]; then
cp $po_dir/pacman/$i/pacman-g2.po src/pacman-g2/po/$i.po
if msgfmt -c --statistics -o src/pacman-g2/po/$i.gmo src/pacman-g2/po/$i.po; then
echo $i >> src/pacman-g2/po/LINGUAS
else
echo "WARNING: src/pacman-g2/po/$i.po would break your build!"
fi
fi
if [ -e $po_dir/pacman/$i/mans.po ]; then
cp $po_dir/pacman/$i/mans.po doc/po/$i.po
if ! msgfmt -c --statistics -o doc/po/$i.gmo doc/po/$i.po; then
echo "WARNING: doc/po/$i.po will break your build!"
fi
fi
done
else
echo "WARNING: no po files will be used"
fi
# generate the pot files
for i in lib/libpacman/po src/pacman-g2/po
do
cd $i
mv Makevars Makevars.tmp
package=`pwd|sed 's|.*/\(.*\)/.*|\1|'`
cp /usr/bin/intltool-extract ./
intltool-update --pot --gettext-package=$package
rm intltool-extract
mv Makevars.tmp Makevars
cd - >/dev/null
done
# avoid having the Makevars file show up as modified
git update-index --refresh >/dev/null
}
cd `dirname $0`
ver=`grep AC_INIT configure.ac|sed 's/.*, \([0-9\.]*\), .*/\1/'`
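# e.g. a line "AC_INIT(pacman-g2, 3.9.1, ...)" in configure.ac
# yields ver=3.9.1 (illustrative version number)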
if [ "$1" == "--dist" ]; then
git archive --format=tar --prefix=pacman-g2-$ver/ HEAD | tar xf -
git log --no-merges |git name-rev --tags --stdin > pacman-g2-$ver/ChangeLog
cd pacman-g2-$ver
./autogen.sh --git
cd ..
tar czf pacman-g2-$ver.tar.gz pacman-g2-$ver
rm -rf pacman-g2-$ver
exit 0
elif [ "$1" == "--release" ]; then
git tag -l |grep -q $ver || dg tag $ver
sh $0 --dist
gpg --comment "See http://ftp.frugalware.org/pub/README.GPG for info" \
-ba -u 20F55619 pacman-g2-$ver.tar.gz
mv pacman-g2-$ver.tar.gz{,.asc} ../releases
exit 0
elif [ "$1" == "--gettext-only" ]; then
sh autoclean.sh
for i in lib/libpacman/po src/pacman-g2/po
do
cd $i
mv Makevars Makevars.tmp
package=`pwd|sed 's|.*/\(.*\)/.*|\1|'`
cp /usr/bin/intltool-extract ./
intltool-update --pot --gettext-package=$package
rm intltool-extract
if [ "$2" != "--pot-only" ]; then
for j in *.po
do
if msgmerge $j $package.pot -o $j.new; then
mv -f $j.new $j
echo -n "$i/$j: "
msgfmt -c --statistics -o $j.gmo $j
rm -f $j.gmo
else
echo "msgmerge for $j failed!"
rm -f $j.new
fi
done
fi
mv Makevars.tmp Makevars
cd - >/dev/null
done
cd doc
po4a -k 0 po4a.cfg
if [ "$2" == "--pot-only" ]; then
rm -rf po/*.po `grep '\[po4a_langs\]' po4a.cfg |sed 's/\[po4a_langs\] //'`
exit 0
fi
cd po
for i in *po
do
if msgmerge $i $package.pot -o $i.new; then
mv -f $i.new $i
echo -n "man/$i: "
msgfmt -c --statistics -o $i.gmo $i
rm -f $i.gmo
else
echo "msgmerge for $i failed!"
rm -f $i.new
fi
done
exit 0
fi
# copy in the po files
import_pootle
autoreconf -fi
if [ "$1" == "--git" ]; then
rm -rf autom4te.cache
fi
|
frugalware/pacman-g2
|
autogen.sh
|
Shell
|
gpl-2.0
| 3,286 |
#! /bin/sh
function add_logs {
echo "add logs in source: $(basename $1)"
for f in $(find src/ -name \*.java) ; do
sed \
-e 's:///*Log\.v:Log.v:' \
-e 's:///*Log\.d:Log.d:' \
-i '' $f
done
}
for lib in $PWD $(grep android.library.reference default.properties | cut -d= -f2) ; do
add_logs $lib
done
echo "set debuggable=true"
sed -e 's/android:debuggable="false"/android:debuggable="true"/' -i '' AndroidManifest.xml
|
lmb/websms-connector-pbxnetwork
|
shell/postDeploy.sh
|
Shell
|
gpl-3.0
| 435 |
/data/g4gm/data/1combine /data/tmp/w_hadgem2.bin /data/g4gm/data/rest/r2010.txt /data/tmp/w_hadgem2_2000.txt /data/tmp/w_hadgem2_2020.txt /data/tmp/w_hadgem2_2050.txt /data/tmp/w_hadgem2_2085.txt /data/tmp/w_hadgem2_2100.txt
shutdown -h now
|
GeorgKindermann/g4m
|
application/14GlobDynClim2Globiom/3combine.bash
|
Shell
|
gpl-3.0
| 241 |
#!/bin/bash
INSTALLATION_TYPE=$1
PYGOGAPI='https://github.com/yancharkin/pygogapi/archive/master.zip'
GOGLIB_SCRIPTS1='https://github.com/yancharkin/games_nebula_goglib_scripts/archive/master.zip'
GOGLIB_SCRIPTS2='https://bitbucket.org/yancharkin/games_nebula_goglib_scripts/get/master.zip'
GOGLIB_SCRIPTS3='https://gitlab.com/yancharkin/games_nebula_goglib_scripts/-/archive/master/games_nebula_goglib_scripts-master.zip'
MYLIB_SCRIPTS1='https://github.com/yancharkin/games_nebula_mylib_scripts/archive/master.zip'
MYLIB_SCRIPTS2='https://bitbucket.org/yancharkin/games_nebula_mylib_scripts/get/master.zip'
MYLIB_SCRIPTS3='https://gitlab.com/yancharkin/games_nebula_mylib_scripts/-/archive/master/games_nebula_mylib_scripts-master.zip'
GOGLIB_IMAGES1='https://github.com/yancharkin/games_nebula_goglib_images/archive/master.zip'
MYLIB_IMAGES1='https://github.com/yancharkin/games_nebula_mylib_images/archive/master.zip'
INNOEXTRACT='https://github.com/dscharrer/innoextract/releases/download/1.7/innoextract-1.7-linux.tar.xz'
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ]; do
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
SOURCE="$(readlink "$SOURCE")"
[[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE"
done
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
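# DIR now points at the script's real directory even when the script was
# invoked through a chain of symlinks.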
source "$DIR/scripts/shell_functions.sh"
ARCH=$(get_arch i)
if [ "$ARCH" != "amd64" ]; then
ARCH="i686"
fi
extract_all () {
if [ -f "$DIR/tmp/pygogapi.zip" ]; then
7z x -aoa -o"$DIR/tmp/pygogapi" "$DIR/tmp/pygogapi.zip"
if [ ! -d "$DIR/gogapi" ]; then
mv "$DIR/tmp/pygogapi/pygogapi-master/gogapi" "$DIR/"
else
cp -r "$DIR/tmp/pygogapi/pygogapi-master/gogapi/"* "$DIR/gogapi/"
fi
fi
if [ -f "$DIR/tmp/goglib_scripts.zip" ]; then
7z x -aoa -o"$DIR/tmp/goglib_scripts" "$DIR/tmp/goglib_scripts.zip"
if [ ! -d "$DIR/scripts/goglib" ]; then
mv "$DIR/tmp/goglib_scripts/"* "$DIR/scripts/goglib"
else
cp -r "$DIR/tmp/goglib_scripts/"*/* "$DIR/scripts/goglib/"
fi
fi
if [ -f "$DIR/tmp/mylib_scripts.zip" ]; then
7z x -aoa -o"$DIR/tmp/mylib_scripts" "$DIR/tmp/mylib_scripts.zip"
if [ ! -d "$DIR/scripts/mylib" ]; then
mv "$DIR/tmp/mylib_scripts/"*/free "$DIR/scripts/mylib"
else
cp -r "$DIR/tmp/mylib_scripts/"*/free/* "$DIR/scripts/mylib/"
fi
cp -r "$DIR/tmp/mylib_scripts/"*/autosetup.ini "$DIR/scripts/mylib/"
fi
if [ -f "$DIR/tmp/goglib_images.zip" ]; then
7z x -aoa -o"$DIR/tmp/goglib_images" "$DIR/tmp/goglib_images.zip"
if [ ! -d "$DIR/images/goglib" ]; then
mv "$DIR/tmp/goglib_images/"* "$DIR/images/goglib"
else
cp -r "$DIR/tmp/goglib_images/"*/* "$DIR/images/goglib/"
fi
fi
if [ -f "$DIR/tmp/mylib_images.zip" ]; then
7z x -aoa -o"$DIR/tmp/mylib_images" "$DIR/tmp/mylib_images.zip"
if [ ! -d "$DIR/images/mylib" ]; then
mv "$DIR/tmp/mylib_images/"* "$DIR/images/mylib"
else
cp -r "$DIR/tmp/mylib_images/"*/* "$DIR/images/mylib/"
fi
fi
if [ -f "$DIR/tmp/innoextract.tar.xz" ]; then
mkdir -p "$DIR/tmp/innoextract"
tar xf "$DIR/tmp/innoextract.tar.xz" --strip-components=2 -C "$DIR/tmp/innoextract"
mkdir -p "$DIR/bin"
mv "$DIR/tmp/innoextract/$ARCH/innoextract" "$DIR/bin/"
fi
rm -r "$DIR/tmp"
}
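# 7z's -aoa flag overwrites existing files without prompting, so re-running
# the installer refreshes scripts and images in place.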
create_launcher () {
echo '#!/bin/bash
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
python "$DIR/games_nebula.py"' > "$DIR/start.sh"
chmod +x "$DIR/start.sh"
mkdir -p "$HOME/.local/share/applications"
echo "[Desktop Entry]
Name=Games Nebula
Comment=Application for managing and playing games
Exec=$DIR/start.sh
Icon=$DIR/images/icon.png
Type=Application
Terminal=false
Categories=Game;" > "$HOME/.local/share/applications/games_nebula.desktop"
chmod +x "$HOME/.local/share/applications/games_nebula.desktop"
}
mkdir -p "$DIR/tmp"
curl -L -o "$DIR/tmp/pygogapi.zip" "$PYGOGAPI" || \
error_message "Failed to download pygogapi"
if [ "$INSTALLATION_TYPE" == "auto" ]; then
curl -L -o "$DIR/tmp/goglib_scripts.zip" "$GOGLIB_SCRIPTS1" || \
curl -L -o "$DIR/tmp/goglib_scripts.zip" "$GOGLIB_SCRIPTS2" || \
curl -L -o "$DIR/tmp/goglib_scripts.zip" "$GOGLIB_SCRIPTS3" || \
error_message "Failed to download goglib_scripts" &&
curl -L -o "$DIR/tmp/mylib_scripts.zip" "$MYLIB_SCRIPTS1" || \
curl -L -o "$DIR/tmp/mylib_scripts.zip" "$MYLIB_SCRIPTS2" || \
curl -L -o "$DIR/tmp/mylib_scripts.zip" "$MYLIB_SCRIPTS3" || \
error_message "Failed to download mylib_scripts" &&
curl -L -o "$DIR/tmp/goglib_images.zip" "$GOGLIB_IMAGES1" || \
error_message "Failed to download goglib_images" &&
curl -L -o "$DIR/tmp/mylib_images.zip" "$MYLIB_IMAGES1" || \
error_message "Failed to download mylib_images" &&
extract_all || error_message "Failed to extract files" &&
echo -ne "${COLOR_LIGHT_GREEN}\nInstallation successful!${COLOR_RESET}\n"
else
# Install all components:
question_y_n "Download innoextract binary? (Useful only if you system innoextract version < 1.7)." \
"curl -L -o '$DIR/tmp/innoextract.tar.xz' '$INNOEXTRACT'" \
:
question_y_n "Install/reinstall all components?" \
"curl -L -o '$DIR/tmp/goglib_scripts.zip' '$GOGLIB_SCRIPTS1' || \
curl -L -o '$DIR/tmp/goglib_scripts.zip' '$GOGLIB_SCRIPTS2' || \
curl -L -o '$DIR/tmp/goglib_scripts.zip' '$GOGLIB_SCRIPTS3' || \
error_message 'Failed to download goglib_scripts' &&
curl -L -o '$DIR/tmp/mylib_scripts.zip' '$MYLIB_SCRIPTS1' || \
curl -L -o '$DIR/tmp/mylib_scripts.zip' '$MYLIB_SCRIPTS2' || \
curl -L -o '$DIR/tmp/mylib_scripts.zip' '$MYLIB_SCRIPTS3' || \
error_message 'Failed to download mylib_scripts' &&
curl -L -o '$DIR/tmp/goglib_images.zip' '$GOGLIB_IMAGES1' || \
error_message 'Failed to download goglib_images' &&
curl -L -o '$DIR/tmp/mylib_images.zip' '$MYLIB_IMAGES1' || \
error_message 'Failed to download mylib_images' &&
question_y_n 'Create launcher?' create_launcher : &&
extract_all || error_message 'Failed to extract files' &&
echo -ne '${COLOR_LIGHT_GREEN}\nInstallation successful!${COLOR_RESET}\n' &&
exit 0" \
:
# Or install selected components:
question_y_n "Install/reinstall goglib_scripts?" \
"curl -L -o '$DIR/tmp/goglib_scripts.zip' '$GOGLIB_SCRIPTS1' || \
curl -L -o '$DIR/tmp/goglib_scripts.zip' '$GOGLIB_SCRIPTS2' || \
curl -L -o '$DIR/tmp/goglib_scripts.zip' '$GOGLIB_SCRIPTS3' || \
error_message 'Failed to download goglib_scripts'" \
:
question_y_n "Install/reinstall mylib_scripts?" \
"curl -L -o '$DIR/tmp/mylib_scripts.zip' '$MYLIB_SCRIPTS1' || \
curl -L -o '$DIR/tmp/mylib_scripts.zip' '$MYLIB_SCRIPTS2' || \
curl -L -o '$DIR/tmp/mylib_scripts.zip' '$MYLIB_SCRIPTS3' || \
error_message 'Failed to download mylib_scripts'" \
:
question_y_n "Install/reinstall goglib_images?" \
"curl -L -o '$DIR/tmp/goglib_images.zip' '$GOGLIB_IMAGES1' || \
error_message 'Failed to download goglib_images'" \
:
question_y_n "Install/reinstall mylib_images?" \
"curl -L -o '$DIR/tmp/mylib_images.zip' '$MYLIB_IMAGES1' || \
error_message 'Failed to download mylib_images'" \
:
extract_all || error_message "Failed to extract files" &&
question_y_n "Create launcher?" create_launcher : &&
echo -ne "${COLOR_LIGHT_GREEN}\nInstallation successful!${COLOR_RESET}\n"
fi
|
yancharkin/games_nebula
|
setup.sh
|
Shell
|
gpl-3.0
| 7,543 |
#!/bin/bash
# Vesta RHEL/CentOS installer v.05
#----------------------------------------------------------#
# Variables&Functions #
#----------------------------------------------------------#
export PATH=$PATH:/sbin
RHOST='r.vestacp.com'
CHOST='c.vestacp.com'
REPO='cmmnt'
VERSION='rhel'
VESTA='/usr/local/vesta'
memory=$(grep 'MemTotal' /proc/meminfo |tr ' ' '\n' |grep [0-9])
arch=$(uname -i)
os=$(cut -f 1 -d ' ' /etc/redhat-release)
release=$(grep -o "[0-9]" /etc/redhat-release |head -n1)
codename="${os}_$release"
vestacp="$VESTA/install/$VERSION/$release"
# Defining software pack for all distros
software="awstats bc bind bind-libs bind-utils clamav-server clamav-update
curl dovecot e2fsprogs exim expect fail2ban flex freetype ftp GeoIP httpd
ImageMagick iptables-services jwhois lsof mailx mariadb mariadb-server mc
mod_fcgid mod_ruid2 mod_ssl net-tools nginx ntp openssh-clients pcre php
php-bcmath php-cli php-common php-fpm php-gd php-imap php-mbstring
php-mcrypt phpMyAdmin php-mysql php-pdo phpPgAdmin php-pgsql php-soap
php-tidy php-xml php-xmlrpc postgresql postgresql-contrib
postgresql-server proftpd roundcubemail rrdtool rsyslog screen
spamassassin sqlite sudo tar telnet unzip vesta vesta-ioncube vesta-nginx
vesta-php vesta-softaculous vim-common vsftpd webalizer which zip"
# Fix for old releases
if [ "$release" -lt 7 ]; then
software=$(echo "$software" |sed -e "s/mariadb/mysql/g")
software=$(echo "$software" |sed -e "s/clamav-server/clamd/")
software=$(echo "$software" |sed -e "s/clamav-update//")
software=$(echo "$software" |sed -e "s/iptables-services//")
software="$software mod_extract_forwarded"
fi
# Defining help function
help() {
echo "Usage: $0 [OPTIONS]
-a, --apache Install Apache [yes|no] default: yes
-n, --nginx Install Nginx [yes|no] default: yes
-w, --phpfpm Install PHP-FPM [yes|no] default: no
-v, --vsftpd Install Vsftpd [yes|no] default: yes
-j, --proftpd Install ProFTPD [yes|no] default: no
-k, --named Install Bind [yes|no] default: yes
-m, --mysql Install MySQL [yes|no] default: yes
-g, --postgresql Install PostgreSQL [yes|no] default: no
-d, --mongodb Install MongoDB [yes|no] unsupported
-x, --exim Install Exim [yes|no] default: yes
-z, --dovecot Install Dovecot [yes|no] default: yes
-c, --clamav Install ClamAV [yes|no] default: yes
-t, --spamassassin Install SpamAssassin [yes|no] default: yes
-i, --iptables Install Iptables [yes|no] default: yes
-b, --fail2ban Install Fail2ban [yes|no] default: yes
-r, --remi Install Remi repo [yes|no] default: yes
-o, --softaculous Install Softaculous [yes|no] default: yes
-q, --quota Filesystem Quota [yes|no] default: no
-l, --lang Default language default: en
-y, --interactive Interactive install [yes|no] default: yes
-s, --hostname Set hostname
-e, --email Set admin email
-p, --password Set admin password
-f, --force Force installation
-h, --help Print this help
Example: bash $0 -e [email protected] -p p4ssw0rd --apache no --phpfpm yes"
exit 1
}
# Defining password-gen function
gen_pass() {
MATRIX='0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
LENGTH=10
while [ ${n:=1} -le $LENGTH ]; do
PASS="$PASS${MATRIX:$(($RANDOM%${#MATRIX})):1}"
let n+=1
done
echo "$PASS"
}
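# Usage sketch: vpass=$(gen_pass) yields a 10-character alphanumeric
# string; running it in a $(...) subshell also keeps the counter
# variable n from leaking into the caller.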
# Defining return code check function
check_result() {
if [ $1 -ne 0 ]; then
echo "Error: $2"
exit $1
fi
}
# Defining function to set default value
set_default_value() {
eval variable=\$$1
if [ -z "$variable" ]; then
eval $1=$2
fi
if [ "$variable" != 'yes' ] && [ "$variable" != 'no' ]; then
eval $1=$2
fi
}
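# Example: set_default_value 'nginx' 'yes' keeps an explicit "no" passed
# on the command line but replaces any value other than yes/no with the
# default.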
# Defining function to set default language value
set_default_lang() {
if [ -z "$lang" ]; then
eval lang=$1
fi
lang_list="
ar cz el fa hu ja no pt se ua
bs da en fi id ka pl ro tr vi
cn de es fr it nl pt-BR ru tw
bg ko sr th ur"
if ! (echo $lang_list |grep -w $lang >/dev/null 2>&1); then
eval lang=$1
fi
}
#----------------------------------------------------------#
# Verifications #
#----------------------------------------------------------#
# Translating argument to --gnu-long-options
for arg; do
delim=""
case "$arg" in
--apache) args="${args}-a " ;;
--nginx) args="${args}-n " ;;
--phpfpm) args="${args}-w " ;;
--vsftpd) args="${args}-v " ;;
--proftpd) args="${args}-j " ;;
--named) args="${args}-k " ;;
--mysql) args="${args}-m " ;;
--postgresql) args="${args}-g " ;;
--mongodb) args="${args}-d " ;;
--exim) args="${args}-x " ;;
--dovecot) args="${args}-z " ;;
--clamav) args="${args}-c " ;;
--spamassassin) args="${args}-t " ;;
--iptables) args="${args}-i " ;;
--fail2ban) args="${args}-b " ;;
--remi) args="${args}-r " ;;
--softaculous) args="${args}-o " ;;
--quota) args="${args}-q " ;;
--lang) args="${args}-l " ;;
--interactive) args="${args}-y " ;;
--hostname) args="${args}-s " ;;
--email) args="${args}-e " ;;
--password) args="${args}-p " ;;
--force) args="${args}-f " ;;
--help) args="${args}-h " ;;
*) [[ "${arg:0:1}" == "-" ]] || delim="\""
args="${args}${delim}${arg}${delim} ";;
esac
done
eval set -- "$args"
# Parsing arguments
while getopts "a:n:w:v:j:k:m:g:d:x:z:c:t:i:b:r:o:q:l:y:s:e:p:fh" Option; do
case $Option in
a) apache=$OPTARG ;; # Apache
n) nginx=$OPTARG ;; # Nginx
w) phpfpm=$OPTARG ;; # PHP-FPM
v) vsftpd=$OPTARG ;; # Vsftpd
j) proftpd=$OPTARG ;; # Proftpd
k) named=$OPTARG ;; # Named
m) mysql=$OPTARG ;; # MySQL
g) postgresql=$OPTARG ;; # PostgreSQL
d) mongodb=$OPTARG ;; # MongoDB (unsupported)
x) exim=$OPTARG ;; # Exim
z) dovecot=$OPTARG ;; # Dovecot
c) clamd=$OPTARG ;; # ClamAV
t) spamd=$OPTARG ;; # SpamAssassin
i) iptables=$OPTARG ;; # Iptables
b) fail2ban=$OPTARG ;; # Fail2ban
r) remi=$OPTARG ;; # Remi repo
o) softaculous=$OPTARG ;; # Softaculous plugin
q) quota=$OPTARG ;; # FS Quota
l) lang=$OPTARG ;; # Language
y) interactive=$OPTARG ;; # Interactive install
s) servername=$OPTARG ;; # Hostname
e) email=$OPTARG ;; # Admin email
p) vpass=$OPTARG ;; # Admin password
f) force='yes' ;; # Force install
h) help ;; # Help
*) help ;; # Print help (default)
esac
done
# Defining default software stack
set_default_value 'nginx' 'yes'
set_default_value 'apache' 'yes'
set_default_value 'phpfpm' 'no'
set_default_value 'vsftpd' 'yes'
set_default_value 'proftpd' 'no'
set_default_value 'named' 'yes'
set_default_value 'mysql' 'yes'
set_default_value 'postgresql' 'no'
set_default_value 'mongodb' 'no'
set_default_value 'exim' 'yes'
set_default_value 'dovecot' 'yes'
if [ $memory -lt 1500000 ]; then
set_default_value 'clamd' 'no'
set_default_value 'spamd' 'no'
else
set_default_value 'clamd' 'yes'
set_default_value 'spamd' 'yes'
fi
set_default_value 'iptables' 'yes'
set_default_value 'fail2ban' 'yes'
set_default_value 'remi' 'yes'
set_default_value 'softaculous' 'yes'
set_default_value 'quota' 'no'
set_default_value 'interactive' 'yes'
set_default_lang 'en'
# Checking software conflicts
if [ "$phpfpm" = 'yes' ]; then
apache='no'
nginx='yes'
fi
if [ "$proftpd" = 'yes' ]; then
vsftpd='no'
fi
if [ "$exim" = 'no' ]; then
clamd='no'
spamd='no'
dovecot='no'
fi
if [ "$iptables" = 'no' ]; then
fail2ban='no'
fi
# Checking root permissions
if [ "x$(id -u)" != 'x0' ]; then
check_result 1 "Script can be run executed only by root"
fi
# Checking admin user account
if [ ! -z "$(grep ^admin: /etc/passwd /etc/group)" ] && [ -z "$force" ]; then
echo 'Please remove the admin user account before proceeding.'
echo 'If you want to do it automatically run installer with -f option:'
echo -e "Example: bash $0 --force\n"
check_result 1 "User admin exists"
fi
# Checking wget
if [ ! -e '/usr/bin/wget' ]; then
yum -y install wget
check_result $? "Can't install wget"
fi
# Checking repository availability
wget -q "c.vestacp.com/GPG.txt" -O /dev/null
check_result $? "No access to Vesta repository"
# Checking installed packages
tmpfile=$(mktemp -p /tmp)
rpm -qa > $tmpfile
for pkg in exim mysql-server httpd nginx vesta; do
if [ ! -z "$(grep $pkg $tmpfile)" ]; then
conflicts="$pkg $conflicts"
fi
done
rm -f $tmpfile
if [ ! -z "$conflicts" ] && [ -z "$force" ]; then
echo '!!! !!! !!! !!! !!! !!! !!! !!! !!! !!! !!! !!! !!! !!! !!! !!! !!!'
echo
echo 'Following packages are already installed:'
echo "$conflicts"
echo
echo 'It is highly recommended to remove them before proceeding.'
echo 'If you want to force installation run this script with -f option:'
echo "Example: bash $0 --force"
echo
echo '!!! !!! !!! !!! !!! !!! !!! !!! !!! !!! !!! !!! !!! !!! !!! !!! !!!'
echo
check_result 1 "Control Panel should be installed on clean server."
fi
#----------------------------------------------------------#
# Brief Info #
#----------------------------------------------------------#
# Printing nice ASCII logo
clear
echo
echo ' _| _| _|_|_|_| _|_|_| _|_|_|_|_| _|_|'
echo ' _| _| _| _| _| _| _|'
echo ' _| _| _|_|_| _|_| _| _|_|_|_|'
echo ' _| _| _| _| _| _| _|'
echo ' _| _|_|_|_| _|_|_| _| _| _|'
echo
echo ' Vesta Control Panel'
echo -e "\n\n"
echo 'The following software will be installed on your system:'
# Web stack
if [ "$nginx" = 'yes' ]; then
echo ' - Nginx Web Server'
fi
if [ "$apache" = 'yes' ] && [ "$nginx" = 'no' ] ; then
echo ' - Apache Web Server'
fi
if [ "$apache" = 'yes' ] && [ "$nginx" = 'yes' ] ; then
echo ' - Apache Web Server (as backend)'
fi
if [ "$phpfpm" = 'yes' ]; then
echo ' - PHP-FPM Application Server'
fi
# DNS stack
if [ "$named" = 'yes' ]; then
echo ' - Bind DNS Server'
fi
# Mail stack
if [ "$exim" = 'yes' ]; then
echo -n ' - Exim Mail Server'
if [ "$clamd" = 'yes' ] || [ "$spamd" = 'yes' ] ; then
echo -n ' + '
if [ "$clamd" = 'yes' ]; then
echo -n 'ClamAV '
fi
if [ "$spamd" = 'yes' ]; then
echo -n 'SpamAssassin'
fi
fi
echo
if [ "$dovecot" = 'yes' ]; then
echo ' - Dovecot POP3/IMAP Server'
fi
fi
# Database stack
if [ "$mysql" = 'yes' ]; then
if [ $release -ge 7 ]; then
echo ' - MariaDB Database Server'
else
echo ' - MySQL Database Server'
fi
fi
if [ "$postgresql" = 'yes' ]; then
echo ' - PostgreSQL Database Server'
fi
if [ "$mongodb" = 'yes' ]; then
echo ' - MongoDB Database Server'
fi
# FTP stack
if [ "$vsftpd" = 'yes' ]; then
echo ' - Vsftpd FTP Server'
fi
if [ "$proftpd" = 'yes' ]; then
echo ' - ProFTPD FTP Server'
fi
# Softaculous
if [ "$softaculous" = 'yes' ]; then
echo ' - Softaculous Plugin'
fi
# Firewall stack
if [ "$iptables" = 'yes' ]; then
echo -n ' - Iptables Firewall'
fi
if [ "$iptables" = 'yes' ] && [ "$fail2ban" = 'yes' ]; then
echo -n ' + Fail2Ban'
fi
echo -e "\n\n"
# Asking for confirmation to proceed
if [ "$interactive" = 'yes' ]; then
read -p 'Would you like to continue [y/n]: ' answer
if [ "$answer" != 'y' ] && [ "$answer" != 'Y' ]; then
echo 'Goodbye'
exit 1
fi
# Asking for contact email
if [ -z "$email" ]; then
read -p 'Please enter admin email address: ' email
fi
# Asking to set FQDN hostname
if [ -z "$servername" ]; then
read -p "Please enter FQDN hostname [$(hostname -f)]: " servername
fi
fi
# Generating admin password if it wasn't set
if [ -z "$vpass" ]; then
vpass=$(gen_pass)
fi
# Set hostname if it wasn't set
if [ -z "$servername" ]; then
servername=$(hostname -f)
fi
# Set FQDN if it wasn't set
mask1='(([[:alnum:]](-?[[:alnum:]])*)\.)'
mask2='*[[:alnum:]](-?[[:alnum:]])+\.[[:alnum:]]{2,}'
if ! [[ "$servername" =~ ^${mask1}${mask2}$ ]]; then
if [ ! -z "$servername" ]; then
servername="$servername.example.com"
else
servername="example.com"
fi
echo "127.0.0.1 $servername" >> /etc/hosts
fi
# Set email if it wasn't set
if [ -z "$email" ]; then
email="admin@$servername"
fi
# Defining backup directory
vst_backups="/root/vst_install_backups/$(date +%s)"
echo "Installation backup directory: $vst_backups"
# Printing start message and sleeping for 5 seconds
echo -e "\n\n\n\nInstallation will take about 15 minutes ...\n"
sleep 5
#----------------------------------------------------------#
# Checking swap #
#----------------------------------------------------------#
# Checking swap on small instances
if [ -z "$(swapon -s)" ] && [ $memory -lt 1000000 ]; then
fallocate -l 1G /swapfile
chmod 600 /swapfile
mkswap /swapfile
swapon /swapfile
echo "/swapfile none swap sw 0 0" >> /etc/fstab
fi
#----------------------------------------------------------#
# Install repository #
#----------------------------------------------------------#
# Updating system
yum -y update
check_result $? 'yum update failed'
# Installing EPEL repository
yum install epel-release -y
check_result $? "Can't install EPEL repository"
# Installing Remi repository
if [ "$remi" = 'yes' ] && [ ! -e "/etc/yum.repos.d/remi.repo" ]; then
rpm -Uvh http://rpms.remirepo.net/enterprise/remi-release-$release.rpm
check_result $? "Can't install REMI repository"
sed -i "s/enabled=0/enabled=1/g" /etc/yum.repos.d/remi.repo
fi
# Installing Nginx repository
nrepo="/etc/yum.repos.d/nginx.repo"
echo "[nginx]" > $nrepo
echo "name=nginx repo" >> $nrepo
echo "baseurl=http://nginx.org/packages/centos/$release/\$basearch/" >> $nrepo
echo "gpgcheck=0" >> $nrepo
echo "enabled=1" >> $nrepo
# Installing Vesta repository
vrepo='/etc/yum.repos.d/vesta.repo'
echo "[vesta]" > $vrepo
echo "name=Vesta - $REPO" >> $vrepo
echo "baseurl=http://$RHOST/$REPO/$release/\$basearch/" >> $vrepo
echo "enabled=1" >> $vrepo
echo "gpgcheck=1" >> $vrepo
echo "gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-VESTA" >> $vrepo
wget c.vestacp.com/GPG.txt -O /etc/pki/rpm-gpg/RPM-GPG-KEY-VESTA
#----------------------------------------------------------#
# Backup #
#----------------------------------------------------------#
# Creating backup directory tree
mkdir -p $vst_backups
cd $vst_backups
mkdir nginx httpd php php-fpm vsftpd proftpd named exim dovecot clamd \
spamassassin mysql postgresql mongodb vesta
# Backup Nginx configuration
service nginx stop > /dev/null 2>&1
cp -r /etc/nginx/* $vst_backups/nginx > /dev/null 2>&1
# Backup Apache configuration
service httpd stop > /dev/null 2>&1
cp -r /etc/httpd/* $vst_backups/httpd > /dev/null 2>&1
# Backup PHP-FPM configuration
service php-fpm stop >/dev/null 2>&1
cp /etc/php.ini $vst_backups/php > /dev/null 2>&1
cp -r /etc/php.d $vst_backups/php > /dev/null 2>&1
cp /etc/php-fpm.conf $vst_backups/php-fpm > /dev/null 2>&1
mv -f /etc/php-fpm.d/* $vst_backups/php-fpm/ > /dev/null 2>&1
# Backup Bind configuration
yum remove bind-chroot > /dev/null 2>&1
service named stop > /dev/null 2>&1
cp /etc/named.conf $vst_backups/named >/dev/null 2>&1
# Backup Vsftpd configuration
service vsftpd stop > /dev/null 2>&1
cp /etc/vsftpd/vsftpd.conf $vst_backups/vsftpd >/dev/null 2>&1
# Backup ProFTPD configuration
service proftpd stop > /dev/null 2>&1
cp /etc/proftpd.conf $vst_backups/proftpd >/dev/null 2>&1
# Backup Exim configuration
service exim stop > /dev/null 2>&1
cp -r /etc/exim/* $vst_backups/exim >/dev/null 2>&1
# Backup ClamAV configuration
service clamd stop > /dev/null 2>&1
cp /etc/clamd.conf $vst_backups/clamd >/dev/null 2>&1
cp -r /etc/clamd.d $vst_backups/clamd >/dev/null 2>&1
# Backup SpamAssassin configuration
service spamassassin stop > /dev/null 2>&1
cp -r /etc/mail/spamassassin/* $vst_backups/spamassassin >/dev/null 2>&1
# Backup Dovecot configuration
service dovecot stop > /dev/null 2>&1
cp /etc/dovecot.conf $vst_backups/dovecot > /dev/null 2>&1
cp -r /etc/dovecot/* $vst_backups/dovecot > /dev/null 2>&1
# Backup MySQL/MariaDB configuration and data
service mysql stop > /dev/null 2>&1
service mysqld stop > /dev/null 2>&1
service mariadb stop > /dev/null 2>&1
mv /var/lib/mysql $vst_backups/mysql/mysql_datadir >/dev/null 2>&1
cp /etc/my.cnf $vst_backups/mysql > /dev/null 2>&1
cp -r /etc/my.cnf.d $vst_backups/mysql > /dev/null 2>&1
mv /root/.my.cnf $vst_backups/mysql > /dev/null 2>&1
# Backup PostgreSQL configuration and data
service postgresql stop > /dev/null 2>&1
mv /var/lib/pgsql/data $vst_backups/postgresql/ >/dev/null 2>&1
# Backup Vesta
service vesta stop > /dev/null 2>&1
mv $VESTA/data/* $vst_backups/vesta > /dev/null 2>&1
mv $VESTA/conf/* $vst_backups/vesta > /dev/null 2>&1
#----------------------------------------------------------#
# Package Excludes #
#----------------------------------------------------------#
# Excluding packages
if [ "$nginx" = 'no' ]; then
software=$(echo "$software" | sed -e "s/^nginx//")
fi
if [ "$apache" = 'no' ]; then
software=$(echo "$software" | sed -e "s/httpd//")
software=$(echo "$software" | sed -e "s/mod_ssl//")
software=$(echo "$software" | sed -e "s/mod_fcgid//")
software=$(echo "$software" | sed -e "s/mod_ruid2//")
fi
if [ "$phpfpm" = 'no' ]; then
software=$(echo "$software" | sed -e "s/php-fpm//")
fi
if [ "$vsftpd" = 'no' ]; then
software=$(echo "$software" | sed -e "s/vsftpd//")
fi
if [ "$proftpd" = 'no' ]; then
software=$(echo "$software" | sed -e "s/proftpd//")
fi
if [ "$named" = 'no' ]; then
software=$(echo "$software" | sed -e "s/bind //")
fi
if [ "$exim" = 'no' ]; then
software=$(echo "$software" | sed -e "s/exim//")
software=$(echo "$software" | sed -e "s/dovecot//")
software=$(echo "$software" | sed -e "s/clamd//")
software=$(echo "$software" | sed -e "s/clamav-server//")
software=$(echo "$software" | sed -e "s/clamav-update//")
software=$(echo "$software" | sed -e "s/spamassassin//")
software=$(echo "$software" | sed -e "s/dovecot//")
software=$(echo "$software" | sed -e "s/roundcubemail//")
fi
if [ "$clamd" = 'no' ]; then
software=$(echo "$software" | sed -e "s/clamd//")
software=$(echo "$software" | sed -e "s/clamav-server//")
software=$(echo "$software" | sed -e "s/clamav-update//")
fi
if [ "$spamd" = 'no' ]; then
software=$(echo "$software" | sed -e 's/spamassassin//')
fi
if [ "$dovecot" = 'no' ]; then
software=$(echo "$software" | sed -e "s/dovecot//")
fi
if [ "$mysql" = 'no' ]; then
software=$(echo "$software" | sed -e 's/mysql //')
software=$(echo "$software" | sed -e 's/mysql-server//')
software=$(echo "$software" | sed -e 's/mariadb //')
software=$(echo "$software" | sed -e 's/mariadb-server//')
software=$(echo "$software" | sed -e 's/php-mysql//')
software=$(echo "$software" | sed -e 's/phpMyAdmin//')
software=$(echo "$software" | sed -e 's/roundcubemail//')
fi
if [ "$postgresql" = 'no' ]; then
software=$(echo "$software" | sed -e 's/postgresql //')
software=$(echo "$software" | sed -e 's/postgresql-server//')
software=$(echo "$software" | sed -e 's/postgresql-contrib//')
software=$(echo "$software" | sed -e 's/php-pgsql//')
software=$(echo "$software" | sed -e 's/phpPgAdmin//')
fi
if [ "$softaculous" = 'no' ]; then
software=$(echo "$software" | sed -e 's/vesta-softaculous//')
fi
if [ "$iptables" = 'no' ] || [ "$fail2ban" = 'no' ]; then
software=$(echo "$software" | sed -e 's/fail2ban//')
fi
#----------------------------------------------------------#
# Install packages #
#----------------------------------------------------------#
# Installing rpm packages
yum install -y $software
if [ $? -ne 0 ]; then
if [ "$remi" = 'yes' ]; then
yum -y --disablerepo=* \
--enablerepo="*base,*updates,nginx,epel,vesta,remi*" \
install $software
else
yum -y --disablerepo=* --enablerepo="*base,*updates,nginx,epel,vesta" \
install $software
fi
fi
check_result $? "yum install failed"
#----------------------------------------------------------#
# Configure system #
#----------------------------------------------------------#
# Restarting rsyslog
service rsyslog restart > /dev/null 2>&1
# Checking ipv6 on loopback interface
check_lo_ipv6=$(/sbin/ip addr | grep 'inet6')
check_rc_ipv6=$(grep 'scope global dev lo' /etc/rc.local)
if [ ! -z "$check_lo_ipv6)" ] && [ -z "$check_rc_ipv6" ]; then
ip addr add ::2/128 scope global dev lo
echo "# Vesta: Workraround for openssl validation func" >> /etc/rc.local
echo "ip addr add ::2/128 scope global dev lo" >> /etc/rc.local
chmod a+x /etc/rc.local
fi
# Disabling SELinux
if [ -e '/etc/sysconfig/selinux' ]; then
sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/sysconfig/selinux
sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
setenforce 0 2>/dev/null
fi
# Disabling iptables
service iptables stop
# Configuring NTP synchronization
echo '#!/bin/sh' > /etc/cron.daily/ntpdate
echo "$(which ntpdate) -s pool.ntp.org" >> /etc/cron.daily/ntpdate
chmod 775 /etc/cron.daily/ntpdate
ntpdate -s pool.ntp.org
# Disabling webalizer routine
rm -f /etc/cron.daily/00webalizer
# Adding backup user
adduser backup 2>/dev/null
ln -sf /home/backup /backup
chmod a+x /backup
# Set directory color
echo 'LS_COLORS="$LS_COLORS:di=00;33"' >> /etc/profile
# Changing default systemd interval
if [ "$release" -eq '7' ]; then
# Hi Lennart
echo "DefaultStartLimitInterval=1s" >> /etc/systemd/system.conf
echo "DefaultStartLimitBurst=60" >> /etc/systemd/system.conf
systemctl daemon-reexec
fi
#----------------------------------------------------------#
# Configure VESTA #
#----------------------------------------------------------#
# Installing sudo configuration
mkdir -p /etc/sudoers.d
cp -f $vestacp/sudo/admin /etc/sudoers.d/
chmod 440 /etc/sudoers.d/admin
# Configuring system env
echo "export VESTA='$VESTA'" > /etc/profile.d/vesta.sh
chmod 755 /etc/profile.d/vesta.sh
source /etc/profile.d/vesta.sh
echo 'PATH=$PATH:'$VESTA'/bin' >> /root/.bash_profile
echo 'export PATH' >> /root/.bash_profile
source /root/.bash_profile
# Configuring logrotate for vesta logs
cp -f $vestacp/logrotate/vesta /etc/logrotate.d/
# Building directory tree and creating some blank files for Vesta
mkdir -p $VESTA/conf $VESTA/log $VESTA/ssl $VESTA/data/ips \
$VESTA/data/queue $VESTA/data/users $VESTA/data/firewall \
$VESTA/data/sessions
touch $VESTA/data/queue/backup.pipe $VESTA/data/queue/disk.pipe \
$VESTA/data/queue/webstats.pipe $VESTA/data/queue/restart.pipe \
$VESTA/data/queue/traffic.pipe $VESTA/log/system.log \
$VESTA/log/nginx-error.log $VESTA/log/auth.log
chmod 750 $VESTA/conf $VESTA/data/users $VESTA/data/ips $VESTA/log
chmod -R 750 $VESTA/data/queue
chmod 660 $VESTA/log/*
rm -f /var/log/vesta
ln -s $VESTA/log /var/log/vesta
chmod 770 $VESTA/data/sessions
# Generating Vesta configuration
rm -f $VESTA/conf/vesta.conf 2>/dev/null
touch $VESTA/conf/vesta.conf
chmod 660 $VESTA/conf/vesta.conf
# Web stack
if [ "$apache" = 'yes' ] && [ "$nginx" = 'no' ] ; then
echo "WEB_SYSTEM='httpd'" >> $VESTA/conf/vesta.conf
echo "WEB_RGROUPS='apache'" >> $VESTA/conf/vesta.conf
echo "WEB_PORT='80'" >> $VESTA/conf/vesta.conf
echo "WEB_SSL_PORT='443'" >> $VESTA/conf/vesta.conf
echo "WEB_SSL='mod_ssl'" >> $VESTA/conf/vesta.conf
echo "STATS_SYSTEM='webalizer,awstats'" >> $VESTA/conf/vesta.conf
fi
if [ "$apache" = 'yes' ] && [ "$nginx" = 'yes' ] ; then
echo "WEB_SYSTEM='httpd'" >> $VESTA/conf/vesta.conf
echo "WEB_RGROUPS='apache'" >> $VESTA/conf/vesta.conf
echo "WEB_PORT='8080'" >> $VESTA/conf/vesta.conf
echo "WEB_SSL_PORT='8443'" >> $VESTA/conf/vesta.conf
echo "WEB_SSL='mod_ssl'" >> $VESTA/conf/vesta.conf
echo "PROXY_SYSTEM='nginx'" >> $VESTA/conf/vesta.conf
echo "PROXY_PORT='80'" >> $VESTA/conf/vesta.conf
echo "PROXY_SSL_PORT='443'" >> $VESTA/conf/vesta.conf
echo "STATS_SYSTEM='webalizer,awstats'" >> $VESTA/conf/vesta.conf
fi
if [ "$apache" = 'no' ] && [ "$nginx" = 'yes' ]; then
echo "WEB_SYSTEM='nginx'" >> $VESTA/conf/vesta.conf
echo "WEB_PORT='80'" >> $VESTA/conf/vesta.conf
echo "WEB_SSL_PORT='443'" >> $VESTA/conf/vesta.conf
echo "WEB_SSL='openssl'" >> $VESTA/conf/vesta.conf
if [ "$phpfpm" = 'yes' ]; then
echo "WEB_BACKEND='php-fpm'" >> $VESTA/conf/vesta.conf
fi
echo "STATS_SYSTEM='webalizer,awstats'" >> $VESTA/conf/vesta.conf
fi
# FTP stack
if [ "$vsftpd" = 'yes' ]; then
echo "FTP_SYSTEM='vsftpd'" >> $VESTA/conf/vesta.conf
fi
if [ "$proftpd" = 'yes' ]; then
echo "FTP_SYSTEM='proftpd'" >> $VESTA/conf/vesta.conf
fi
# DNS stack
if [ "$named" = 'yes' ]; then
echo "DNS_SYSTEM='named'" >> $VESTA/conf/vesta.conf
fi
# Mail stack
if [ "$exim" = 'yes' ]; then
echo "MAIL_SYSTEM='exim'" >> $VESTA/conf/vesta.conf
if [ "$clamd" = 'yes' ]; then
echo "ANTIVIRUS_SYSTEM='clamav'" >> $VESTA/conf/vesta.conf
fi
if [ "$spamd" = 'yes' ]; then
echo "ANTISPAM_SYSTEM='spamassassin'" >> $VESTA/conf/vesta.conf
fi
if [ "$dovecot" = 'yes' ]; then
echo "IMAP_SYSTEM='dovecot'" >> $VESTA/conf/vesta.conf
fi
fi
# Cron daemon
echo "CRON_SYSTEM='crond'" >> $VESTA/conf/vesta.conf
# Firewall stack
if [ "$iptables" = 'yes' ]; then
echo "FIREWALL_SYSTEM='iptables'" >> $VESTA/conf/vesta.conf
fi
if [ "$iptables" = 'yes' ] && [ "$fail2ban" = 'yes' ]; then
echo "FIREWALL_EXTENSION='fail2ban'" >> $VESTA/conf/vesta.conf
fi
# Disk quota
if [ "$quota" = 'yes' ]; then
echo "DISK_QUOTA='yes'" >> $VESTA/conf/vesta.conf
fi
# Backups
echo "BACKUP_SYSTEM='local'" >> $VESTA/conf/vesta.conf
# Language
echo "LANGUAGE='$lang'" >> $VESTA/conf/vesta.conf
# Version
echo "VERSION='0.9.8'" >> $VESTA/conf/vesta.conf
# Installing hosting packages
cp -rf $vestacp/packages $VESTA/data/
# Installing templates
cp -rf $vestacp/templates $VESTA/data/
# Copying index.html to default documentroot
cp $VESTA/data/templates/web/skel/public_html/index.html /var/www/html/
sed -i 's/%domain%/It worked!/g' /var/www/html/index.html
# Installing firewall rules
cp -rf $vestacp/firewall $VESTA/data/
# Configuring server hostname
$VESTA/bin/v-change-sys-hostname $servername 2>/dev/null
# Generating SSL certificate
$VESTA/bin/v-generate-ssl-cert $(hostname) $email 'US' 'California' \
'San Francisco' 'Vesta Control Panel' 'IT' > /tmp/vst.pem
# Parsing certificate file
crt_end=$(grep -n "END CERTIFICATE-" /tmp/vst.pem |cut -f 1 -d:)
key_start=$(grep -n "BEGIN RSA" /tmp/vst.pem |cut -f 1 -d:)
key_end=$(grep -n "END RSA" /tmp/vst.pem |cut -f 1 -d:)
# Adding SSL certificate
cd $VESTA/ssl
sed -n "1,${crt_end}p" /tmp/vst.pem > certificate.crt
sed -n "$key_start,${key_end}p" /tmp/vst.pem > certificate.key
chown root:mail $VESTA/ssl/*
chmod 660 $VESTA/ssl/*
rm /tmp/vst.pem
#----------------------------------------------------------#
# Configure Nginx #
#----------------------------------------------------------#
if [ "$nginx" = 'yes' ]; then
rm -f /etc/nginx/conf.d/*.conf
cp -f $vestacp/nginx/nginx.conf /etc/nginx/
cp -f $vestacp/nginx/status.conf /etc/nginx/conf.d/
cp -f $vestacp/nginx/phpmyadmin.inc /etc/nginx/conf.d/
cp -f $vestacp/nginx/phppgadmin.inc /etc/nginx/conf.d/
cp -f $vestacp/nginx/webmail.inc /etc/nginx/conf.d/
cp -f $vestacp/logrotate/nginx /etc/logrotate.d/
echo > /etc/nginx/conf.d/vesta.conf
mkdir -p /var/log/nginx/domains
if [ "$release" -ge 7 ]; then
mkdir -p /etc/systemd/system/nginx.service.d
cd /etc/systemd/system/nginx.service.d
echo "[Service]" > limits.conf
echo "LimitNOFILE=500000" >> limits.conf
fi
chkconfig nginx on
service nginx start
check_result $? "nginx start failed"
# Workaround for OpenVZ/Virtuozzo
if [ "$release" -ge '7' ] && [ -e "/proc/vz/veinfo" ]; then
echo "#Vesta: workraround for networkmanager" >> /etc/rc.local
echo "sleep 3 && service nginx restart" >> /etc/rc.local
fi
fi
#----------------------------------------------------------#
# Configure Apache #
#----------------------------------------------------------#
if [ "$apache" = 'yes' ]; then
cp -f $vestacp/httpd/httpd.conf /etc/httpd/conf/
cp -f $vestacp/httpd/status.conf /etc/httpd/conf.d/
cp -f $vestacp/httpd/ssl.conf /etc/httpd/conf.d/
cp -f $vestacp/httpd/ruid2.conf /etc/httpd/conf.d/
cp -f $vestacp/logrotate/httpd /etc/logrotate.d/
if [ $release -lt 7 ]; then
cd /etc/httpd/conf.d
echo "MEFaccept 127.0.0.1" >> mod_extract_forwarded.conf
echo > proxy_ajp.conf
fi
if [ -e "/etc/httpd/conf.modules.d/00-dav.conf" ]; then
cd /etc/httpd/conf.modules.d
sed -i "s/^/#/" 00-dav.conf 00-lua.conf 00-proxy.conf
fi
echo > /etc/httpd/conf.d/vesta.conf
cd /var/log/httpd
touch access_log error_log suexec.log
chmod 640 access_log error_log suexec.log
chmod -f 777 /var/lib/php/session
chmod a+x /var/log/httpd
mkdir -p /var/log/httpd/domains
chmod 751 /var/log/httpd/domains
if [ "$release" -ge 7 ]; then
mkdir -p /etc/systemd/system/httpd.service.d
cd /etc/systemd/system/httpd.service.d
echo "[Service]" > limits.conf
echo "LimitNOFILE=500000" >> limits.conf
fi
chkconfig httpd on
service httpd start
check_result $? "httpd start failed"
# Workaround for OpenVZ/Virtuozzo
if [ "$release" -ge '7' ] && [ -e "/proc/vz/veinfo" ]; then
echo "#Vesta: workraround for networkmanager" >> /etc/rc.local
echo "sleep 2 && service httpd restart" >> /etc/rc.local
fi
fi
#----------------------------------------------------------#
# Configure PHP-FPM #
#----------------------------------------------------------#
if [ "$phpfpm" = 'yes' ]; then
cp -f $vestacp/php-fpm/www.conf /etc/php-fpm.d/
chkconfig php-fpm on
service php-fpm start
check_result $? "php-fpm start failed"
fi
#----------------------------------------------------------#
# Configure PHP #
#----------------------------------------------------------#
ZONE=$(timedatectl 2>/dev/null|grep Timezone|awk '{print $2}')
if [ -e '/etc/sysconfig/clock' ]; then
source /etc/sysconfig/clock
fi
if [ -z "$ZONE" ]; then
ZONE='UTC'
fi
for pconf in $(find /etc/php* -name php.ini); do
sed -i "s|;date.timezone =|date.timezone = $ZONE|g" $pconf
sed -i 's%_open_tag = Off%_open_tag = On%g' $pconf
done
#----------------------------------------------------------#
# Configure Vsftpd #
#----------------------------------------------------------#
if [ "$vsftpd" = 'yes' ]; then
cp -f $vestacp/vsftpd/vsftpd.conf /etc/vsftpd/
chkconfig vsftpd on
service vsftpd start
check_result $? "vsftpd start failed"
fi
#----------------------------------------------------------#
# Configure ProFTPD #
#----------------------------------------------------------#
if [ "$proftpd" = 'yes' ]; then
cp -f $vestacp/proftpd/proftpd.conf /etc/
chkconfig proftpd on
service proftpd start
check_result $? "proftpd start failed"
fi
#----------------------------------------------------------#
# Configure MySQL/MariaDB #
#----------------------------------------------------------#
if [ "$mysql" = 'yes' ]; then
mycnf="my-small.cnf"
if [ $memory -gt 1200000 ]; then
mycnf="my-medium.cnf"
fi
if [ $memory -gt 3900000 ]; then
mycnf="my-large.cnf"
fi
mkdir -p /var/lib/mysql
chown mysql:mysql /var/lib/mysql
mkdir -p /etc/my.cnf.d
if [ $release -lt 7 ]; then
service='mysqld'
else
service='mariadb'
fi
cp -f $vestacp/$service/$mycnf /etc/my.cnf
chkconfig $service on
service $service start
if [ "$?" -ne 0 ]; then
if [ -e "/proc/user_beancounters" ]; then
# Fix for aio on OpenVZ
sed -i "s/#innodb_use_native/innodb_use_native/g" /etc/my.cnf
fi
service $service start
check_result $? "$service start failed"
fi
# Securing MySQL installation
mysqladmin -u root password $vpass
echo -e "[client]\npassword='$vpass'\n" > /root/.my.cnf
chmod 600 /root/.my.cnf
mysql -e "DELETE FROM mysql.user WHERE User=''"
mysql -e "DROP DATABASE test" >/dev/null 2>&1
mysql -e "DELETE FROM mysql.db WHERE Db='test' OR Db='test\\_%'"
mysql -e "DELETE FROM mysql.user WHERE user='' or password='';"
mysql -e "FLUSH PRIVILEGES"
# Configuring phpMyAdmin
if [ "$apache" = 'yes' ]; then
cp -f $vestacp/pma/phpMyAdmin.conf /etc/httpd/conf.d/
fi
cp -f $vestacp/pma/config.inc.conf /etc/phpMyAdmin/config.inc.php
sed -i "s/%blowfish_secret%/$(gen_pass)/g" /etc/phpMyAdmin/config.inc.php
fi
#----------------------------------------------------------#
# Configure PostgreSQL #
#----------------------------------------------------------#
if [ "$postgresql" = 'yes' ]; then
if [ $release -eq 5 ]; then
service postgresql start
sudo -u postgres psql -c "ALTER USER postgres WITH PASSWORD '$vpass'"
service postgresql stop
cp -f $vestacp/postgresql/pg_hba.conf /var/lib/pgsql/data/
service postgresql start
else
service postgresql initdb
cp -f $vestacp/postgresql/pg_hba.conf /var/lib/pgsql/data/
service postgresql start
sudo -u postgres psql -c "ALTER USER postgres WITH PASSWORD '$vpass'"
fi
# Configuring phpPgAdmin
if [ "$apache" = 'yes' ]; then
cp -f $vestacp/pga/phpPgAdmin.conf /etc/httpd/conf.d/
fi
cp -f $vestacp/pga/config.inc.php /etc/phpPgAdmin/
fi
#----------------------------------------------------------#
# Configure Bind #
#----------------------------------------------------------#
if [ "$named" = 'yes' ]; then
cp -f $vestacp/named/named.conf /etc/
chown root:named /etc/named.conf
chmod 640 /etc/named.conf
chkconfig named on
service named start
check_result $? "named start failed"
fi
#----------------------------------------------------------#
# Configure Exim #
#----------------------------------------------------------#
if [ "$exim" = 'yes' ]; then
gpasswd -a exim mail
cp -f $vestacp/exim/exim.conf /etc/exim/
cp -f $vestacp/exim/dnsbl.conf /etc/exim/
cp -f $vestacp/exim/spam-blocks.conf /etc/exim/
touch /etc/exim/white-blocks.conf
if [ "$spamd" = 'yes' ]; then
sed -i "s/#SPAM/SPAM/g" /etc/exim/exim.conf
fi
if [ "$clamd" = 'yes' ]; then
sed -i "s/#CLAMD/CLAMD/g" /etc/exim/exim.conf
fi
chmod 640 /etc/exim/exim.conf
rm -rf /etc/exim/domains
mkdir -p /etc/exim/domains
rm -f /etc/alternatives/mta
ln -s /usr/sbin/sendmail.exim /etc/alternatives/mta
chkconfig sendmail off 2>/dev/null
service sendmail stop 2>/dev/null
chkconfig postfix off 2>/dev/null
service postfix stop 2>/dev/null
chkconfig exim on
service exim start
check_result $? "exim start failed"
fi
#----------------------------------------------------------#
# Configure Dovecot #
#----------------------------------------------------------#
if [ "$dovecot" = 'yes' ]; then
gpasswd -a dovecot mail
cp -rf $vestacp/dovecot /etc/
cp -f $vestacp/logrotate/dovecot /etc/logrotate.d/
chown -R root:root /etc/dovecot*
chkconfig dovecot on
service dovecot start
check_result $? "dovecot start failed"
fi
#----------------------------------------------------------#
# Configure ClamAV #
#----------------------------------------------------------#
if [ "$clamd" = 'yes' ]; then
useradd clam -s /sbin/nologin -d /var/lib/clamav 2>/dev/null
gpasswd -a clam exim
gpasswd -a clam mail
cp -f $vestacp/clamav/clamd.conf /etc/
cp -f $vestacp/clamav/freshclam.conf /etc/
mkdir -p /var/log/clamav /var/run/clamav
chown clam:clam /var/log/clamav /var/run/clamav
chown -R clam:clam /var/lib/clamav
if [ "$release" -ge '7' ]; then
cp -f $vestacp/clamav/clamd.service /usr/lib/systemd/system/
systemctl --system daemon-reload
fi
/usr/bin/freshclam
if [ "$release" -ge '7' ]; then
sed -i "s/nofork/foreground/" /usr/lib/systemd/system/clamd.service
systemctl daemon-reload
fi
chkconfig clamd on
service clamd start
#check_result $? "clamd start failed"
fi
#----------------------------------------------------------#
# Configure SpamAssassin #
#----------------------------------------------------------#
if [ "$spamd" = 'yes' ]; then
chkconfig spamassassin on
service spamassassin start
check_result $? "spamassassin start failed"
if [ "$release" -ge '7' ]; then
groupadd -g 1001 spamd
useradd -u 1001 -g spamd -s /sbin/nologin -d \
/var/lib/spamassassin spamd
mkdir /var/lib/spamassassin
chown spamd:spamd /var/lib/spamassassin
fi
fi
#----------------------------------------------------------#
# Configure RoundCube #
#----------------------------------------------------------#
if [ "$exim" = 'yes' ] && [ "$mysql" = 'yes' ]; then
if [ "$apache" = 'yes' ]; then
cp -f $vestacp/roundcube/roundcubemail.conf /etc/httpd/conf.d/
fi
cp -f $vestacp/roundcube/main.inc.php /etc/roundcubemail/config.inc.php
cd /usr/share/roundcubemail/plugins/password
cp -f $vestacp/roundcube/vesta.php drivers/vesta.php
cp -f $vestacp/roundcube/config.inc.php config.inc.php
sed -i "s/localhost/$servername/g" config.inc.php
chmod a+r /etc/roundcubemail/*
chmod -f 777 /var/log/roundcubemail
r="$(gen_pass)"
mysql -e "CREATE DATABASE roundcube"
mysql -e "GRANT ALL ON roundcube.* TO
roundcube@localhost IDENTIFIED BY '$r'"
sed -i "s/%password%/$r/g" /etc/roundcubemail/config.inc.php
chmod 640 /etc/roundcubemail/config.inc.php
chown root:apache /etc/roundcubemail/config.inc.php
if [ -e "/usr/share/roundcubemail/SQL/mysql.initial.sql" ]; then
mysql roundcube < /usr/share/roundcubemail/SQL/mysql.initial.sql
else
mysql roundcube < /usr/share/doc/roundcubemail-*/SQL/mysql.initial.sql
fi
fi
#----------------------------------------------------------#
# Configure Fail2Ban #
#----------------------------------------------------------#
if [ "$fail2ban" = 'yes' ]; then
cp -rf $vestacp/fail2ban /etc/
if [ "$dovecot" = 'no' ]; then
fline=$(cat /etc/fail2ban/jail.local |grep -n dovecot-iptables -A 2)
fline=$(echo "$fline" |grep enabled |tail -n1 |cut -f 1 -d -)
sed -i "${fline}s/true/false/" /etc/fail2ban/jail.local
fi
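# fline resolves to the absolute line number of the "enabled" key inside
# the dovecot-iptables jail (grep -n prefixes context lines with "NNN-",
# hence the cut on "-"); the sed flips just that one line. The same
# pattern is used for the exim and vsftpd jails below.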
if [ "$exim" = 'no' ]; then
fline=$(cat /etc/fail2ban/jail.local |grep -n exim-iptables -A 2)
fline=$(echo "$fline" |grep enabled |tail -n1 |cut -f 1 -d -)
sed -i "${fline}s/true/false/" /etc/fail2ban/jail.local
fi
if [ "$vsftpd" = 'yes' ]; then
#Create vsftpd Log File
if [ ! -f "/var/log/vsftpd.log" ]; then
touch /var/log/vsftpd.log
fi
fline=$(cat /etc/fail2ban/jail.local |grep -n vsftpd-iptables -A 2)
fline=$(echo "$fline" |grep enabled |tail -n1 |cut -f 1 -d -)
sed -i "${fline}s/false/true/" /etc/fail2ban/jail.local
fi
chkconfig fail2ban on
mkdir -p /var/run/fail2ban
if [ -e "/usr/lib/systemd/system/fail2ban.service" ]; then
exec_pre='ExecStartPre=/bin/mkdir -p /var/run/fail2ban'
sed -i "s|\[Service\]|[Service]\n$exec_pre|g" \
/usr/lib/systemd/system/fail2ban.service
systemctl daemon-reload
fi
service fail2ban start
check_result $? "fail2ban start failed"
fi
#----------------------------------------------------------#
# Configure Admin User #
#----------------------------------------------------------#
# Deleting old admin user
if [ ! -z "$(grep ^admin: /etc/passwd)" ] && [ "$force" = 'yes' ]; then
chattr -i /home/admin/conf > /dev/null 2>&1
userdel -f admin >/dev/null 2>&1
chattr -i /home/admin/conf >/dev/null 2>&1
mv -f /home/admin $vst_backups/home/ >/dev/null 2>&1
rm -f /tmp/sess_* >/dev/null 2>&1
fi
if [ ! -z "$(grep ^admin: /etc/group)" ] && [ "$force" = 'yes' ]; then
groupdel admin > /dev/null 2>&1
fi
# Adding Vesta admin account
$VESTA/bin/v-add-user admin $vpass $email default System Administrator
check_result $? "can't create admin user"
$VESTA/bin/v-change-user-shell admin bash
$VESTA/bin/v-change-user-language admin $lang
# Configuring system IPs
$VESTA/bin/v-update-sys-ip
# Get main IP
ip=$(ip addr|grep 'inet '|grep global|head -n1|awk '{print $2}'|cut -f1 -d/)
# Configuring firewall
if [ "$iptables" = 'yes' ]; then
$VESTA/bin/v-update-firewall
fi
# Get public IP
pub_ip=$(curl -s vestacp.com/what-is-my-ip/)
if [ ! -z "$pub_ip" ] && [ "$pub_ip" != "$ip" ]; then
echo "$VESTA/bin/v-update-sys-ip" >> /etc/rc.local
$VESTA/bin/v-change-sys-ip-nat $ip $pub_ip
ip=$pub_ip
fi
# Configuring MySQL/MariaDB host
if [ "$mysql" = 'yes' ]; then
$VESTA/bin/v-add-database-host mysql localhost root $vpass
$VESTA/bin/v-add-database admin default default $(gen_pass) mysql
fi
# Configuring PostgreSQL host
if [ "$postgresql" = 'yes' ]; then
$VESTA/bin/v-add-database-host pgsql localhost postgres $vpass
$VESTA/bin/v-add-database admin db db $(gen_pass) pgsql
fi
# Adding default domain
$VESTA/bin/v-add-domain admin $servername
# Adding cron jobs
command="sudo $VESTA/bin/v-update-sys-queue disk"
$VESTA/bin/v-add-cron-job 'admin' '15' '02' '*' '*' '*' "$command"
command="sudo $VESTA/bin/v-update-sys-queue traffic"
$VESTA/bin/v-add-cron-job 'admin' '10' '00' '*' '*' '*' "$command"
command="sudo $VESTA/bin/v-update-sys-queue webstats"
$VESTA/bin/v-add-cron-job 'admin' '30' '03' '*' '*' '*' "$command"
command="sudo $VESTA/bin/v-update-sys-queue backup"
$VESTA/bin/v-add-cron-job 'admin' '*/5' '*' '*' '*' '*' "$command"
command="sudo $VESTA/bin/v-backup-users"
$VESTA/bin/v-add-cron-job 'admin' '10' '05' '*' '*' '*' "$command"
command="sudo $VESTA/bin/v-update-user-stats"
$VESTA/bin/v-add-cron-job 'admin' '20' '00' '*' '*' '*' "$command"
command="sudo $VESTA/bin/v-update-sys-rrd"
$VESTA/bin/v-add-cron-job 'admin' '*/5' '*' '*' '*' '*' "$command"
service crond restart
# Building RRD images
$VESTA/bin/v-update-sys-rrd
# Enabling file system quota
if [ "$quota" = 'yes' ]; then
$VESTA/bin/v-add-sys-quota
fi
# Enabling Softaculous plugin
if [ "$softaculous" = 'yes' ]; then
$VESTA/bin/v-add-vesta-softaculous
fi
# Starting Vesta service
chkconfig vesta on
service vesta start
check_result $? "vesta start failed"
chown admin:admin $VESTA/data/sessions
# Adding notifications
$VESTA/upd/add_notifications.sh
# Adding cronjob for autoupdates
$VESTA/bin/v-add-cron-vesta-autoupdate
#----------------------------------------------------------#
# Vesta Access Info #
#----------------------------------------------------------#
# Sending install notification to vestacp.com
wget vestacp.com/notify/?$codename -O /dev/null -q
# Comparing hostname and IP
host_ip=$(host $servername |head -n 1 |awk '{print $NF}')
if [ "$host_ip" = "$ip" ]; then
ip="$servername"
fi
# Sending notification to admin email
echo -e "Congratulations, you have just successfully installed \
Vesta Control Panel
https://$ip:8083
username: admin
password: $vpass
We hope that you enjoy your installation of Vesta. Please \
feel free to contact us anytime if you have any questions.
Thank you.
--
Sincerely yours
vestacp.com team
" > $tmpfile
send_mail="$VESTA/web/inc/mail-wrapper.php"
cat $tmpfile | $send_mail -s "Vesta Control Panel" $email
# Congrats
echo '======================================================='
echo
echo ' _| _| _|_|_|_| _|_|_| _|_|_|_|_| _|_| '
echo ' _| _| _| _| _| _| _| '
echo ' _| _| _|_|_| _|_| _| _|_|_|_| '
echo ' _| _| _| _| _| _| _| '
echo ' _| _|_|_|_| _|_|_| _| _| _| '
echo
echo
cat $tmpfile
rm -f $tmpfile
# EOF
|
ttcttctw/vesta
|
install/vst-install-rhel.sh
|
Shell
|
gpl-3.0
| 46,832 |
#!/bin/sh
# compile u-boot configuration
mkimage -A arm -O linux -T script -C none -n boot.scr -d ${BASE_DIR}/../../board/raspberrypi3-64/boot.source ${BINARIES_DIR}/boot.scr
|
s-vincent/br-tree
|
board/raspberrypi3-64/post-build.sh
|
Shell
|
gpl-3.0
| 178 |
pythonw Dropbox/te_measurements/seebeck_measurement/program_hightemp/PIDprogramset2.py
|
bobbymckinney/seebeck_measurement
|
old versions/SeebeckPID_ProgramSet.command
|
Shell
|
gpl-3.0
| 87 |
#!/bin/bash
# Print the number of whole days elapsed since 2015-01-11
d1=$(date "+%s" -d "2015-1-11")  # reference date as epoch seconds
d2=$(date "+%s")                 # current time as epoch seconds
ch=$(((d2-d1)/86400))            # 86400 seconds per day
echo $ch
#date -s "-$ch days"
#exit 0
|
lilianglaoding/codeprimer
|
ShellLab/date1.sh
|
Shell
|
gpl-3.0
| 123 |
# -------------------------------------------------------------------------------------------------
# Copyright (c) 2015 zsh-syntax-highlighting contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted
# provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this list of conditions
# and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of
# conditions and the following disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the zsh-syntax-highlighting contributors nor the names of its contributors
# may be used to endorse or promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -------------------------------------------------------------------------------------------------
# -*- mode: zsh; sh-indentation: 2; indent-tabs-mode: nil; sh-basic-offset: 2; -*-
# vim: ft=zsh sw=2 ts=2 et
# -------------------------------------------------------------------------------------------------
# 42 is in the command position in a nested subshell.
BUFFER='echo `echo \`42\`` "is `echo equal` to" `echo 6 times 9'
expected_region_highlight=(
"6 18 back-quoted-argument" # `echo \`42\``
"20 23 double-quoted-argument" # "is
"24 35 back-quoted-argument" # `echo equal`
"36 39 double-quoted-argument" # to"
"41 55 back-quoted-argument-unclosed" # `echo 6 times 9
)
|
lemones/dotfiles
|
home/.zsh/plugins/syntax-highlighter/highlighters/main/test-data/back-quoted-argument.zsh
|
Shell
|
gpl-3.0
| 2,371 |
# qmidiarp
. $ZYNTHIAN_DIR/zynthian-recipe/recipe/_zynth_lib.sh
cd $ZYNTHIAN_SW_DIR/plugins
sudo apt-get install -y --no-install-recommends autoconf automake libtool
zynth_git https://github.com/dcoredump/qmidiarp.git
if [ ${?} -ne 0 -o "${build}" = "build" ]
then
zynth_build_request clear
cd qmidiarp
git checkout lv2extui
autoreconf -i
./configure --prefix=/usr --exec-prefix=/zynthian/zynthian-plugins --libdir=/zynthian/zynthian-plugins --enable-buildapp=no --enable-lv2pluginuis=no
make
sudo make install
sudo cp -R qmidiarp_arp.lv2/modgui* /zynthian/zynthian-plugins/lv2/qmidiarp_arp.lv2
sudo cp -R qmidiarp_lfo.lv2/modgui* /zynthian/zynthian-plugins/lv2/qmidiarp_lfo.lv2
sudo cp -R qmidiarp_seq.lv2/modgui* /zynthian/zynthian-plugins/lv2/qmidiarp_seq.lv2
zynth_build_request ready
make clean
cd ..
fi
exit 0
######################
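# The lines below sit after "exit 0" and never execute; they read as a
# mod-host style session sketch: load four LV2 plugins and chain
# step sequencer -> arpeggiator -> dexed synth -> level meter.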
add https://github.com/dcoredump/dexed.lv2 0
add http://gareus.org/oss/lv2/stepseq#s8n8 1
add https://git.code.sf.net/p/qmidiarp/arp 2
add http://gareus.org/oss/lv2/modmeter 3
connect effect_0:audio_out effect_3:in
connect effect_1:midiout effect_2:MidiIn
connect effect_2:MidiOut effect_0:midi_in
param_set 1 grid_1_1 1
param_set 2 CH_IN 1
|
dcoredump/zynthian-recipe
|
recipe/qmidiarp.sh
|
Shell
|
gpl-3.0
| 1,195 |
#!/bin/bash
# Build an ipk package: control.tar.gz holds the package metadata,
# data.tar.gz the payload; both are bundled with debian-binary below.
cd control
tar -czf control.tar.gz *
mv control.tar.gz ../
cd ..
cd data
tar -czf data.tar.gz *
mv data.tar.gz ../
cd ..
tar -czf brook.ipk control.tar.gz data.tar.gz debian-binary
rm *.tar.gz
|
txthinking/brook
|
ipk/build.sh
|
Shell
|
gpl-3.0
| 209 |
#!/bin/bash
ulimit -t 1200
MIN=21
MAX=30
TIMEFORMAT='%2R'
OPTS="--no-hybrid-clause-learning"
echo "++++++++++++++++++ Flat NonLinear Heuristic +++++++++++++++"
for ((i=$MIN; i <= $MAX; i++)); do {
K=`expr $i \* 2`
time dReach -u $K -l $K -r dribble-flat.drh --precision 0.1 --stat 2>&1 > /tmp/dan3
} ; done
echo "++++++++++++++++++ Flat NonLinear Heuristic no-cl +++++++++++++++"
for ((i=$MIN; i <= $MAX; i++)); do {
K=`expr $i \* 2`
time dReach -u $K -l $K -r dribble-flat.drh --precision 0.1 --stat $OPTS 2>&1 > /tmp/dan3
} ; done
echo "++++++++++++++++++ Single NonLinear Heuristic +++++++++++++++"
for ((i=$MIN; i <= $MAX; i++)); do {
K=`expr $i \* 4`
time dReach -u $K -l $K -r -n dribble-net.drh --precision 0.1 --stat 2>&1 > /tmp/dan3
} ; done
echo "++++++++++++++++++ Single NonLinear Heuristic no-cl +++++++++++++++"
for ((i=$MIN; i <= $MAX; i++)); do {
K=`expr $i \* 4`
time dReach -u $K -l $K -r -n dribble-net.drh --precision 0.1 --stat $OPTS 2>&1 > /tmp/dan3
} ; done
echo "++++++++++++++++++ Single NonLinear +++++++++++++++"
for ((i=$MIN; i <= $MAX; i++)); do {
K=`expr $i \* 4`
time dReach -u $K -l $K -d -n dribble-net.drh --precision 0.1 --stat $OPTS 2>&1 > /tmp/dan3
} ; done
echo "++++++++++++++++++ Flat NonLinear +++++++++++++++"
for ((i=$MIN; i <= $MAX; i++)); do {
K=`expr $i \* 2`
time dReach -u $K -l $K -d dribble-flat.drh --precision 0.1 --stat $OPTS 2>&1 > /tmp/dan3
} ; done
echo "++++++++++++++++++ Single NonLinear Heuristic Composed +++++++++++++++"
for ((i=$MIN; i <= $MAX; i++)); do {
K=`expr $i \* 4`
time dReach -u $K -l $K -r -c dribble-net.drh --precision 0.1 --stat 2>&1 > /tmp/dan3
} ; done
echo "++++++++++++++++++ Single NonLinear Heuristic Composed no-cl +++++++++++++++"
for ((i=$MIN; i <= $MAX; i++)); do {
K=`expr $i \* 4`
time dReach -u $K -l $K -r -c dribble-net.drh --precision 0.1 --stat $OPTS 2>&1 > /tmp/dan3
} ; done
echo "++++++++++++++++++ Single NonLinear Composed +++++++++++++++"
for ((i=$MIN; i <= $MAX; i++)); do {
K=`expr $i \* 4`
time dReach -u $K -l $K -d -c dribble-net.drh --precision 0.1 --stat $OPTS 2>&1 > /tmp/dan3
} ; done
|
soonhokong/dreal3
|
benchmarks/network/dribble/run-exp.sh
|
Shell
|
gpl-3.0
| 2,276 |
#!/bin/bash
echo "
Script: Arch Linux (2017-05) Recipe (for Inspiron 13 - 7353)
Author: Manoel de Souza
E-Mail: [email protected]
Version: 3.0.0
Date: 07-Jul-2017
"
# https://wiki.archlinux.org/index.php/Installation_guide
# http://lifehacker.com/5680453/build-a-killer-customized-arch-linux-installation-and-learn-all-about-linux-in-the-process
# https://www.ostechnix.com/install-arch-linux-latest-version/
# https://www.ostechnix.com/arch-linux-2016-post-installation/
# https://turlucode.com/arch-linux-install-guide-step-1-basic-installation/
# http://turlucode.com/arch-linux-install-guide-step-2-desktop-environment-installation/#GNOME
# http://turlucode.com/arch-linux-install-guide-step-2-desktop-environment-installation/
# https://ramsdenj.com/2016/06/23/arch-linux-on-zfs-part-1-embed-zfs-in-archiso.html
# https://ramsdenj.com/2016/06/23/arch-linux-on-zfs-part-2-installation.html
# https://wiki.archlinux.org/index.php/Installing_Arch_Linux_on_ZFS
# https://wiki.archlinux.org/index.php/systemd-boot
# Cookbook download
# -----------------------
cd ~/Downloads
git clone https://github.com/manoeldesouza/cookbook
# Network temporary setup
# -----------------------
wifi-menu -o
# Terminal font
# -----------------------
pacman -Syu terminus-font
setfont ter-v16n
# Closest Mirror
# -----------------------
pacman -S reflector rsync
reflector --latest 20 --country 'Brazil' --protocol http --protocol https --sort rate --save /etc/pacman.d/mirrorlist
# Disk setup
# -----------------------
sgdisk --zap /dev/sda
sgdisk --new 0:0:+512M --typecode 0:ef00 --change-name 0:"EFI System" /dev/sda
sgdisk --new 0:0:0 --typecode 0:8300 --change-name 0:"Linux Root" /dev/sda
sgdisk --print /dev/sda
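# Partition 1 becomes the 512M EFI system partition (typecode ef00),
# partition 2 the Linux root filesystem filling the rest of the disk.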
mkfs.fat -F32 -n BOOT /dev/sda1
mkfs.ext4 /dev/sda2
mount /dev/sda2 /mnt
mkdir /mnt/boot
mount /dev/sda1 /mnt/boot
fallocate -l 8G /swapfile
# (dd alternative, if fallocate is unavailable: dd if=/dev/zero of=/swapfile bs=1M count=8192)
chmod 600 /swapfile
nano /etc/sysctl.d/99-sysctl.conf
# add the line: vm.swappiness=10
mkswap /swapfile
swapon /swapfile
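# Optional sanity check that the swapfile is active:
# swapon --show
# free -h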
# Basic system installation
# -----------------------
pacstrap -i /mnt base base-devel
genfstab -U /mnt >> /mnt/etc/fstab
nano /mnt/etc/fstab
# /dev/sda2 none swap defaults 0 0
# add discard to /
# Timezone setup
# -----------------------
timedatectl set-ntp true
timedatectl set-timezone Brazil/East
ln -sf /usr/share/zoneinfo/Brazil/East /etc/localtime
hwclock --systohc --utc
date
# Locale setup
# -----------------------
nano /etc/locale.gen
# Uncomment pt_BR.UTF-8
locale-gen
echo LANG=pt_BR.UTF-8 > /etc/locale.conf
export LANG=pt_BR.UTF-8
localectl set-keymap --no-convert br-latin1-us
localectl set-locale LANG="pt_BR.UTF-8"
# Font setup
# -----------------------
pacman -S terminus-font
setfont ter-v16n
echo "
FONT=ter-v16n
" > /etc/vconsole.conf
# Network setup
# -----------------------
hostnamectl set-hostname Inspiron
nano /etc/hostname
nano /etc/hosts
# 127.0.0.1 localhost.localdomain localhost Inspiron
# 127.0.1.1 localhost.localdomain Inspiron
# systemctl enable dhcpcd@"your eth device name".service
#systemctl enable [email protected]
systemctl enable systemd-networkd
systemctl enable systemd-resolved
systemctl enable systemd-timesyncd
pacman -S iw wpa_supplicant
systemctl enable [email protected]
echo '
[Match]
Name=en*
[Network]
DHCP=ipv4
' > /etc/systemd/network/en.network
echo '
[Match]
Name=wl*
[Network]
DHCP=ipv4
' > /etc/systemd/network/wl.network
# LTS Kernel setup
# -----------------------
pacman -S linux-lts linux-lts-headers
# Initial ramdisk
# -----------------------
mkinitcpio -p linux-lts
# Boot loader installation
# -----------------------
arch-chroot /mnt
bootctl install
echo "
default arch
timeout 4
editor 0
" > /boot/loader/loader.conf
echo "
title Arch Linux
linux /vmlinuz-linux-lts
initrd /initramfs-linux-lts.img
options root=/dev/sda2 rw
" > /boot/loader/entries/arch.conf
# Root setup
# -----------------------
passwd
gpasswd -a `id -un` network
EDITOR=nano visudo
# uncomment: %wheel ALL=(ALL) ALL
# User setup
# -----------------------
useradd -m -g users -G wheel -s /bin/bash manoel
passwd manoel
exit
umount -R /mnt
# Terminal utilities
# -----------------------
pacman -S mc vim lynx elinks gdisk hdparm tmux bash-completion ntfs-3g
reboot
|
manoeldesouza/Cookbook
|
Inspiron-Arch2.sh
|
Shell
|
gpl-3.0
| 4,336 |
#!/bin/bash
python RunBootstrap.py --paralog1 YML026C --paralog2 YDR450W --bootnum 84 > YML026C_YDR450W_Boot84_PrintScreen.txt
|
xjw1001001/IGCexpansion
|
Bootstrap/ShFiles/MG94_YML026C_YDR450W_Boot84.sh
|
Shell
|
gpl-3.0
| 128 |
#! /bin/bash
#
# Express setup of OpenVPN server
# for CentOS 7.x and Ubuntu Server 16.x / 17.x
# by xl-tech https://github.com/xl-tech
#
# Version 0.1 12 August 2017
#
# Use only on a freshly installed machine! It can overwrite your firewall
# rules or your existing OpenVPN config (if you had one before).
#
# Script is licensed under the GNU General Public License v3.0
#
# Usage: just run openvpnsetup.sh :)
#
#check for root
IAM=$(whoami)
if [ ${IAM} != "root" ]; then
echo "You must be root to use this script"
exit 1
fi
#check for tun/tap
if [ -c /dev/net/tun ]; then
echo TUN/TAP is enabled
else
echo TUN/TAP is disabled. Contact your VPS provider to enable it
exit 1
fi
#enable IPv4 forwarding
if sysctl net.ipv4.ip_forward |grep 0; then
sysctl -w net.ipv4.ip_forward=1
echo "net.ipv4.ip_forward = 1" >> /etc/sysctl.conf
else
echo "IPv4 forwarding is already enabled"
fi
#package install
yum_packages="openssl openvpn easy-rsa iptables iptables-services curl"
deb_packages="openssl openvpn easy-rsa iptables netfilter-persistent iptables-persistent curl"
if cat /etc/*release | grep ^NAME | grep CentOS; then
yum -y install epel-release
yum -y install $yum_packages
systemctl disable firewalld && systemctl stop firewalld
elif cat /etc/*release | grep ^NAME | grep Ubuntu; then
apt-get install -y $deb_packages
ufw disable
else
echo "Unsupported distro, sorry"
exit 1;
fi
#server settings
#internal IP
IIP=`hostname -I`
#external IP
EIP=`curl -s checkip.dyndns.org | sed -e 's/.*Current IP Address: //' -e 's/<.*$//'`
#internal IPv6 with mask
IIPv6=`ip -6 addr|grep inet6|grep fe80|awk -F '[ \t]+|/' '{print $3}'`
echo "Select server IP to listen on (only used for IPv4):
1) Internal IP - $IIP (in case you are behind NAT)
2) External IP - $EIP
"
read n
case $n in
1) IP=$IIP;;
2) IP=$EIP;;
*) echo "Invalid option"; exit 1;;
esac
echo "Select server PORT to listen on:
1) tcp 443 (recommended)
2) udp 1194 (default)
3) Enter manually (proto (lowercase!) port)
"
read n
case $n in
1) PORT="tcp 443";;
2) PORT="udp 1194";;
3) echo -n "Enter proto and port (like tcp 80 or udp 53): " & read -e PORT;;
*) invalid option;;
esac
PORTN=`echo $PORT|grep -o '[0-9]*'`
PORTL=`echo $PORT|grep -o '[a-z,A-Z]*'`
PORTL6=$PORTL"6"
echo "Select server cipher:
1) AES-256-GCM (default for OpenVPN 2.4.x, not supported by Ubuntu Server 16.x)
2) AES-256-CBC
3) AES-128-CBC (default for OpenVPN 2.3.x)
4) BF-CBC (insecure)
"
read n
case $n in
1) CIPHER=AES-256-GCM;;
2) CIPHER=AES-256-CBC;;
3) CIPHER=AES-128-CBC;;
4) CIPHER=BF-CBC;;
*) echo "Invalid option"; exit 1;;
esac
echo "Enable IPv6? (ensure that your machine have IPv6 support):
1) Yes
2) No
"
read n
case $n in
1) IPV6E=1;;
2) IPV6E=0;;
*) echo "Invalid option"; exit 1;;
esac
echo "Check your selection"
echo "Server will listen on $IP"
echo "Server will listen on $PORT"
echo "Server will use $CIPHER cipher"
echo "IPv6 - $IPV6E (1 is enabled, 0 is disabled)"
read -rsp $'Press enter to continue...\n'
#create dirs and files
mkdir /etc/openvpn/easy-rsa
mkdir /etc/openvpn/easy-rsa/keys
mkdir /etc/openvpn/logs
mkdir /etc/openvpn/bundles
mkdir /etc/openvpn/ccd
touch /etc/openvpn/easy-rsa/keys/index.txt
touch /etc/openvpn/easy-rsa/keys/serial
echo 00 >> /etc/openvpn/easy-rsa/keys/serial
#copy easy-rsa
if cat /etc/*release | grep ^NAME | grep CentOS; then
cp /usr/share/easy-rsa/2.0/* /etc/openvpn/easy-rsa
elif cat /etc/*release | grep ^NAME | grep Ubuntu; then
cp /usr/share/easy-rsa/* /etc/openvpn/easy-rsa
fi
#vars for certs
export EASY_RSA="/etc/openvpn/easy-rsa"
export OPENSSL="openssl"
export PKCS11TOOL="pkcs11-tool"
export GREP="grep"
export KEY_CONFIG=`$EASY_RSA/whichopensslcnf $EASY_RSA`
export KEY_DIR="$EASY_RSA/keys"
export PKCS11_MODULE_PATH="dummy"
export PKCS11_PIN="dummy"
export KEY_SIZE=2048
export CA_EXPIRE=3650
export KEY_EXPIRE=1825
export KEY_COUNTRY="US"
export KEY_PROVINCE="CA"
export KEY_CITY="SanFrancisco"
export KEY_ORG="Fort-Funston"
export KEY_EMAIL="[email protected]"
export KEY_OU="MyVPN"
export KEY_NAME="EasyRSA"
#issue certs and keys
#ca
export EASY_RSA="${EASY_RSA:-.}"
"$EASY_RSA/pkitool" --batch --initca $*
#server
export EASY_RSA="${EASY_RSA:-.}"
"$EASY_RSA/pkitool" --batch --server server-cert
#dh
$OPENSSL dhparam -out ${KEY_DIR}/dh.pem ${KEY_SIZE}
#ta
openvpn --genkey --secret ${KEY_DIR}/ta.key
#issue and revoke client cert to generate list of revoked certs
"$EASY_RSA/pkitool" --batch revoked
"$EASY_RSA/revoke-full" revoked
echo "Error 23 indicates that revoke is successful"
#generate server config
#ipv6 part
if (( "$IPV6E" == 1 )); then
#enable IPv6 forwarding
if sysctl net.ipv6.conf.all.forwarding |grep 0; then
sysctl -w net.ipv6.conf.all.forwarding=1
echo "net.ipv6.conf.all.forwarding = 1" >> /etc/sysctl.conf
else
echo "IPv6 forwarding is already enabled"
fi
echo -e "#IPv6 config
server-ipv6 fd6c:62d9:eb8c::/112
proto $PORTL6
tun-ipv6
push tun-ipv6
push \042route-ipv6 2000::/3\042
push \042redirect-gateway ipv6\042
" > /etc/openvpn/server.conf
else
echo "local $IP" > /etc/openvpn/server.conf
fi
#main part
echo -e "port $PORTN
proto $PORTL
dev tun
#for cert revoke check
crl-verify /etc/openvpn/easy-rsa/keys/crl.pem
server 10.1.0.0 255.255.255.0
topology subnet
push \042redirect-gateway def1 bypass-dhcp\042
#duplicate-cn
push \042dhcp-option DNS 8.8.8.8\042
push \042dhcp-option DNS 8.8.4.4\042
comp-lzo adaptive
push \042comp-lzo adaptive\042
mssfix 0
push \042mssfix 0\042
#management 0.0.0.0 7000 /etc/openvpn/management-password
#duplicate-cn
keepalive 10 120
tls-timeout 160
hand-window 160
cipher $CIPHER
auth SHA256
#uncomment for 2.4.x feature to disable automatically negotiate in AES-256-GCM
#ncp-disable
#max-clients 300
#user nobody
#group nobody
persist-key
persist-tun
status /etc/openvpn/logs/openvpn-status.log
log-append /etc/openvpn/logs/openvpn.log
verb 2
#reneg-sec 864000
mute 3
tls-server
#script-security 3
#buffers
sndbuf 393216
rcvbuf 393216
push \042sndbuf 393216\042
push \042rcvbuf 393216\042
" >> /etc/openvpn/server.conf
echo "<ca>" >> /etc/openvpn/server.conf
cat $KEY_DIR/ca.crt >> /etc/openvpn/server.conf
echo "</ca>" >> /etc/openvpn/server.conf
echo "<cert>" >> /etc/openvpn/server.conf
cat $KEY_DIR/server-cert.crt >> /etc/openvpn/server.conf
echo "</cert>" >> /etc/openvpn/server.conf
echo "<key>" >> /etc/openvpn/server.conf
cat $KEY_DIR/server-cert.key >> /etc/openvpn/server.conf
echo "</key>" >> /etc/openvpn/server.conf
if openvpn --version | grep 2.3; then
# ta tls auth OpenVPN 2.3.x
echo "key-direction 0" >> /etc/openvpn/server.conf
echo "<tls-auth>" >> /etc/openvpn/server.conf
cat $KEY_DIR/ta.key >> /etc/openvpn/server.conf
echo "</tls-auth>" >> /etc/openvpn/server.conf
else
# ta tls crypt OpenVPN 2.4.x
echo "<tls-crypt>" >> /etc/openvpn/server.conf
cat $KEY_DIR/ta.key >> /etc/openvpn/server.conf
echo "</tls-crypt>" >> /etc/openvpn/server.conf
fi
echo "<dh>" >> /etc/openvpn/server.conf
cat $KEY_DIR/dh.pem >> /etc/openvpn/server.conf
echo "</dh>" >> /etc/openvpn/server.conf
#create iptables file
echo "*filter
:INPUT ACCEPT [0:0]
:FORWARD ACCEPT [0:0]
:OUTPUT ACCEPT [0:0]
:XL-Firewall-1-INPUT - [0:0]
-A INPUT -j XL-Firewall-1-INPUT
-A FORWARD -j XL-Firewall-1-INPUT
-A XL-Firewall-1-INPUT -p icmp --icmp-type any -s localhost -j ACCEPT
-A XL-Firewall-1-INPUT -m state --state ESTABLISHED,RELATED -j ACCEPT
-A XL-Firewall-1-INPUT -m state --state NEW -m tcp -p tcp --dport 22 -j ACCEPT
-A XL-Firewall-1-INPUT -m state --state NEW -m $PORTL -p $PORTL --dport $PORTN -j ACCEPT
-A XL-Firewall-1-INPUT -i tun+ -j ACCEPT
-A XL-Firewall-1-INPUT -j REJECT --reject-with icmp-host-prohibited
COMMIT
*mangle
:PREROUTING ACCEPT [0:0]
:INPUT ACCEPT [0:0]
:FORWARD ACCEPT [0:0]
:OUTPUT ACCEPT [0:0]
:POSTROUTING ACCEPT [0:0]
COMMIT
*nat
:PREROUTING ACCEPT [0:0]
:POSTROUTING ACCEPT [0:0]
:OUTPUT ACCEPT [0:0]
-A POSTROUTING -s 10.1.0.0/24 -j MASQUERADE
COMMIT" > /tmp/iptables
#create ip6tables file
echo "*filter
:INPUT ACCEPT [0:0]
:FORWARD ACCEPT [0:0]
:OUTPUT ACCEPT [0:0]
:XL-Firewall-1-INPUT - [0:0]
-A INPUT -j XL-Firewall-1-INPUT
-A FORWARD -j XL-Firewall-1-INPUT
-A XL-Firewall-1-INPUT -i lo -j ACCEPT
-A XL-Firewall-1-INPUT -p icmpv6 -j ACCEPT
-A XL-Firewall-1-INPUT -m state --state ESTABLISHED,RELATED -j ACCEPT
-A XL-Firewall-1-INPUT -m state --state NEW -m $PORTL -p $PORTL --dport $PORTN -j ACCEPT
-A XL-Firewall-1-INPUT -i tun+ -j ACCEPT
-A XL-Firewall-1-INPUT -j REJECT --reject-with icmp6-adm-prohibited
COMMIT
*mangle
:PREROUTING ACCEPT [0:0]
:INPUT ACCEPT [0:0]
:FORWARD ACCEPT [0:0]
:OUTPUT ACCEPT [0:0]
:POSTROUTING ACCEPT [0:0]
COMMIT
*nat
:PREROUTING ACCEPT [0:0]
:POSTROUTING ACCEPT [0:0]
:OUTPUT ACCEPT [0:0]
-A POSTROUTING -s fd6c:62d9:eb8c::/112 -j MASQUERADE
COMMIT" > /tmp/ip6tables
#start services
if cat /etc/*release | grep ^NAME | grep CentOS; then
cp /tmp/ip6tables /etc/sysconfig/ip6tables
cp /tmp/iptables /etc/sysconfig/iptables
systemctl enable iptables && systemctl start iptables
systemctl enable ip6tables && systemctl start ip6tables
systemctl enable openvpn@server && systemctl start openvpn@server
systemctl restart iptables && systemctl restart ip6tables
elif cat /etc/*release | grep ^NAME | grep Ubuntu; then
cp /tmp/ip6tables /etc/iptables/rules.v6
cp /tmp/iptables /etc/iptables/rules.v4
systemctl enable netfilter-persistent && systemctl start netfilter-persistent
systemctl enable openvpn@server && systemctl start openvpn@server
systemctl restart netfilter-persistent
fi
#generate client config
echo -e "client
dev tun
dev-type tun
#bind to interface if needed
#dev-node \042Ethernet 4\042
ns-cert-type server
setenv opt tls-version-min 1.0 or-highest
#block local dns
setenv opt block-outside-dns
nobind
remote $EIP $PORTN $PORTL
cipher $CIPHER
auth SHA256
resolv-retry infinite
persist-key
persist-tun
comp-lzo
mssfix 0
verb 3
ping 10
tls-client
float" >> /etc/openvpn/client.ovpn
#generate bash script to create one-file config for clients
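# Note on the escapes below: \042 is the octal code for '"' and \044 for '$';
# echo -e expands them, so the generated newclient.sh ends up containing
# literal quotes and dollar signs instead of having them expanded here.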
echo -e "#! /bin/bash
# Script to automate creating new OpenVPN clients
#
# H Cooper - 05/02/11
# Y Frolov - 08/06/16 - bundle config added (unified format)
# Usage: newclient.sh <common-name>
echo \042Script to generate unified config for Windows App\042
echo \042Usage: newclient.sh <common-name>\042
# Set vars
OPENVPN_DIR=/etc/openvpn
OPENVPN_RSA_DIR=/etc/openvpn/easy-rsa
OPENVPN_KEYS=\044OPENVPN_RSA_DIR/keys
BUNDLE_DIR=/etc/openvpn/bundles
# Either read the CN from \0441 or prompt for it
if [ -z \042\0441\042 ]
then echo -n \042Enter new client common name (CN): \042
read -e CN
else
CN=\0441
fi
# Ensure CN isn't blank
if [ -z \042\044CN\042 ]
then echo \042You must provide a CN.\042
exit
fi
# Check the CN doesn't already exist
if [ -f \044OPENVPN_KEYS/\044CN.crt ]
then echo \042Error: certificate with the CN \044CN already exists!\042
echo \042 \044OPENVPN_KEYS/\044CN.crt\042
exit
fi
# Establish the default variables
export EASY_RSA=\042/etc/openvpn/easy-rsa\042
export OPENSSL=\042openssl\042
export PKCS11TOOL=\042pkcs11-tool\042
export GREP=\042grep\042
export KEY_CONFIG=\x60\044EASY_RSA/whichopensslcnf \044EASY_RSA\x60
export KEY_DIR=\042\044EASY_RSA/keys\042
export PKCS11_MODULE_PATH=\042dummy\042
export PKCS11_PIN=\042dummy\042
export KEY_SIZE=2048
export CA_EXPIRE=3650
export KEY_EXPIRE=1825
export KEY_COUNTRY=\042US\042
export KEY_PROVINCE=\042CA\042
export KEY_CITY=\042SanFrancisco\042
export KEY_ORG=\042Fort-Funston\042
export KEY_EMAIL=\[email protected]\042
export KEY_OU=\042MyVPN\042
export KEY_NAME=\042EasyRSA\042
# Copied from build-key script (to ensure it works!)
export EASY_RSA=\042\044{EASY_RSA:-.}\042
\042\044EASY_RSA/pkitool\042 --batch \044CN
# Add all certs to unified client config file
# Default config for client
cp \044OPENVPN_DIR/client.ovpn \044BUNDLE_DIR/\044CN.ovpn
# CA
echo \042<ca>\042 >> \044BUNDLE_DIR/\044CN.ovpn
cat \044OPENVPN_KEYS/ca.crt >> \044BUNDLE_DIR/\044CN.ovpn
echo \042</ca>\042 >> \044BUNDLE_DIR/\044CN.ovpn
# Client cert
echo \042<cert>\042 >> \044BUNDLE_DIR/\044CN.ovpn
cat \044OPENVPN_KEYS/\044CN.crt >> \044BUNDLE_DIR/\044CN.ovpn
echo \042</cert>\042 >> \044BUNDLE_DIR/\044CN.ovpn
# Client key
echo \042<key>\042 >> \044BUNDLE_DIR/\044CN.ovpn
cat \044OPENVPN_KEYS/\044CN.key >> \044BUNDLE_DIR/\044CN.ovpn
echo \042</key>\042 >> \044BUNDLE_DIR/\044CN.ovpn
if openvpn --version | grep 2.3; then
# ta tls auth OpenVPN 2.3.x
echo \042key-direction 1\042 >> \044BUNDLE_DIR/\044CN.ovpn
echo \042<tls-auth>\042 >> \044BUNDLE_DIR/\044CN.ovpn
cat \044OPENVPN_KEYS/ta.key >> \044BUNDLE_DIR/\044CN.ovpn
echo \042</tls-auth>\042 >> \044BUNDLE_DIR/\044CN.ovpn
else
# ta tls crypt OpenVPN 2.4.x
echo \042<tls-crypt>\042 >> \044BUNDLE_DIR/\044CN.ovpn
cat \044OPENVPN_KEYS/ta.key >> \044BUNDLE_DIR/\044CN.ovpn
echo \042</tls-crypt>\042 >> \044BUNDLE_DIR/\044CN.ovpn
fi
# DH key
echo \042<dh>\042 >> \044BUNDLE_DIR/\044CN.ovpn
cat \044OPENVPN_KEYS/dh.pem >> \044BUNDLE_DIR/\044CN.ovpn
echo \042</dh>\042 >> \044BUNDLE_DIR/\044CN.ovpn
#echo \042\042
echo \042COMPLETE! Copy the new unified config from here: /etc/openvpn/bundles/\044CN.ovpn\042" > /etc/openvpn/newclient.sh
chmod +x /etc/openvpn/newclient.sh
echo "Setup is complete. Happy VPNing!"
echo "Use /etc/openvpn/newclient.sh to generate client config"
exit 0
|
xl-tech/OpenVPN-easy-setup
|
openvpnsetup.sh
|
Shell
|
gpl-3.0
| 13,462 |
#!/bin/bash
test_info()
{
cat <<EOF
Restart the ctdbd daemons of a CTDB cluster.
No error if ctdbd is not already running on the cluster.
Prerequisites:
* Nodes must be accessible via 'onnode'.
Steps:
1. Restart the ctdb daemons on all nodes using a method according to
the test environment and platform.
Expected results:
* The cluster is healthy within a reasonable timeframe.
EOF
}
. ctdb_test_functions.bash
ctdb_test_init "$@"
set -e
setup_ctdb
restart_ctdb
|
wolfmuel/ctdb
|
tests/simple/00_ctdb_init.sh
|
Shell
|
gpl-3.0
| 482 |
#!/bin/bash
mkdir SymbolicCpp
cp SymbolicC++3-3.35.zip SymbolicCpp
cd SymbolicCpp
unzip SymbolicC++3-3.35.zip
|
richelbilderbeek/RibiLibraries
|
SymbolicCpp.sh
|
Shell
|
gpl-3.0
| 110 |
#!/bin/bash
[ -n "$(command -v python3)" ] && PYCOM="python3" || PYCOM="python"
[ -z "$PYTHONPATH" ] && {
export PYTHONPATH=../bastis-python-toolbox;
} || {
export PYTHONPATH=${PYTHONPATH}:../bastis-python-toolbox
}
echo "Using python binary: $PYCOM"
echo "Using python path: $PYTHONPATH"
$PYCOM manage.py test && $PYCOM manage.py runserver localhost:8000
|
BastiTee/pyntrest
|
devops-tools/dev-server.sh
|
Shell
|
gpl-3.0
| 367 |
#!/bin/bash
while true
do
out="$(cat /home/bmw417/.config/keyboardInput/k3enabled)"
while [ $out -eq 0 ]
do
out="$(cat /home/bmw417/.config/keyboardInput/k3enabled)"
sleep 0.1
done
tee /sys/class/leds/asus::kbd_backlight/brightness <<< 3
done
|
bmw417/the_bmw_repo
|
keyboardInput/k3input.sh
|
Shell
|
gpl-3.0
| 261 |
#!/bin/sh
set -eu
. .env
echo "Waiting for postgres..."
until nc -z "${POSTGRES_HOST}" "${POSTGRES_PORT}"; do
sleep 1
done
php artisan migrate --force --seed
php-fpm7
test -d /run/nginx || mkdir /run/nginx
nginx -t
nginx
tail -f /var/log/nginx/*
|
emilianobovetti/brewis
|
web/startup.sh
|
Shell
|
gpl-3.0
| 258 |
# . bash_bits.sh
hex2bits () {
echo 'obase=2;ibase=16;'$1 | bc
}
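# Example usage (note: bc's ibase=16 expects uppercase hex digits):
#   hex2bits FF   # -> 11111111
#   hex2bits 1A   # -> 11010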
|
gipi/information-theory
|
bash_bits.sh
|
Shell
|
gpl-3.0
| 68 |
curl -v --data "client_orderid=inv0001001&order_desc=Test Order Description&first_name=John&last_name=Smith&ssn=1267&birthday=19820115&address1=100 Main st&city=Seattle&state=WA&zip_code=98102&country=US&phone=+12063582043&cell_phone=+19023384543&amount=10.4&[email protected]¤cy=USD&ipaddress=65.153.12.232&site_url=www.google.com&credit_card_number=4538977399606732&card_printed_name=CARD HOLDER&expire_month=12&expire_year=2099&cvv2=123&purpose=www.twitch.tv/dreadztv&redirect_url=http://doc.fibonatix.com/doc/dummy.htm&server_callback_url=http://doc.fibonatix.com/doc/dummy.htm&merchant_data=VIP customer&control=709be79dd85191fad169fb8082927b9842e87200" http://localhost:52380/paynet/api/v2/preauth/250
|
alexeybezverkhiy/fibonatix
|
Merchant/MerchantAPI/Tests/fb-preauth.sh
|
Shell
|
gpl-3.0
| 723 |
#!/usr/bin/env sh
# generated from catkin/python/catkin/environment_cache.py
# based on a snapshot of the environment before and after calling the setup script
# it emulates the modifications of the setup script without recurring computations
# new environment variables
# modified environment variables
export LD_LIBRARY_PATH="/home/bjornl/ros/workspaces/bjorn_ws/devel/lib/x86_64-linux-gnu:/opt/ros/indigo/lib/x86_64-linux-gnu:/home/bjornl/ros/workspaces/bjorn_ws/devel/lib:/opt/ros/indigo/lib"
export PKG_CONFIG_PATH="/home/bjornl/ros/workspaces/bjorn_ws/devel/lib/x86_64-linux-gnu/pkgconfig:/opt/ros/indigo/lib/x86_64-linux-gnu/pkgconfig:/home/bjornl/ros/workspaces/bjorn_ws/devel/lib/pkgconfig:/opt/ros/indigo/lib/pkgconfig"
export PWD="/home/bjornl/ros/workspaces/bjorn_ws/build"
|
blutjens/perc_neuron_ros_ur10
|
pn_ros/bjorn_ws/build/catkin_generated/setup_cached.sh
|
Shell
|
gpl-3.0
| 788 |
#!/bin/bash -x
#
# Generated - do not edit!
#
# Macros
TOP=`pwd`
CND_PLATFORM=MinGW-Windows
CND_CONF=Debug
CND_DISTDIR=dist
CND_BUILDDIR=build
NBTMPDIR=${CND_BUILDDIR}/${CND_CONF}/${CND_PLATFORM}/tmp-packaging
TMPDIRNAME=tmp-packaging
OUTPUT_PATH=${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/c
OUTPUT_BASENAME=c
PACKAGE_TOP_DIR=c/
# Functions
function checkReturnCode
{
rc=$?
if [ $rc != 0 ]
then
exit $rc
fi
}
function makeDirectory
# $1 directory path
# $2 permission (optional)
{
mkdir -p "$1"
checkReturnCode
if [ "$2" != "" ]
then
chmod $2 "$1"
checkReturnCode
fi
}
function copyFileToTmpDir
# $1 from-file path
# $2 to-file path
# $3 permission
{
cp "$1" "$2"
checkReturnCode
if [ "$3" != "" ]
then
chmod $3 "$2"
checkReturnCode
fi
}
# Setup
cd "${TOP}"
mkdir -p ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package
rm -rf ${NBTMPDIR}
mkdir -p ${NBTMPDIR}
# Copy files and create directories and links
cd "${TOP}"
makeDirectory "${NBTMPDIR}/c/bin"
copyFileToTmpDir "${OUTPUT_PATH}.exe" "${NBTMPDIR}/${PACKAGE_TOP_DIR}bin/${OUTPUT_BASENAME}.exe" 0755
# Generate tar file
cd "${TOP}"
rm -f ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package/c.tar
cd ${NBTMPDIR}
tar -vcf ../../../../${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package/c.tar *
checkReturnCode
# Cleanup
cd "${TOP}"
rm -rf ${NBTMPDIR}
|
tadvent/OnlineJudgeSolutions-Cpp
|
Other/BaiduAstar/2012-0531/C/nbproject/Package-Debug.bash
|
Shell
|
gpl-3.0
| 1,399 |
## Configuration
export_env HOSTALIASES /host/etc/host_aliases
## Software
apt_install_permanent rsync rsnapshot openssh-client
|
Boukefalos/docker-deployment
|
build/scripts/rsnapshot.sh
|
Shell
|
gpl-3.0
| 128 |
# DESCRIPTION
# Resizes all images of type T (via parameter $1) in the current path, by nearest-neighbor method, to target format F ($2), at size A x B ($3 x $4). (Nearest neighbor method will keep hard edges, or look "pixelated.") Runs `img2imgNN.sh` repeatedly to do this.
# USAGE
# This script uses the same parameters as `img2imgNN.sh`, EXCEPT that parameter $1 is a file type instead of a specific file. All files of type $1 will be passed to img2imgNN.sh:
# - $1 source file type
# - $2 destination format
# - $3 scale by nearest neighbor method to pixels X
# - $4 OPTIONAL. Force this Y-dimension, regardless of aspect. Scales by nearest neighbor method to pixels Y. ONLY USED for ppms. Ignored for all other types (aspect kept). SEE COMMENTS in i_view32.exe code lines area for options to maintain aspect and/or rotate image (wanted for my purposes at times).
# Example command:
# imgs2imgsNN.sh ppm png 640
# OR, to force a given x by y dimension for a ppm:
# imgs2imgsNN.sh ppm png 640 480
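# NOTE (assumption): img2imgNN.sh must be available on the PATH for the loop below.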
# CODE
array=(`find . -maxdepth 1 -type f -iname \*.$1 -printf '%f\n' | tr -d '\15\32'`)
for img in "${array[@]}"
do
img2imgNN.sh $img $2 $3 $4
done
|
r-alex-hall/fontDevTools
|
scripts/imgAndVideo/imgs2imgsNN.sh
|
Shell
|
gpl-3.0
| 1,158 |
# 0AD - Game - http://wildfiregames.com/0ad/
0AD() {
echo "[$FUNCNAME]"
# Generate repo
if [[ ! -f /etc/yum.repos.d/fedora-0ad.repo ]]; then
cat << EOF > /etc/yum.repos.d/fedora-0ad.repo
[fedora-0ad]
name=Cross-Platform RTS Game of Ancient Warfare
baseurl=http://repos.fedorapeople.org/repos/bioinfornatics/0ad/fedora-\$releasever/\$basearch/
enabled=0
skip_if_unavailable=1
gpgcheck=0
[fedora-0ad-source]
name=Cross-Platform RTS Game of Ancient Warfare - Source
baseurl=http://repos.fedorapeople.org/repos/bioinfornatics/0ad/fedora-\$releasever/SRPMS
enabled=0
skip_if_unavailable=1
gpgcheck=0
EOF
fi
# Install libraries and dependencies
dnf install -y mesa-libGLw mesa-libGL mesa-libGLw-devel mesa-libGL-devel @fedora-packager
# Check if the render acceleration library libtxc_dxtn is installed
# http://wildfiregames.com/users/code/libtxc_dxtn070518.tar.gz
if [[ ! -f /usr/lib/libtxc_dxtn.so ]]; then
LIBTXCPKG='libtxc_dxtn070518.tar.gz'
cd /usr/local/src
Download name "$LIBTXCPKG"
tar -xzf libtxc_dxtn070518.tar.gz
rm -f libtxc_dxtn070518.tar.gz
cd libtxc_dxtn
make install
# Restore SELinux permissions
restorecon -R /usr/lib
fi
# Install 0ad
dnf install -y --enablerepo=fedora-0ad 0ad
}
|
easyLife/easylife
|
lib/0ad.sh
|
Shell
|
gpl-3.0
| 1,331 |
#!/bin/bash
# vim: ts=4:sw=4
. ~/plescripts/plelib.sh
. ~/plescripts/vmlib.sh
EXEC_CMD_ACTION=EXEC
typeset -r ME=$0
typeset -r PARAMS="$*"
typeset -r str_usage=\
"Usage : $ME
Updates openSUSE Leap; stops all VMs first to avoid any problem in case
VirtualBox itself gets updated.
"
while [ $# -ne 0 ]
do
case $1 in
-emul)
EXEC_CMD_ACTION=NOP
shift
;;
-h|-help|help)
info "$str_usage"
LN
exit 1
;;
*)
error "Arg '$1' invalid."
LN
info "$str_usage"
exit 1
;;
esac
done
#ple_enable_log -params $PARAMS
# Create array vm_running_list
function load_running_vms
{
typeset -ga vm_running_list
typeset vm_name
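# VBoxManage prints one line per running VM, e.g.: "my vm" {uuid}
# The sed below keeps only the name between the quotes.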
while read vm_name
do
vm_running_list+=( $vm_name )
done<<<"$(VBoxManage list runningvms|sed "s/^\"\(.*\)\".*$/\1/g")"
LN
}
load_running_vms
info "Arrêt de ${#vm_running_list[*]} VMs."
LN
for vm_name in ${vm_running_list[*]}
do
save_vm $vm_name
LN
done
line_separator
info "Update SUSE"
exec_cmd -c sudo zypper up -y
LN
line_separator
exec_cmd -c sudo zypper ps -s
if [ ${#vm_running_list[*]} -ne 0 ]
then
line_separator
confirm_or_exit "Démarrer ${#vm_running_list[*]} VMs"
for vm_name in ${vm_running_list[*]}
do
start_vm $vm_name
LN
done
fi
|
PhilippeLeroux/plescripts
|
shell/update_suse.sh
|
Shell
|
gpl-3.0
| 1,234 |
#!/bin/bash
# Parses a list of packages from a package list directory
# Usage:
# ./generate_package_list.sh <package list directory>
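# Example package list file (one package name per line; lines containing '#'
# are dropped by the grep below):
#   vim
#   openssh-server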
# Fail upon any errors
set -e
# Read the command line arguments
PACKAGELISTSDIR=$1
# Generate the package list
for i in $PACKAGELISTSDIR/*
do
# Ignore comment lines
PACKAGES+=" $(grep -v '#' $i)"
done
# Return the list of packages
echo $PACKAGES
|
jrruethe/debian-micro
|
buildboot/generate_package_list.sh
|
Shell
|
gpl-3.0
| 389 |
#!/bin/bash
export PATH=/e/br07/build/bin/Release:/e/br07/third_party/Pre-built.2/lib:$PATH
ls *.ply | preprocess -bbox bbox.dat
|
Tonsty/br07
|
scripts/preprocess_bbox.sh
|
Shell
|
gpl-3.0
| 130 |
#!/bin/bash
color() {
local color="$1"
if [ "$3" == "" ]; then
local prefix=""
local txt="$2"
else
local prefix="$2 "
local txt="$3"
fi
echo -en "${prefix}\x1b[;${color}m${txt}\x1b[0m"
}
red() {
color 31 "$1" "$2"
}
green() {
color 32 "$1" "$2"
}
OPTIONS="--nosectionsname --nocolor"
VERBOSE=0
__diff() {
local name=$1
local suffix=""
local more_opt=""
local tmp=tmp$$
if [ "$2" != "" ]; then
local more_opt="-x=$2"
local suffix="_$2"
fi
if [ -f "tests/${name}${suffix}.rev" ]; then
./run_reverse.py "tests/${name}.bin" $more_opt $OPTIONS >$tmp 2>/dev/null
if [ $? -eq 0 ]; then
if [ $VERBOSE -eq 1 ]; then
diff $tmp "tests/${name}${suffix}.rev"
else
diff -q $tmp "tests/${name}${suffix}.rev" >/dev/null
fi
if [ $? -eq 0 ]; then
green "$name$suffix" "[OK]\n"
else
red "$name$suffix" "[FAIL]\n"
fi
rm $tmp
else
red "$name$suffix" "[EXCEPTION]\n"
fi
else
red "$name$suffix" "[NOT FOUND]\n"
fi
}
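# Usage (a best-effort note inferred from the argument parsing below):
#   ./diff.sh <name>[.rev] [1] [-option ...] [arch ...]
# "1" enables verbose diffs; each remaining arg is passed to __diff as -x=<arch>.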
name=`basename "$1" .rev`
shift
while true; do
case "$1" in
"1")
VERBOSE=1
;;
-*)
OPTIONS="$OPTIONS $1"
;;
*)
break
;;
esac
shift
done
if [ "$1" == "" ]; then
__diff "$name"
else
while [ "$1" != "" ]; do
__diff "$name" "$1"
shift
done
fi
|
d4nnyk/reverse
|
diff.sh
|
Shell
|
gpl-3.0
| 1,593 |
#!/bin/csh -v
rm -rf testdir
mkdir testdir
cd testdir
ln -s ../build/release/example/* .
ln -s ../example/lena.ppm .
./display < lena.ppm
./display1 lena.ppm
./invert < lena.ppm > inv.ppm ; display inv.ppm
./fft < lena.ppm > fft.ppm ; display fft.ppm
./mirror < lena.ppm > mirror.ppm ; display mirror.ppm
./mirror1 < lena.ppm > mirror1.ppm ; display mirror1.ppm
./pow0_5 < lena.ppm > pow.ppm ; display pow.ppm
./sobel1 < lena.ppm > sobel1.ppm ; display sobel1.ppm
./sobel2 < lena.ppm > sobel2.ppm ; display sobel2.ppm
./togray < lena.ppm > gray.ppm ; display gray.ppm
./togray1 < lena.ppm > gray1.ppm ; display gray1.ppm
./warp < lena.ppm > warp.ppm ; display warp.ppm
./roi1 < lena.ppm > roi1.ppm ; display roi1.ppm
./roi2 > roi2.ppm ; display roi2.ppm
|
mbe2014/ClipOCV
|
test.sh
|
Shell
|
mpl-2.0
| 774 |
#!/bin/bash
# This file is part of MorphoDiTa <http://github.com/ufal/morphodita/>.
#
# Copyright 2016 Institute of Formal and Applied Linguistics, Faculty of
# Mathematics and Physics, Charles University in Prague, Czech Republic.
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
rm -rf ../src/utils/*
git -C ../src/utils clone --depth=1 --branch=stable https://github.com/ufal/cpp_utils
mv ../src/utils/cpp_utils/src/* ../src/utils/
mv ../src/utils/cpp_utils/{AUTHORS,CHANGES,LICENSE,README} ../src/utils/
rm -rf ../src/utils/cpp_utils/
sed '
s/^namespace utils {/namespace morphodita {\n&/
s/^} \/\/ namespace utils/&\n} \/\/ namespace morphodita/
' -i ../src/utils/*
|
ufal/morphodita
|
scripts/update_utils.sh
|
Shell
|
mpl-2.0
| 831 |
#!/bin/sh -x
# prepare vm and install api server.
#
# depends on the vagrant plugin vbguest. install it with:
# `vagrant plugin install vagrant-vbguest`
# newspeak.io
# Copyright (C) 2013 Jahn Bertsch
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License version 3
# as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
cd $(dirname $0)
# install vagrant plugins
vagrant bindler setup
vagrant plugin bundle
vagrant up --no-provision
# remove outdated guest additions and chef
vagrant ssh -c "sudo apt-get -y purge virtualbox-guest-dkms virtualbox-guest-utils virtualbox-guest-x11 chef"
# remove some more unneeded packages
vagrant ssh -c "sudo apt-get -y --purge autoremove"
# rename default user
#user=newspeak
#vagrant ssh -c "sudo usermod -l $user ubuntu"
#vagrant ssh -c "sudo groupmod -n $user ubuntu"
#vagrant ssh -c "sudo usermod -d /home/$user -m $user"
#vagrant ssh -c "sudo mv /etc/sudoers.d/90-cloud-init-users /etc/sudoers.d/90-cloud-init-$user"
#vagrant ssh -c "sudo perl -pi -e \"s/ubuntu/$user/g;\" /etc/sudoers.d/90-cloud-init-$user"
# update guest additions
vagrant vbguest
# copy over cached files to speed up compilation
vagrant ssh -c "mkdir -p /home/vagrant/.m2"
../shared/scripts/vagrant/scp.sh -r ../cache/maven/* default:/home/vagrant/.m2
# start api server installation
vagrant ssh -c "/vagrant/api/api-install.sh"
# update cache to speed up subsequent installs
rm -rf ../cache
mkdir -p ../cache/maven
../shared/scripts/vagrant/scp.sh -r default:/home/vagrant/.m2/* ../cache/maven
|
newspeak/newspeak-server
|
api-vm/vagrant-setup-api.sh
|
Shell
|
agpl-3.0
| 1,994 |
#!/bin/sh
gcc -O3 ../menu.c ../lib/lib_gpio.c ../lib/lib_png.c ../lib/lib_spi.c ../lib/lib_oled.c ../lib/lib_ui.c ../lib/lib_jpeg.c -I../lib/ -std=gnu11 -o ../menu $(pkg-config --cflags --libs cairo) && mv ../menu ../../bin/
|
timelapseplus/VIEW
|
src/script/build-menu.sh
|
Shell
|
agpl-3.0
| 224 |
#!/bin/bash
CURRENT_DIR=`pwd`
cd `dirname $0`
SCRIPT_DIR=`pwd`
export SCRIPT_DIR
PID=`cat application.pid`
kill ${PID}
cd ${CURRENT_DIR}
|
opensource21/sze
|
scripts/stop.sh
|
Shell
|
agpl-3.0
| 140 |
# Because of a long-running npm issue (https://github.com/npm/npm/issues/3059)
# prepublish runs after `npm install` and `npm pack`.
# In order to only run prepublish before `npm publish`, we have to check argv.
if node -e "process.exit(($npm_config_argv).original[0].indexOf('pu') === 0)"; then
exit 0;
fi
# Publishing to NPM is currently supported by Travis CI, which ensures that all
# tests pass first and the deployed module contains the correct file structure.
# In order to prevent inadvertently circumventing this, we ensure that a CI
# environment exists before continuing.
if [ "$CI" != true ]; then
echo "\n\n\n \033[101;30m Only Travis CI can publish to NPM. \033[0m" 1>&2;
echo " Ensure git is left is a good state by backing out any commits and deleting any tags." 1>&2;
echo " Then read CONTRIBUTING.md to learn how to publish to NPM.\n\n\n" 1>&2;
exit 1;
fi;
# When Travis CI publishes to NPM:
babel src --optional runtime --ignore __tests__ --out-dir dist/ && cp package.json dist/ && cp ./src/cli/run/static/* ./dist/cli/run/static/
|
meldio/meldio
|
resources/prepublish.sh
|
Shell
|
agpl-3.0
| 1,067 |
#! /bin/sh
./clipping.sh c1 c4
|
slitvinov/gts
|
test/clipping/clipping_c1_c4.sh
|
Shell
|
lgpl-2.1
| 32 |
ARCHIVE_PATH=/run/media/hughsie/Backup/mirror
VERSION=34
echo "Building applications..."
appstream-builder \
--verbose \
--veto-ignore=add-default-icons \
--min-icon-size=48 \
--enable-hidpi \
--include-failed \
--log-dir=./logs/fedora-${VERSION} \
--temp-dir=./tmp/fedora-${VERSION} \
--cache-dir=../cache-f${VERSION} \
--packages-dir=${ARCHIVE_PATH}/Fedora/f${VERSION}/Packages \
--packages-dir=${ARCHIVE_PATH}/Fedora/f${VERSION}-updates \
--packages-dir=${ARCHIVE_PATH}/Fedora/openh264 \
--output-dir=./metadata/f${VERSION} \
--basename=fedora-${VERSION} \
--origin=fedora | tee fedora-${VERSION}.log
# exit if failed
rc=$?; if [[ $rc != 0 ]]; then exit $rc; fi
echo "Extracting font screenshots"
cd ./metadata/f${VERSION}/source
tar -xvf ../fedora-${VERSION}-screenshots.tar
cd -
echo "Mirroring screenshots"
appstream-util mirror-screenshots \
./metadata/f${VERSION}/fedora-${VERSION}.xml.gz \
http://dl.fedoraproject.org/pub/alt/screenshots/f${VERSION} \
../cache ./metadata/f${VERSION}
echo "Creating status pages"
appstream-util non-package-yaml \
./metadata/f${VERSION}/fedora-${VERSION}.xml.gz \
./metadata/f${VERSION}/applications-to-import.yaml
appstream-util status-html \
./metadata/f${VERSION}/fedora-${VERSION}.xml.gz \
./metadata/f${VERSION}/status.html
appstream-util status-html \
./metadata/f${VERSION}/fedora-${VERSION}-failed.xml.gz \
./metadata/f${VERSION}/failed.html
appstream-util matrix-html \
./metadata/f${VERSION}/fedora-${VERSION}.xml.gz \
./metadata/f${VERSION}/fedora-${VERSION}-failed.xml.gz \
./metadata/f${VERSION}/matrix.html
echo "Uploading new metadata"
cd metadata/
./upload.sh
cd -
|
hughsie/appstream-scripts
|
fedora/fedora-34.sh
|
Shell
|
lgpl-2.1
| 1,742 |
#! /bin/sh
ANT_OPTS="-Xss8m -Xmx1024m" ant -lib dist/share/strategoxt/strategoxt.jar test
|
lichtemo/strategoxt
|
strategoxt/test.sh
|
Shell
|
lgpl-2.1
| 91 |
[ -d "node_modules/" ] || time npm install --verbose
cd cola
../node_modules/cordova/bin/cordova platform add android
../node_modules/cordova/bin/cordova plugin add cordova-plugin-geolocation
cd ..
|
manpages/cola
|
init.sh
|
Shell
|
lgpl-3.0
| 203 |
for v in 1.7.2 ; do
add_package http://www.physics.rutgers.edu/pythtb/_downloads/pythtb-$v.tar.gz
pack_set -s $IS_MODULE
pack_set --install-query $(pack_get --LD)/python$pV/site-packages/pythtb.py
# Add requirments when creating the module
pack_set --module-requirement numpy \
--module-requirement matplotlib
pack_cmd "$(get_parent_exec) setup.py build"
pack_cmd "$(get_parent_exec) setup.py install" \
"--prefix=$(pack_get --prefix)" \
done
|
zerothi/bash-build
|
python/pythtb.bash
|
Shell
|
lgpl-3.0
| 495 |
dir=`dirname "$0"`
java -Xmx2000M -Xss8m -Djava.library.path=lib -Djri.ignore.ule="yes" -jar "$dir/Mesquite.jar"
#================
#INSTRUCTIONS
#Increase the numbers above to enable more than 2000M total heap and 8M per thread for the stack. The latter enables larger trees, e.g. more than 5000 taxa.
#IF MESQUITE DOESN'T START, TRY THESE:
#The default command in this file as it is originally distributed is as follows (but without the "#"):
#java -Xmx2000M -Xss8m -Djava.library.path=lib -Djri.ignore.ule="yes" -jar "$dir/Mesquite.jar"
#That uses the default version of Java to run Mesquite. If you need to use another Java, put its full path instead of the word java. For instance, to run Java 1.8 on some macOS versions, use a command like this (but without the "#").
#/Library/Internet\ Plug-Ins/JavaAppletPlugin.plugin/Contents/Home/bin/java -Xmx2000M -Xss8m -Djava.library.path=lib -Djri.ignore.ule="yes" -jar "$dir/Mesquite.jar"
#On the other hand, if you use a new version of java, you may need to add an "add-export" flag as follows:
#java --add-exports java.desktop/com.apple.eawt=ALL-UNNAMED -Xmx2000M -Xss8m -Djava.library.path=lib -Djri.ignore.ule="yes" -jar "$dir/Mesquite.jar"
|
MesquiteProject/MesquiteCore
|
Executables/All OSs - Scripts and Instructions to Users/macOS/Mesquite_Starter-S.command
|
Shell
|
lgpl-3.0
| 1,200 |
cat ../include/UNIPAX/PERSISTENCE/MYSQL/EXT/UPBase.sql > ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/EXT/SBase.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/BIOPAX/Thing.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/EXT/Rule.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/BIOPAX/Entity.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/BIOPAX/UtilityClass.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/BIOPAX/Interaction.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/BIOPAX/PhysicalEntity.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/BIOPAX/ControlledVocabulary.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/BIOPAX/CellularLocationVocabulary.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/BIOPAX/EntityFeature.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/BIOPAX/EntityReference.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/BIOPAX/PathwayStep.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/BIOPAX/SequenceLocation.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/BIOPAX/Xref.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/BIOPAX/Control.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/BIOPAX/Conversion.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/BIOPAX/BiochemicalReaction.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/BIOPAX/BindingFeature.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/BIOPAX/BiochemicalPathwayStep.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/BIOPAX/BioSource.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/BIOPAX/Catalysis.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/BIOPAX/CellVocabulary.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/BIOPAX/ChemicalStructure.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/BIOPAX/ComplexAssembly.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/BIOPAX/Complex.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/BIOPAX/CovalentBindingFeature.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/BIOPAX/Degradation.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/BIOPAX/DeltaG.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/BIOPAX/Dna.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/BIOPAX/DnaReference.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/BIOPAX/DnaRegion.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/BIOPAX/DnaRegionReference.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/BIOPAX/EntityReferenceTypeVocabulary.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/BIOPAX/EvidenceCodeVocabulary.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/BIOPAX/Evidence.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/BIOPAX/ExperimentalForm.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/BIOPAX/ExperimentalFormVocabulary.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/BIOPAX/FragmentFeature.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/BIOPAX/Gene.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/BIOPAX/GeneticInteraction.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/BIOPAX/InteractionVocabulary.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/BIOPAX/KPrime.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/BIOPAX/ModificationFeature.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/BIOPAX/Modulation.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/BIOPAX/MolecularInteraction.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/BIOPAX/Pathway.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/BIOPAX/PhenotypeVocabulary.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/BIOPAX/Protein.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/BIOPAX/ProteinReference.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/BIOPAX/Provenance.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/BIOPAX/PublicationXref.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/BIOPAX/RelationshipTypeVocabulary.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/BIOPAX/RelationshipXref.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/BIOPAX/Rna.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/BIOPAX/RnaReference.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/BIOPAX/RnaRegion.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/BIOPAX/RnaRegionReference.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/BIOPAX/Score.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/BIOPAX/SequenceInterval.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/BIOPAX/SequenceModificationVocabulary.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/BIOPAX/SequenceRegionVocabulary.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/BIOPAX/SequenceSite.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/BIOPAX/SmallMolecule.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/BIOPAX/SmallMoleculeReference.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/BIOPAX/Stoichiometry.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/BIOPAX/TemplateReaction.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/BIOPAX/TemplateReactionRegulation.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/BIOPAX/TissueVocabulary.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/BIOPAX/Transport.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/BIOPAX/TransportWithBiochemicalReaction.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/BIOPAX/UnificationXref.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/EXT/AlgebraicRule.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/EXT/AssignmentRule.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/EXT/Constraint.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/EXT/DataMatrix.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/EXT/Delay.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/EXT/EventAssignment.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/EXT/Event.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/EXT/Experiment.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/EXT/FunctionDefinition.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/EXT/IdMapping.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/EXT/ImportSource.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/EXT/InitialAssignment.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/EXT/KineticLaw.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/EXT/LocalParameter.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/EXT/Math.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/EXT/Model.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/EXT/Parameter.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/EXT/Priority.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/EXT/RateRule.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/EXT/ResultObject.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/EXT/Sample.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/EXT/SampleData.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/EXT/Series.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/EXT/Trigger.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/EXT/UnitDefinition.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
cat ../include/UNIPAX/PERSISTENCE/MYSQL/EXT/Unit.sql >> ../include/UNIPAX/PERSISTENCE/MYSQL/UniPAX_MySQL.sql
|
thortiede/unipax
|
scripts/merge_sqls.sh
|
Shell
|
lgpl-3.0
| 11,765 |
#!/usr/bin/env bash
grunt
git add media
|
pipaslot/forms
|
pre-commit.sh
|
Shell
|
lgpl-3.0
| 40 |
#!/bin/bash
# wf.sh: Crude word frequency analysis on a text file.
# The "wf2.sh" script is a more efficient version of this one.
# Check for an input file passed on the command line.
ARGS=1
E_BADARGS=65
E_NOFILE=66
if [ $# -ne "$ARGS" ] # Correct number of arguments passed to the script?
then
echo "Usage: `basename $0` filename"
exit $E_BADARGS
fi
if [ ! -f "$1" ] # Check that the input file exists.
then
echo "File \"$1\" does not exist."
exit $E_NOFILE
fi
########################################################
# main ()
sed -e 's/\.//g' -e 's/\,//g' -e 's/ /\
/g' "$1" | tr 'A-Z' 'a-z' | sort | uniq -c | sort -nr
# =========================
# Frequency of word occurrence
# Filter out periods and commas,
#+ change the spaces between words to linefeeds,
#+ shift everything to lowercase,
#+ then count the occurrences and sort numerically by frequency.
# Arun Giridhar suggests modifying the above to:
# . . . | sort | uniq -c | sort +1 [-f] | sort +0 -nr
# This adds a secondary sort key, so words of
#+ equal frequency are sorted alphabetically.
# As he explains it:
# "This is effectively a radix sort, first on the
#+ least significant column
#+ (word or string, optionally case-insensitive)
#+ and last on the most significant column (frequency)."
#
# As Frank Wang explains, the above is equivalent to:
#+ . . . | sort | uniq -c | sort +0 -nr
#+ and the following also works:
#+ . . . | sort | uniq -c | sort -k1nr -k
########################################################
exit 0
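# Sample run (hypothetical input file), showing the "count word" output
#+ sorted by descending frequency:
#   $ ./wf.sh fish.txt      # file contains: one fish two fish
#         2 fish
#         1 two
#         1 one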
# Exercises:
# ---------
# 1) Add 'sed' commands to filter out other punctuation,
#+ such as semicolons.
# 2) Modify the script to also filter out multiple spaces
#+ and other whitespace.
|
liruiqiu/tlpi
|
sh/sh/wf.sh
|
Shell
|
lgpl-3.0
| 1,452 |
cd core
mvn clean install -DskipTests -q
cd ../api
mvn clean package -DskipTests -q
cp target/api-1.0-jar-with-dependencies.jar ../blip.jar
|
mauro-idsia/blip
|
compile-blip.sh
|
Shell
|
lgpl-3.0
| 146 |
#!/bin/bash
DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
cd "${DIR}"
benchmark_dir="${DIR}/benchmarks"
rm -rf "${benchmark_dir}"
mkdir -p "${benchmark_dir}"
overwrite_file() {
local file_content="#ifndef SETTINGS_H_
#define SETTINGS_H_
#define PRIME_FIELD_BINARY_BIT_LENGTH (131)
#define LIMB_SIZE_IN_BITS (${limb_size_in_bits}) /* 8, 16, 32, 64 */
#define FULL_LIMB_PRECISION (${full_limb_precision}) /* 0, 1 */
#define MULX (${mulx}) /* 0, 1: Only useful if LIMB_SIZE_IN_BITS == 64 */
#define SIMD_PARALLEL_WALKS (${simd_parallel_walks}) /* 0, 1 */
#define TEST (0) /* 0, 1 */
#define BENCHMARK_PRIME_FIELD_ARITHMETIC (${benchmark_prime_field_arithmetic}) /* 0, 1 */
#define BENCHMARK_ELLIPTIC_CURVE_ARITHMETIC (${benchmark_elliptic_curve_arithmetic}) /* 0, 1 */
#define BENCHMARK_USE_GMP (${benchmark_use_gmp}) /* 0, 1 */
#endif /* SETTINGS_H_ */"
echo "${file_content}" > "settings.h"
}
compile() {
cd "${DIR}/Release"
make clean
make all
}
get_result_file_name() {
if [ ${benchmark_prime_field_arithmetic} -eq 1 ]; then
echo "primefield_${benchmark_use_gmp}_${limb_size_in_bits}_${full_limb_precision}_${mulx}_${simd_parallel_walks}"
else
echo "point_${benchmark_use_gmp}_${limb_size_in_bits}_${full_limb_precision}_${mulx}_${simd_parallel_walks}"
fi
}
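# Example: gmp=0, 64-bit limbs, full precision, mulx on, SIMD off yields
# a result file named "primefield_0_64_1_1_0" (or "point_..." for the
# elliptic curve benchmark).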
benchmark() {
cd "${DIR}/Release"
./pollard_rho > "${benchmark_dir}/$(get_result_file_name)"
}
LIMB_SIZE_IN_BITS="8 16 32 64"
FULL_LIMB_PRECISION="0 1"
SIMD_PARALLEL_WALKS="0 1"
BENCHMARK_USE_GMP="0 1"
for limb_size_in_bits in ${LIMB_SIZE_IN_BITS}; do
for full_limb_precision in ${FULL_LIMB_PRECISION}; do
for simd_parallel_walks in ${SIMD_PARALLEL_WALKS}; do
for benchmark_use_gmp in ${BENCHMARK_USE_GMP}; do
if [ ${limb_size_in_bits} -eq 64 ] && [ ${simd_parallel_walks} -eq 0 ]; then
mulx="1"
else
mulx="0"
fi
for benchmark_prime_field_arithmetic in 0 1; do
if [ ${benchmark_prime_field_arithmetic} -eq 0 ]; then
benchmark_elliptic_curve_arithmetic=1
else
benchmark_elliptic_curve_arithmetic=0
fi
overwrite_file
compile
benchmark
done
done
done
done
done
|
sahandKashani/prime-field-arithmetic-AVX2
|
benchmark.sh
|
Shell
|
unlicense
| 2,605 |
#!/bin/bash
DIRNAME=$(dirname $0)
DATE_INTERVAL=1
CPU_INTERVAL=1
RED="^fg(red)"
GREEN="^fg(green)"
ORANGE="^fg(orange)"
NORMAL="^fg(#ffffff)"
ICON_DIR=${DIRNAME}/../icons
function dateformat
{
date "+%A %d.%m.%Y %H:%M:%S"
}
function cputemp
{
TEMP=$1
TEMP_NUM=$(echo $TEMP | sed -e 's/°C//g')
if [ $(echo "${TEMP_NUM} >= 60" | bc) -eq 1 ]
then
COLOR="${RED}"
elif [ $(echo "${TEMP_NUM} >= 45" | bc) -eq 1 ]
then
COLOR="${ORANGE}"
else
COLOR="${GREEN}"
fi
echo "${COLOR}${TEMP}${NORMAL}"
}
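# print_status is presumably called repeatedly (once per loop tick) by the
# dzen feed; the *_CPT counters throttle how often each item is refreshed.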
function print_status
{
if [ ! $DATE_CPT ]
then
export DATE_CPT=${DATE_INTERVAL}
else
export DATE_CPT=$DATE_CPT
fi
if [ ! ${CPU_CPT} ]
then
export CPU_CPT=${CPU_INTERVAL}
else
export CPU_CPT=${CPU_CPT}
fi
if [ $DATE_CPT -ge $DATE_INTERVAL ]
then
PDATE=$(dateformat)
DATE_CPT=0
fi
if [ $CPU_CPT -ge $CPU_INTERVAL ]
then
TEMP=$(sensors | grep ^temp | awk '{print $2}' | tr -d "+")
TEMP_1=$(echo -e "$TEMP" | head -1)
TEMP_2=$(echo -e "$TEMP" | tail -1)
PCPU="$(cputemp ${TEMP_1})/$(cputemp ${TEMP_2})"
CPU_CPT=0
fi
#echo "^fg($GREEN)${PCPU} ^fg($NORMAL) ^p(3)^r(3x3)^p(3) $PDATE"
echo "${PCPU}$NORMAL ^i(${ICON_DIR}/separator.xbm) $PDATE"
DATE_CPT=$((${DATE_CPT}+1))
CPU_CPT=$((${CPU_CPT}+1))
}
|
agoulamhoussen/dotFiles
|
home/.xmonad/scripts/status.sh
|
Shell
|
unlicense
| 1,249 |
source ~/stackrc
time openstack overcloud deploy --templates \
-r ~/custom-templates/custom-roles-mixed.yaml \
-e /usr/share/openstack-tripleo-heat-templates/environments/puppet-pacemaker.yaml \
-e /usr/share/openstack-tripleo-heat-templates/environments/network-isolation.yaml \
-e /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml \
-e ~/custom-templates/network-mixed.yaml \
-e ~/custom-templates/ceph-mixed.yaml \
-e ~/custom-templates/compute-mixed.yaml \
-e ~/custom-templates/layout-mixed.yaml
|
RHsyseng/hci
|
other-scenarios/mixed-nodes/deploy-mixed.sh
|
Shell
|
apache-2.0
| 533 |
#!/bin/bash -e
# Copyright 2018 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
image_name=gcr.io/ml-pipeline/sample/keras/train_classifier
image_tag=latest
full_image_name=${image_name}:${image_tag}
base_image_tag=1.12.0-py3
cd "$(dirname "$0")"
docker build --build-arg BASE_IMAGE_TAG=$base_image_tag -t "$full_image_name" .
docker push "$full_image_name"
#Output the strict image name (which contains the sha256 image digest)
#This name can be used by the subsequent steps to refer to the exact image that was built even if another image with the same name was pushed.
image_name_with_digest=$(docker inspect --format="{{index .RepoDigests 0}}" "$full_image_name")
strict_image_name_output_file=./versions/image_digests_for_tags/$image_tag
mkdir -p "$(dirname "$strict_image_name_output_file")"
echo $image_name_with_digest | tee "$strict_image_name_output_file"
|
kubeflow/pipelines
|
components/contrib/sample/keras/train_classifier/build_image.sh
|
Shell
|
apache-2.0
| 1,385 |
#!/usr/bin/env bash
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
ROOT="$( cd $DIR && cd .. && pwd)"
LIB="$ROOT"/lib
RESOURCES="$( cd $ROOT && cd .. && pwd)"/vua-resources
# assumes vua-resources is installed next to this installation
# git clone https://github.com/cltl/vua-resources.git
#
# the software assume that the input files are in NAF format minially with the following layers:
# - tokens, terms, entities, coreference for events, srl, timeExpressions
# to create an event coreference layer, use the event-coreference scripts
#DUTCH (uncomment to use the Dutch ontology instead)
#ontology="$RESOURCES"/dbpedia_nl_types.tsv.gz
#ENGLISH
ontology="$RESOURCES"/instance_types_en.ttl.gz
java -Xmx2000m -cp "$LIB/EventCoreference-v3.1.2-jar-with-dependencies.jar" eu.newsreader.eventcoreference.naf.GetSimpleSemFromNafFolder --naf-folder $1 --extension ".naf" --ontology $ontology
|
cltl/EventCoreference
|
scripts/naf-folder-to-simple-trig.sh
|
Shell
|
apache-2.0
| 852 |
#!/usr/bin/env bash
apt-get update
apt-get upgrade -y
mkdir /etc/redis
mkdir /var/redis
mkdir /var/redis/6379
cd
wget http://download.redis.io/redis-stable.tar.gz
tar xvzf redis-stable.tar.gz
cd redis-stable
make
make install
cp utils/redis_init_script /etc/init.d/redis_6379
cp /vagrant/redis.conf /etc/redis/6379.conf
update-rc.d redis_6379 defaults
/etc/init.d/redis_6379 start
|
everlution/redlock
|
tests/vagrant/bootstrap.sh
|
Shell
|
apache-2.0
| 382 |
#!/bin/bash
set -e
# Builds or removes Go plugin object code.
#
# Specify plugin root as first arg, e.g.
#
# ./hack/buildExternalGoPlugins.sh $KUSTOMIZE_PLUGIN_HOME
# ./hack/buildExternalGoPlugins.sh $XDG_CONFIG_HOME/kustomize/plugin
# ./hack/buildExternalGoPlugins.sh ${HOME}/.config/kustomize/plugin
# ./hack/buildExternalGoPlugins.sh ./plugin
#
# add 2nd arg "clean" to remove instead of build.
root=$1
if [ ! -d $root ]; then
echo "Don't see directory $root."
exit 1
fi
fn=buildPlugin
if [ "$2" == "clean" ]; then
fn=removePlugin
fi
function buildPlugin {
echo "Building $1/$2.so"
# Change dir so local go.mod applies
pushd $1 >& /dev/null
go build -buildmode plugin -o $2.so $2.go
popd >& /dev/null
}
function removePlugin {
local f="$1/$2.so"
if [ -f "$f" ]; then
echo "Removing $f"
rm "$f"
fi
}
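# Discover Go plugin sources: any .go file outside builtin/ that declares the
# exported 'KustomizePlugin' symbol is treatedated as an external plugin source.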
goPlugins=$(
find $root -name "*.go" |
grep -v builtin/ |
xargs grep -l "var KustomizePlugin")
for p in $goPlugins; do
d=$(dirname "$p")
n=$(basename "$p" | cut -f 1 -d '.')
$fn $d $n
done
|
kubernetes-sigs/kustomize
|
hack/buildExternalGoPlugins.sh
|
Shell
|
apache-2.0
| 1,048 |
# -----------------------------------------------------------------------------
#
# Package : github.com/storageos/go-api
# Version : v0.0.0-20180912212459-343b3eff91fc
# Source repo : https://github.com/storageos/go-api
# Tested on : RHEL 8.3
# Script License: Apache License, Version 2 or later
# Maintainer : BulkPackageSearch Automation <[email protected]>
#
# Disclaimer: This script has been tested in root mode on given
# ========== platform using the mentioned version of the package.
# It may not work as expected with newer versions of the
# package and/or distribution. In such case, please
# contact "Maintainer" of this script.
#
# ----------------------------------------------------------------------------
PACKAGE_NAME=github.com/storageos/go-api
PACKAGE_VERSION=v0.0.0-20180912212459-343b3eff91fc
PACKAGE_URL=https://github.com/storageos/go-api
yum -y update && yum install -y nodejs nodejs-devel nodejs-packaging npm python38 python38-devel ncurses git jq wget gcc-c++
wget https://golang.org/dl/go1.16.1.linux-ppc64le.tar.gz && tar -C /bin -xf go1.16.1.linux-ppc64le.tar.gz && mkdir -p /home/tester/go/src /home/tester/go/bin /home/tester/go/pkg
export PATH=$PATH:/bin/go/bin
export GOPATH=/home/tester/go
OS_NAME=`python3 -c "os_file_data=open('/etc/os-release').readlines();os_info = [i.replace('PRETTY_NAME=','').strip() for i in os_file_data if i.startswith('PRETTY_NAME')];print(os_info[0])"`
export PATH=$GOPATH/bin:$PATH
export GO111MODULE=on
function test_with_master_without_flag_u(){
echo "Building $PACKAGE_PATH with master branch"
export GO111MODULE=auto
if ! go get -d -t $PACKAGE_NAME; then
echo "------------------$PACKAGE_NAME:install_fails-------------------------------------"
echo "$PACKAGE_VERSION $PACKAGE_NAME" > /home/tester/output/install_fails
echo "$PACKAGE_NAME | $PACKAGE_VERSION | master | $OS_NAME | GitHub | Fail | Install_Fails" > /home/tester/output/version_tracker
exit 0
else
cd $(ls -d $GOPATH/pkg/mod/$PACKAGE_NAME*)
echo "Testing $PACKAGE_PATH with master branch without flag -u"
# Ensure go.mod file exists
go mod init
if ! go test ./...; then
echo "------------------$PACKAGE_NAME:install_success_but_test_fails---------------------"
echo "$PACKAGE_VERSION $PACKAGE_NAME" > /home/tester/output/test_fails
echo "$PACKAGE_NAME | $PACKAGE_VERSION | master | $OS_NAME | GitHub | Fail | Install_success_but_test_Fails" > /home/tester/output/version_tracker
exit 0
else
echo "------------------$PACKAGE_NAME:install_&_test_both_success-------------------------"
echo "$PACKAGE_VERSION $PACKAGE_NAME" > /home/tester/output/test_success
echo "$PACKAGE_NAME | $PACKAGE_VERSION | master | $OS_NAME | GitHub | Pass | Both_Install_and_Test_Success" > /home/tester/output/version_tracker
exit 0
fi
fi
}
function test_with_master(){
echo "Building $PACKAGE_PATH with master"
export GO111MODULE=auto
if ! go get -d -u -t $PACKAGE_NAME@$PACKAGE_VERSION; then
test_with_master_without_flag_u
exit 0
fi
cd $(ls -d $GOPATH/pkg/mod/$PACKAGE_NAME*)
echo "Testing $PACKAGE_PATH with $PACKAGE_VERSION"
# Ensure go.mod file exists
go mod init
if ! go test ./...; then
test_with_master_without_flag_u
exit 0
else
echo "------------------$PACKAGE_NAME:install_&_test_both_success-------------------------"
echo "$PACKAGE_VERSION $PACKAGE_NAME" > /home/tester/output/test_success
echo "$PACKAGE_NAME | $PACKAGE_VERSION | $PACKAGE_VERSION | $OS_NAME | GitHub | Pass | Both_Install_and_Test_Success" > /home/tester/output/version_tracker
exit 0
fi
}
function test_without_flag_u(){
echo "Building $PACKAGE_PATH with $PACKAGE_VERSION and without -u flag"
if ! go get -d -t $PACKAGE_NAME@$PACKAGE_VERSION; then
test_with_master
exit 0
fi
cd $(ls -d $GOPATH/pkg/mod/$PACKAGE_NAME*)
echo "Testing $PACKAGE_PATH with $PACKAGE_VERSION"
# Ensure go.mod file exists
go mod init
if ! go test ./...; then
test_with_master
exit 0
else
echo "------------------$PACKAGE_NAME:install_&_test_both_success-------------------------"
echo "$PACKAGE_VERSION $PACKAGE_NAME" > /home/tester/output/test_success
echo "$PACKAGE_NAME | $PACKAGE_VERSION | $PACKAGE_VERSION | $OS_NAME | GitHub | Pass | Both_Install_and_Test_Success" > /home/tester/output/version_tracker
exit 0
fi
}
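# Fallback chain (as scripted): try 'go get -u' at the pinned version first;
# on failure retry without -u, then retry against master with and without -u.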
echo "Building $PACKAGE_PATH with $PACKAGE_VERSION"
if ! go get -d -u -t $PACKAGE_NAME@$PACKAGE_VERSION; then
test_without_flag_u
exit 0
fi
cd $(ls -d $GOPATH/pkg/mod/$PACKAGE_NAME*)
echo "Testing $PACKAGE_PATH with $PACKAGE_VERSION"
# Ensure go.mod file exists
go mod init
if ! go test ./...; then
test_with_master
exit 0
else
echo "------------------$PACKAGE_NAME:install_&_test_both_success-------------------------"
echo "$PACKAGE_VERSION $PACKAGE_NAME" > /home/tester/output/test_success
echo "$PACKAGE_NAME | $PACKAGE_VERSION | $PACKAGE_VERSION | $OS_NAME | GitHub | Pass | Both_Install_and_Test_Success" > /home/tester/output/version_tracker
exit 0
fi
|
ppc64le/build-scripts
|
g/github.com__storageos__go-api/github.com__storageos__go-api_rhel_8.3.sh
|
Shell
|
apache-2.0
| 5,121 |
#!/bin/bash
# Get Project Repo
aspectran_repo=$(git config --get remote.origin.url 2>&1)
echo "Repo detected: ${aspectran_repo}"
# Get Commit Message
commit_message=$(git log --format=%B -n 1)
echo "Current commit detected: ${commit_message}"
# Get the Java version.
# Java 1.5 will give 15.
# Java 1.6 will give 16.
# Java 1.7 will give 17.
# Java 1.8 will give 18.
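# e.g. a banner line of 'java version "1.8.0_181"' yields VER=18 (illustrative).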
VER=`java -version 2>&1 | sed 's/java version "\(.*\)\.\(.*\)\..*"/\1\2/; 1q'`
echo "Java detected: ${VER}"
# We build for several JDKs on Travis.
# Some actions, like analyzing the code (Coveralls) and uploading
# artifacts on a Maven repository, should only be made for one version.
if [ "$aspectran_repo" == "https://github.com/aspectran/aspectran-parent.git" ] && [ "$TRAVIS_PULL_REQUEST" == "false" ] && [ "$TRAVIS_BRANCH" == "master" ] && [[ "$commit_message" != *"[maven-release-plugin]"* ]]; then
if [ $VER == "18" ]; then
mvn clean deploy -q --settings ./travis/settings.xml
echo -e "Successfully deployed SNAPSHOT artifacts to Sonatype under Travis job ${TRAVIS_JOB_NUMBER}"
fi
else
echo "Travis build skipped"
fi
|
aspectran/aspectran-parent
|
travis/after_success.sh
|
Shell
|
apache-2.0
| 1,110 |
#!/bin/bash -x
# -------------------------------------------------------------------------- #
# Copyright 2011-2012, Research In Motion (rim.com) #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
LOGFILE=/tmp/hbasemaster.log
if [ -f /mnt/context.sh ]; then
. /mnt/context.sh
HOME=/home/$DEFUSER
else
. $HOME/context.sh
fi
setupmaster () {
echo "Starting HBase master install " >> $LOGFILE
wget http://$CARINA_IP/cgi-bin/updateappstatus.sh?service=$SERVICE_NAME\&vmid=$VMID\&envid=$ENVID\&status=APP_INSTALL_START 2> /dev/null
yes | apt-get install curl
curl -s http://archive.cloudera.com/debian/archive.key | sudo apt-key add -
# Install Java
cd /usr/local/share
wget http://$CARINA_IP/cgi-bin/updateappstatus.sh?service=$SERVICE_NAME\&vmid=$VMID\&envid=$ENVID\&status=JDK_INSTALL_START 2> /dev/null
wget http://$CARINA_IP/downloads/jdk-6u27-linux-x64.bin
chmod +x ./jdk-6u27-linux-x64.bin
yes | ./jdk-6u27-linux-x64.bin
cat >> /etc/apt/sources.list.d/cloudera.list << EOF
deb http://archive.cloudera.com/debian maverick-cdh3 contrib
deb-src http://archive.cloudera.com/debian maverick-cdh3 contrib
EOF
wget http://$CARINA_IP/cgi-bin/updateappstatus.sh?service=$SERVICE_NAME\&vmid=$VMID\&envid=$ENVID\&status=HBASE_INSTALL_START 2> /dev/null
apt-get update
yes | apt-get install hadoop-0.20-namenode
yes | apt-get install hadoop-0.20-datanode
yes | apt-get install hadoop-0.20-secondarynamenode
yes | apt-get install hadoop-hbase-master
yes | apt-get install hadoop-hbase-regionserver
# Quote the heredoc delimiter so $JAVA_HOME/$PATH are written literally
# into the file instead of being expanded at install time.
cat >> /etc/default/hadoop-hbase << 'EOF'
export JAVA_HOME=/usr/local/share/jdk1.6.0_27/
export PATH=$JAVA_HOME/bin:$PATH
EOF
wget http://$CARINA_IP/cgi-bin/updateappstatus.sh?service=$SERVICE_NAME\&vmid=$VMID\&envid=$ENVID\&status=HBASE_CONFIG_START 2> /dev/null
mkdir -p /var/lib/hadoop-0.20/cache/hadoop/dfs/name
cd /var/lib
chgrp -R hadoop hadoop-0.20
cd /var/lib/hadoop-0.20/cache/hadoop/
chown -R hdfs dfs
mkdir -p /var/lib/hadoop-0.20/cache/hdfs/dfs/data
chgrp -R hdfs /var/lib/hadoop-0.20/cache/hdfs/
chown -R hdfs /var/lib/hadoop-0.20/cache/hdfs/
mkdir -p /var/lib/hadoop-0.20/cache/hdfs/dfs/namesecondary
cd /var/lib/hadoop-0.20/cache
chown -R hdfs hdfs
chgrp -R hdfs hdfs
cd $HADOOP_CONF
mv core-site.xml core-site.xml.bak
mv hdfs-site.xml hdfs-site.xml.bak
wget http://$CARINA_IP/downloads/core-site.xml
wget http://$CARINA_IP/downloads/hdfs-site.xml
sed -i -e "s/%HBASE_MASTER_NAME/$MASTER/g" core-site.xml
cd $HBASE_CONF
mv hbase-site.xml hbase-site.xml.bak
wget http://$CARINA_IP/downloads/hbase-site.xml
sed -i -e "s/%HBASE_MASTER_NAME/$MASTER/g" hbase-site.xml
# To allow slaves to download my config files (assume web server installed)
DOWNLOAD_DIR=/var/www/downloads
mkdir -p $DOWNLOAD_DIR
ln -s $HADOOP_CONF/core-site.xml $DOWNLOAD_DIR/core-site.xml
ln -s $HADOOP_CONF/hdfs-site.xml $DOWNLOAD_DIR/hdfs-site.xml
ln -s $HBASE_CONF/hbase-site.xml $DOWNLOAD_DIR/hbase-site.xml
echo $MASTER > /usr/lib/hadoop-0.20/conf/masters
echo $MASTER > /usr/lib/hadoop-0.20/conf/slaves
echo $MASTER > /usr/lib/hbase/conf/regionservers
# Have to make slaves file writeable by default VM admin ($DEFUSER)
# so slaves can be added at run time
chmod 0666 /usr/lib/hadoop-0.20/conf/slaves
chmod 0666 /usr/lib/hbase/conf/regionservers
wget http://$CARINA_IP/cgi-bin/updateappstatus.sh?service=$SERVICE_NAME\&vmid=$VMID\&envid=$ENVID\&status=HBASE_DBFORMAT_START 2> /dev/null
# Format the namenode
/bin/su hdfs -c "echo Y | /usr/lib/hadoop/bin/hadoop namenode -format"
# Bring up the HDFS layer
wget http://$CARINA_IP/cgi-bin/updateappstatus.sh?service=$SERVICE_NAME\&vmid=$VMID\&envid=$ENVID\&status=HADOOP_SVCS_START 2> /dev/null
/usr/lib/zookeeper/bin/zkServer.sh start
/etc/init.d/hadoop-0.20-datanode start
/etc/init.d/hadoop-0.20-namenode start
/etc/init.d/hadoop-0.20-secondarynamenode start
# A little delay to make sure HDFS comes up ok
sleep 30
# Set up directories/permissions for HBase
/bin/su hdfs -c "/usr/lib/hadoop/bin/hadoop fs -mkdir /hbase"
/bin/su hdfs -c "/usr/lib/hadoop/bin/hadoop fs -chown hbase /hbase"
wget http://$CARINA_IP/cgi-bin/updateappstatus.sh?service=$SERVICE_NAME\&vmid=$VMID\&envid=$ENVID\&status=HBASE_SVCS_START 2> /dev/null
/etc/init.d/hadoop-hbase-regionserver start
/etc/init.d/hadoop-hbase-master start
# Copy self to allow it to be invoked later to add/remove IPs via ssh
cp /mnt/context.sh /home/$DEFUSER/context.sh
cp /mnt/hbasemaster.sh /home/$DEFUSER/hbasemaster.sh
wget http://$CARINA_IP/cgi-bin/updateappstatus.sh?service=$SERVICE_NAME\&vmid=$VMID\&envid=$ENVID\&status=MASTER_INIT_DONE 2> /dev/null
}
add_slave() {
echo $SLAVE_IP >> $HADOOP_CONF/slaves
echo $SLAVE_IP >> $HBASE_CONF/regionservers
}
delete_slave() {
echo "Delete slave"
# Tell HDFS to exclude the node
echo $1 >> $HADOOP_CONF/excludes
hadoop dfsadmin -refreshNodes
# Wait for node to be decomissioned
cnt=10
while [ $cnt -ge 0 ]; do
STATUS=`hadoop dfsadmin -report | grep -A 1 $1 | grep Status | awk '{print $4}'`
if [[ $STATUS != "Decomissioned" ]]; then
echo "Waiting for node to be decomissioned"
sleep 30
else
break
fi
cnt=`expr $cnt - 1`
done
# Remove the slave from the excludes, slaves and regionservers file
grep -Ev "$1" $HADOOP_CONF/excludes > $HADOOP_CONF/excludes.new
mv $HADOOP_CONF/excludes.new $HADOOP_CONF/excludes
grep -Ev "$1" $HADOOP_CONF/slaves > $HADOOP_CONF/slaves.new
mv $HADOOP_CONF/slaves.new $HADOOP_CONF/slaves
grep -Ev "$1" $HBASE_CONF/regionservers > $HBASE_CONF/regionservers.new
mv $HBASE_CONF/regionservers.new $HBASE_CONF/regionservers
# Refresh the nodes
hadoop dfsadmin -refreshNodes
}
MASTER=`hostname`
HADOOP_CONF=/usr/lib/hadoop-0.20/conf
HBASE_CONF=/usr/lib/hbase/conf
OPER=$1
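# Usage (illustrative): no argument or 'init' performs the full master setup;
# 'add <ip>' registers a slave; 'delete <x> <ip>' decommissions a slave
# (note the IP is read from the third argument).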
if [[ $OPER == "init" || $OPER == "" ]]; then
setupmaster
fi
if [[ $OPER == "add" ]]; then
SLAVE_IP=$2
add_slave
fi
if [[ $OPER == "delete" ]]; then
SLAVE_IP=$3
delete_slave "$SLAVE_IP"
fi
|
blackberry/OpenNebula-Carina
|
context/hbasemaster.sh
|
Shell
|
apache-2.0
| 7,141 |
#!/bin/bash
#
# Options
# -g: Use global(Public) IP in network communication. Its value can be true or false. Default value is false.
#
# -s: Use Public IP as server name. Its value can be true or false. Default value is false.
#
# -r: Replace candidate address with server name. Its value can be true or false. Default value is false
#
# -m: Server mode. It can be standalone or cluster. If cluster mode is specified then mongodb host, username and password should also be provided.
# There is no default value for mode
#
# -h: MongoDB host
#
# -u: MongoDB username
#
# -p: MongoDB password
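#
# Example invocation (illustrative values):
# ./start.sh -g true -s true -m cluster -h 127.0.0.1 -u mongo_user -p mongo_pass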
if [ -z "$RED5_HOME" ]; then
BASEDIR=$(dirname "$0")
cd $BASEDIR
export RED5_HOME=`pwd`
fi
source $RED5_HOME/conf/functions.sh
USE_GLOBAL_IP=false
USE_PUBLIC_IP_AS_SERVER_NAME=false
REPLACE_CANDIDATE_ADDRESS_WITH_SERVER_NAME=false
SERVER_MODE=
MONGODB_HOST=
MONGODB_USERNAME=
MONGODB_PASSWORD=
while getopts g:s:r:m:h:u:p:t option
do
case "${option}" in
g) USE_GLOBAL_IP=${OPTARG};;
s) USE_PUBLIC_IP_AS_SERVER_NAME=${OPTARG};;
r) REPLACE_CANDIDATE_ADDRESS_WITH_SERVER_NAME=${OPTARG};;
m) SERVER_MODE=${OPTARG};;
h) MONGODB_HOST=${OPTARG};;
u) MONGODB_USERNAME=${OPTARG};;
p) MONGODB_PASSWORD=${OPTARG};;
esac
done
OS_NAME=`uname`
if [ "$OS_NAME" = "Darwin" ]; then
AMS_INSTALL_LOCATION=`pwd`
SED_COMPATIBILITY='.bak'
fi
# Set use global IP
sed -i $SED_COMPATIBILITY 's/useGlobalIp=.*/useGlobalIp='$USE_GLOBAL_IP'/' $RED5_HOME/conf/red5.properties
################################################
# Set server name
SERVER_ADDRESS=
if [ "$USE_PUBLIC_IP_AS_SERVER_NAME" = "true" ]; then
# get server public ip address
SERVER_ADDRESS=`curl -s http://checkip.amazonaws.com`
fi
sed -i $SED_COMPATIBILITY 's/server.name=.*/server.name='$SERVER_ADDRESS'/' $RED5_HOME/conf/red5.properties
################################################
################################################
# Replace candidate with Server address property
replaceCandidateAddressWithServer() {
# first parameter is the properties file of the application
# second parameter is the value of the property
if [ $(grep -E "settings.replaceCandidateAddrWithServerAddr" $1 | wc -l) -eq "0" ]; then
echo " " >> $1 #add new line
echo "settings.replaceCandidateAddrWithServerAddr=$2" >> $1
else
sed -i $SED_COMPATIBILITY 's/settings.replaceCandidateAddrWithServerAddr=.*/settings.replaceCandidateAddrWithServerAddr='$2'/' $1
fi
}
LIST_APPS=`ls -d $RED5_HOME/webapps/*/`
for i in $LIST_APPS; do
replaceCandidateAddressWithServer $i/WEB-INF/red5-web.properties $REPLACE_CANDIDATE_ADDRESS_WITH_SERVER_NAME
done
################################################
################################################
# Set server mode cluster or standalone. The change_server_mode function used below is defined in functions.sh
if [ ! -z "${SERVER_MODE}" ]; then
change_server_mode $SERVER_MODE $MONGODB_HOST $MONGODB_USERNAME $MONGODB_PASSWORD
fi
################################################
P=":" # The default classpath separator
OS=`uname`
case "$OS" in
CYGWIN*|MINGW*) # Windows Cygwin or Windows MinGW
P=";" # Since these are actually Windows, let Java know
;;
Linux*)
ARCH=`uname -m`
LD_LIBRARY_PATH=$RED5_HOME/lib/native-linux-$ARCH
export LD_LIBRARY_PATH
# Native path
# First arch parameter is running start.sh directly and second lib/native parameter is installation for init.d scripts
NATIVE="-Djava.library.path=$LD_LIBRARY_PATH:$RED5_HOME/lib/native"
;;
Darwin*)
DYLD_LIBRARY_PATH=$DYLD_LIBRARY_PATH:$RED5_HOME/lib/native-mac
export DYLD_LIBRARY_PATH
# Native path
NATIVE="-Djava.library.path=$DYLD_LIBRARY_PATH:$RED5_HOME/lib/native"
;;
SunOS*)
if [ -z "$JAVA_HOME" ]; then
export JAVA_HOME=/opt/local/java/sun6;
fi
;;
*)
# Do nothing
;;
esac
echo "Running on " $OS
# JAVA options
# You can set JVM additional options here if you want
if [ -z "$JVM_OPTS" ]; then
JVM_OPTS="-Xms256m -Djava.awt.headless=true -Xverify:none -XX:+HeapDumpOnOutOfMemoryError -XX:+TieredCompilation -XX:+UseBiasedLocking -XX:InitialCodeCacheSize=8m -XX:ReservedCodeCacheSize=32m -Dorg.terracotta.quartz.skipUpdateCheck=true -XX:MaxMetaspaceSize=128m -XX:+UseG1GC -XX:MaxGCPauseMillis=100 -XX:ParallelGCThreads=10 -XX:ConcGCThreads=5 -Djava.system.class.loader=org.red5.server.classloading.ServerClassLoader -Xshare:off "
fi
# Set up security options
SECURITY_OPTS="-Djava.security.debug=failure -Djava.security.egd=file:/dev/./urandom"
# Set up tomcat options
TOMCAT_OPTS="-Dcatalina.home=$RED5_HOME -Dcatalina.useNaming=true -Djava.net.preferIPv4Stack=true"
# Jython options
JYTHON="-Dpython.home=lib"
export JAVA_OPTS="$SECURITY_OPTS $JAVA_OPTS $JVM_OPTS $TOMCAT_OPTS $NATIVE $JYTHON"
if [ -z "$RED5_MAINCLASS" ]; then
export RED5_MAINCLASS=org.red5.server.Bootstrap
fi
if [ -z "$RED5_OPTS" ]; then
export RED5_OPTS=9999
fi
for JAVA in "${JAVA_HOME}/bin/java" "${JAVA_HOME}/Home/bin/java" "/usr/bin/java" "/usr/local/bin/java"
do
if [ -x "$JAVA" ]
then
break
fi
done
if [ ! -x "$JAVA" ]
then
echo "Unable to locate Java. Please set JAVA_HOME environment variable."
exit
fi
export RED5_CLASSPATH="${RED5_HOME}/ant-media-server-service.jar${P}${RED5_HOME}/conf${P}${CLASSPATH}"
# create log directory if not exist
if [ ! -d "/var/log/antmedia" ]
then
mkdir /var/log/antmedia
fi
#create soft link if not exists
if [ ! -L "${RED5_HOME}/log" ]
then
ln -sf /var/log/antmedia ${RED5_HOME}/log
fi
# start Ant Media Server
if [ "$RED5_MAINCLASS" = "org.red5.server.Bootstrap" ]; then
# start Ant Media Server
echo "Starting Ant Media Server"
elif [ "$RED5_MAINCLASS" = "org.red5.server.Shutdown" ]; then
# stop Ant Media Server
echo "Stopping Ant Media Server"
fi
exec "$JAVA" -Dred5.root="${RED5_HOME}" $JAVA_OPTS -cp "${RED5_CLASSPATH}" "$RED5_MAINCLASS" $RED5_OPTS 2>>${RED5_HOME}/log/antmedia-error.log
|
ant-media/Ant-Media-Server
|
src/main/server/start.sh
|
Shell
|
apache-2.0
| 5,975 |
# -----------------------------------------------------------------------------
#
# Package : mime-db
# Version : 1.45.0
# Source repo : https://github.com/jshttp/mime-db
# Tested on : RHEL 8.3
# Script License: Apache License, Version 2 or later
# Maintainer : BulkPackageSearch Automation <[email protected]>
#
# Disclaimer: This script has been tested in root mode on given
# ========== platform using the mentioned version of the package.
# It may not work as expected with newer versions of the
# package and/or distribution. In such case, please
# contact "Maintainer" of this script.
#
# ----------------------------------------------------------------------------
PACKAGE_NAME=mime-db
PACKAGE_VERSION=1.45.0
PACKAGE_URL=https://github.com/jshttp/mime-db
yum -y update && yum install -y yum-utils nodejs nodejs-devel nodejs-packaging npm python38 python38-devel ncurses git gcc gcc-c++ libffi libffi-devel ncurses git jq make cmake
yum-config-manager --add-repo http://rhn.pbm.ihost.com/rhn/latest/8.3Server/ppc64le/appstream/
yum-config-manager --add-repo http://rhn.pbm.ihost.com/rhn/latest/8.3Server/ppc64le/baseos/
yum-config-manager --add-repo http://rhn.pbm.ihost.com/rhn/latest/7Server/ppc64le/optional/
yum install -y firefox liberation-fonts xdg-utils && npm install n -g && n latest && npm install -g npm@latest && export PATH="$PATH" && npm install --global yarn grunt-bump xo testem acorn
OS_NAME=`python3 -c "os_file_data=open('/etc/os-release').readlines();os_info = [i.replace('PRETTY_NAME=','').strip() for i in os_file_data if i.startswith('PRETTY_NAME')];print(os_info[0])"`
HOME_DIR=`pwd`
if ! git clone $PACKAGE_URL $PACKAGE_NAME; then
echo "------------------$PACKAGE_NAME:clone_fails---------------------------------------"
echo "$PACKAGE_URL $PACKAGE_NAME" > /home/tester/output/clone_fails
echo "$PACKAGE_NAME | $PACKAGE_URL | $PACKAGE_VERSION | $OS_NAME | GitHub | Fail | Clone_Fails" > /home/tester/output/version_tracker
exit 0
fi
cd $HOME_DIR/$PACKAGE_NAME
git checkout $PACKAGE_VERSION
PACKAGE_VERSION=$(jq -r ".version" package.json)
# run the test command from test.sh
if ! (npm install && npm audit fix && npm audit fix --force); then
echo "------------------$PACKAGE_NAME:install_fails-------------------------------------"
echo "$PACKAGE_URL $PACKAGE_NAME"
echo "$PACKAGE_NAME | $PACKAGE_URL | $PACKAGE_VERSION | $OS_NAME | GitHub | Fail | Install_Fails"
exit 0
fi
cd $HOME_DIR/$PACKAGE_NAME
if ! npm test; then
echo "------------------$PACKAGE_NAME:install_success_but_test_fails---------------------"
echo "$PACKAGE_URL $PACKAGE_NAME"
echo "$PACKAGE_NAME | $PACKAGE_URL | $PACKAGE_VERSION | $OS_NAME | GitHub | Fail | Install_success_but_test_Fails"
exit 0
else
echo "------------------$PACKAGE_NAME:install_&_test_both_success-------------------------"
echo "$PACKAGE_URL $PACKAGE_NAME"
echo "$PACKAGE_NAME | $PACKAGE_URL | $PACKAGE_VERSION | $OS_NAME | GitHub | Pass | Both_Install_and_Test_Success"
exit 0
fi
|
ppc64le/build-scripts
|
m/mime-db/mime-db_rhel_8.3.sh
|
Shell
|
apache-2.0
| 3,046 |
#!/bin/sh
usage() {
cat <<DELIM
Echo public IP of a running service.
Usage:
  $0 {instance}.{service}.{group} [nameserver]
Example:
  $0 2.web.gulaghypercloud
DELIM
}
[ $# -lt 1 ] && usage && exit
[ "${1}" = '-h' ] && usage && exit
INSTANCE=${1} # Instance must be specified, in the form '<instance>.<service>.<group>'.
NAMESERVER=${2+'@'$2} # Use default nameserver if not specified.
dig ${NAMESERVER} ${INSTANCE}.container +short A
|
daprlabs/daprdockr
|
util/get-ip.sh
|
Shell
|
apache-2.0
| 441 |
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/bin/bash
wget -P value_dice/datasets/ https://storage.googleapis.com/gresearch/value_dice/datasets/Ant-v2.npz
wget -P value_dice/datasets/ https://storage.googleapis.com/gresearch/value_dice/datasets/HalfCheetah-v2.npz
wget -P value_dice/datasets/ https://storage.googleapis.com/gresearch/value_dice/datasets/Hopper-v2.npz
wget -P value_dice/datasets/ https://storage.googleapis.com/gresearch/value_dice/datasets/Walker2d-v2.npz
declare -a env_names=("HalfCheetah-v2" "Hopper-v2" "Walker2d-v2" "Ant-v2")
declare -a algos=("bc" "dac" "value_dice")
expert_dir="./datasets/"
save_dir="./save/"
for algo in "${algos[@]}"
do
for env_name in "${env_names[@]}"
do
for ((seed=0;seed<10;seed+=1))
do
python -m value_dice.train_eval \
--expert_dir $expert_dir \
--save_dir $save_dir \
--algo $algo \
--env_name $env_name \
--seed $seed \
--num_trajectories 1 \
--alsologtostderr
done
done
done
|
google-research/google-research
|
value_dice/run_experiments.sh
|
Shell
|
apache-2.0
| 1,566 |
cp -rf .inputrc .vim* .python* .bash* bashscripts .pylint* .tmux* .profile .gitconfig ~/
cp -rf .config/* ~/.config/
|
jyapayne/dotfiles
|
deploy.sh
|
Shell
|
apache-2.0
| 117 |
#!/bin/bash
set -e
# Copyright 2009-2012 Microsoft Corporation Johns Hopkins University (Author: Daniel Povey)
# Apache 2.0.
# This is modified from the script in standard Kaldi recipe to account
# for the way the WSJ data is structured on the Edinburgh systems.
# - Arnab Ghoshal, 29/05/12
if [ $# -ne 1 ]; then
printf "\nUSAGE: %s <corpus-directory>\n\n" `basename $0`
echo "The argument should be a the top-level WSJ corpus directory."
echo "It is assumed that there will be a 'wsj0' and a 'wsj1' subdirectory"
echo "within the top-level corpus directory."
exit 1;
fi
CORPUS=$1
dir=`pwd`/data/local/data
lmdir=`pwd`/data/local/nist_lm
mkdir -p $dir $lmdir
local=`pwd`/local
utils=`pwd`/utils
. ./path.sh # Needed for KALDI_ROOT
export PATH=$PATH:$KALDI_ROOT/tools/irstlm/bin
sph2pipe=$KALDI_ROOT/tools/sph2pipe_v2.5/sph2pipe
if [ ! -x $sph2pipe ]; then
echo "Could not find (or execute) the sph2pipe program at $sph2pipe";
exit 1;
fi
cd $dir
# This version for SI-84
cat $CORPUS/wsj0/doc/indices/train/tr_s_wv1.ndx \
| $local/cstr_ndx2flist.pl $CORPUS | sort \
| grep -v wsj0/si_tr_s/401 > train_si84.flist
# This version for SI-284
cat $CORPUS/wsj1/doc/indices/si_tr_s.ndx \
$CORPUS/wsj0/doc/indices/train/tr_s_wv1.ndx \
| $local/cstr_ndx2flist.pl $CORPUS | sort \
| grep -v wsj0/si_tr_s/401 > train_si284.flist
# Now for the test sets.
# $CORPUS/wsj1/doc/indices/readme.doc
# describes all the different test sets.
# Note: each test-set seems to come in multiple versions depending
# on different vocabulary sizes, verbalized vs. non-verbalized
# pronunciations, etc. We use the largest vocab and non-verbalized
# pronunciations.
# The most normal one seems to be the "baseline 60k test set", which
# is h1_p0.
# Nov'92 (333 utts)
# These index files have a slightly different format;
# have to add .wv1, which is done in cstr_ndx2flist.pl
cat $CORPUS/wsj0/doc/indices/test/nvp/si_et_20.ndx | \
$local/cstr_ndx2flist.pl $CORPUS | sort > test_eval92.flist
# Nov'92 (330 utts, 5k vocab)
cat $CORPUS/wsj0/doc/indices/test/nvp/si_et_05.ndx | \
$local/cstr_ndx2flist.pl $CORPUS | sort > test_eval92_5k.flist
# Nov'93: (213 utts)
# Have to replace a wrong disk-id.
cat $CORPUS/wsj1/doc/indices/wsj1/eval/h1_p0.ndx | \
$local/cstr_ndx2flist.pl $CORPUS | sort > test_eval93.flist
# Nov'93: (215 utts, 5k)
cat $CORPUS/wsj1/doc/indices/wsj1/eval/h2_p0.ndx | \
$local/cstr_ndx2flist.pl $CORPUS | sort > test_eval93_5k.flist
# Dev-set for Nov'93 (503 utts)
cat $CORPUS/wsj1/doc/indices/h1_p0.ndx | \
$local/cstr_ndx2flist.pl $CORPUS | sort > test_dev93.flist
# Dev-set for Nov'93 (513 utts, 5k vocab)
cat $CORPUS/wsj1/doc/indices/h2_p0.ndx | \
$local/cstr_ndx2flist.pl $CORPUS | sort > test_dev93_5k.flist
# Dev-set Hub 1,2 (503, 913 utterances)
# Note: the ???'s below match WSJ and SI_DT, or wsj and si_dt.
# Sometimes this gets copied from the CD's with upcasing, don't know
# why (could be older versions of the disks).
find $CORPUS/???1/??_??_20 -print | grep -i ".wv1" | sort > dev_dt_20.flist
find $CORPUS/???1/??_??_05 -print | grep -i ".wv1" | sort > dev_dt_05.flist
# Finding the transcript files:
find -L $CORPUS -iname '*.dot' > dot_files.flist
# Convert the transcripts into our format (no normalization yet)
for x in train_si84 train_si284 test_eval92 test_eval93 test_dev93 test_eval92_5k test_eval93_5k test_dev93_5k dev_dt_05 dev_dt_20; do
$local/flist2scp.pl $x.flist | sort > ${x}_sph.scp
cat ${x}_sph.scp | awk '{print $1}' \
| $local/find_transcripts.pl dot_files.flist > $x.trans1
done
# Do some basic normalization steps. At this point we don't remove OOVs--
# that will be done inside the training scripts, as we'd like to make the
# data-preparation stage independent of the specific lexicon used.
noiseword="<NOISE>";
for x in train_si84 train_si284 test_eval92 test_eval93 test_dev93 test_eval92_5k test_eval93_5k test_dev93_5k dev_dt_05 dev_dt_20; do
cat $x.trans1 | $local/normalize_transcript.pl $noiseword \
| sort > $x.txt || exit 1;
done
# Create scp's with wav's. (the wv1 in the distribution is not really wav, it is sph.)
for x in train_si84 train_si284 test_eval92 test_eval93 test_dev93 test_eval92_5k test_eval93_5k test_dev93_5k dev_dt_05 dev_dt_20; do
awk '{printf("%s '$sph2pipe' -f wav %s |\n", $1, $2);}' < ${x}_sph.scp \
> ${x}_wav.scp
done
# Make the utt2spk and spk2utt files.
for x in train_si84 train_si284 test_eval92 test_eval93 test_dev93 test_eval92_5k test_eval93_5k test_dev93_5k dev_dt_05 dev_dt_20; do
cat ${x}_sph.scp | awk '{print $1}' \
| perl -ane 'chop; m:^...:; print "$_ $&\n";' > $x.utt2spk
cat $x.utt2spk | $utils/utt2spk_to_spk2utt.pl > $x.spk2utt || exit 1;
done
#in case we want to limit lm's on most frequent words, copy lm training word frequency list
cp $CORPUS/wsj1/doc/lng_modl/vocab/wfl_64.lst $lmdir
chmod u+w $lmdir/*.lst # had weird permissions on source.
# The 20K vocab, open-vocabulary language model (i.e. the one with UNK), without
# verbalized pronunciations. This is the most common test setup, I understand.
cp $CORPUS/wsj1/doc/lng_modl/base_lm/bcb20onp.z $lmdir/lm_bg.arpa.gz || exit 1;
chmod u+w $lmdir/lm_bg.arpa.gz
# trigram would be:
cat $CORPUS/wsj1/doc/lng_modl/base_lm/tcb20onp.z | \
perl -e 'while(<>){ if(m/^\\data\\/){ print; last; } } while(<>){ print; }' \
| gzip -c -f > $lmdir/lm_tg.arpa.gz || exit 1;
prune-lm --threshold=1e-7 $lmdir/lm_tg.arpa.gz $lmdir/lm_tgpr.arpa || exit 1;
gzip -f $lmdir/lm_tgpr.arpa || exit 1;
# repeat for 5k language models
cp $CORPUS/wsj1/doc/lng_modl/base_lm/bcb05onp.z $lmdir/lm_bg_5k.arpa.gz || exit 1;
chmod u+w $lmdir/lm_bg_5k.arpa.gz
# trigram would be: !only closed vocabulary here!
cp $CORPUS/wsj1/doc/lng_modl/base_lm/tcb05cnp.z $lmdir/lm_tg_5k.arpa.gz || exit 1;
chmod u+w $lmdir/lm_tg_5k.arpa.gz
gunzip $lmdir/lm_tg_5k.arpa.gz
tail -n 4328839 $lmdir/lm_tg_5k.arpa | gzip -c -f > $lmdir/lm_tg_5k.arpa.gz
rm $lmdir/lm_tg_5k.arpa
prune-lm --threshold=1e-7 $lmdir/lm_tg_5k.arpa.gz $lmdir/lm_tgpr_5k.arpa || exit 1;
gzip -f $lmdir/lm_tgpr_5k.arpa || exit 1;
if [ ! -f wsj0-train-spkrinfo.txt ] || [ `cat wsj0-train-spkrinfo.txt | wc -l` -ne 134 ]; then
rm -f wsj0-train-spkrinfo.txt
wget http://www.ldc.upenn.edu/Catalog/docs/LDC93S6A/wsj0-train-spkrinfo.txt \
|| ( echo "Getting wsj0-train-spkrinfo.txt from backup location" && \
wget --no-check-certificate https://sourceforge.net/projects/kaldi/files/wsj0-train-spkrinfo.txt );
fi
if [ ! -f wsj0-train-spkrinfo.txt ]; then
echo "Could not get the spkrinfo.txt file from LDC website (moved)?"
echo "This is possibly omitted from the training disks; couldn't find it."
echo "Everything else may have worked; we just may be missing gender info"
echo "which is only needed for VTLN-related diagnostics anyway."
exit 1
fi
# Note: wsj0-train-spkrinfo.txt doesn't seem to be on the disks but the
# LDC put it on the web. Perhaps it was accidentally omitted from the
# disks.
cat $CORPUS/wsj0/doc/spkrinfo.txt \
$CORPUS/wsj1/doc/evl_spok/spkrinfo.txt \
$CORPUS/wsj1/doc/dev_spok/spkrinfo.txt \
$CORPUS/wsj1/doc/train/spkrinfo.txt \
./wsj0-train-spkrinfo.txt | \
perl -ane 'tr/A-Z/a-z/; m/^;/ || print;' | \
awk '{print $1, $2}' | grep -v -- -- | sort | uniq > spk2gender
echo "Data preparation succeeded"
|
AlexHung780312/aurora4_egs
|
s5/local/cstr_wsj_data_prep.sh
|
Shell
|
apache-2.0
| 7,359 |
#!/usr/bin/env bash
# Copyright © 2014-2016 Cask Data, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# Build script for docs
source ../_common/common-build.sh
DEFAULT_XML="../../cdap-common/src/main/resources/cdap-default.xml"
DEFAULT_XML_MD5_HASH="7e3228aefe286ca3f9627c75df0947c0"
DEFAULT_TOOL="../tools/doc-cdap-default.py"
DEFAULT_RST="cdap-default-table.rst"
CHECK_INCLUDES=${TRUE}
function rewrite_references_sed() {
local source_rst=${1}
local target_rst=${2}
local source_pattern=${3}
local target_pattern=${4}
sed -e "s|${source_pattern}|${target_pattern}|g" ${source_rst} > ${target_rst}
echo "Copied file ${source_rst} changing '${source_pattern}' to '${target_pattern}'"
}
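# Example (illustrative paths): rewrite the '|distribution|' placeholder for Cloudera:
#   rewrite_references_sed installation.txt cloudera-installation.rst "\|distribution\|" "cloudera"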
function download_includes() {
local target_includes_dir=${1}
echo_red_bold "Check guarded files for changes."
test_an_include "${DEFAULT_XML_MD5_HASH}" "${DEFAULT_XML}"
echo "Building rst file from cdap-default.xml..."
python "${DEFAULT_TOOL}" --generate --target "${target_includes_dir}/${DEFAULT_RST}"
echo "Copying files, changing references..."
local source_rst="${target_includes_dir}/../../source/_includes/installation"
local pattern="\|distribution\|"
local distributions="cloudera ambari mapr packages"
local types="installation configuration starting"
for dist in ${distributions}; do
for type in ${types}; do
rewrite_references_sed "${source_rst}/${type}.txt" "${target_includes_dir}/${dist}-${type}.rst" "${pattern}" "${dist}"
done
echo
done
}
run_command ${1}
|
chtyim/cdap
|
cdap-docs/admin-manual/build.sh
|
Shell
|
apache-2.0
| 2,036 |
pkg_name=kubectl
pkg_origin=core
pkg_description="kubectl CLI tool"
pkg_upstream_url=https://github.com/kubernetes/kubernetes
pkg_license=('Apache-2.0')
pkg_maintainer="The Habitat Maintainers <[email protected]>"
pkg_version=1.9.6
pkg_source=https://github.com/kubernetes/kubernetes/archive/v${pkg_version}.tar.gz
pkg_shasum=d0c0edba4410426bf9b7b8f9f0efd0d59816cdeaef25fc99311a90ece843c1d3
pkg_dirname="kubernetes-${pkg_version}"
pkg_bin_dirs=(bin)
pkg_build_deps=(
lilian/git
lilian/make
lilian/gcc
lilian/go
lilian/diffutils
lilian/which
lilian/rsync
)
pkg_deps=(
core/glibc
)
do_build() {
make kubectl
return $?
}
do_install() {
cp _output/bin/kubectl "${pkg_prefix}/bin/"
return $?
}
|
be-plans/be
|
kubectl/plan.sh
|
Shell
|
apache-2.0
| 718 |
# coding=utf-8
# Copyright 2019 The Google UDA Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/bin/bash
task_name=cifar10
# preprocess supervised data
python2 preprocess.py \
--data_type=sup \
--sup_size=250 \
--dev_size=50 \
--task_name=${task_name} \
--raw_data_dir=data/raw_data/${task_name} \
--output_base_dir=data/proc_data/${task_name}
|
google-research/uda
|
image/scripts/preprocess_with_dev.sh
|
Shell
|
apache-2.0
| 872 |
#!/bin/bash
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
source "${SCRIPT_DIR}/setup_env.sh"
# Single Edge TPU examples.
# Download data set for last layer backprop example.
if [ ! -d "/tmp/retrain/flower_photos/" ]
then
mkdir -p /tmp/retrain
curl http://download.tensorflow.org/example_images/flower_photos.tgz \
| tar xz -C /tmp/retrain
mogrify -format bmp /tmp/retrain/flower_photos/*/*.jpg
fi
run_env "${CPP_EXAMPLES_DIR}/backprop_last_layer" \
--embedding_extractor_path="${TEST_DATA_DIR}/mobilenet_v1_1.0_224_quant_embedding_extractor_edgetpu.tflite"
run_env "${CPP_EXAMPLES_DIR}/two_models_one_tpu" \
"${TEST_DATA_DIR}/mobilenet_v2_1.0_224_inat_bird_quant_edgetpu.tflite" \
"${TEST_DATA_DIR}/mobilenet_v2_1.0_224_inat_plant_quant_edgetpu.tflite" \
"${TEST_DATA_DIR}/bird.bmp" \
"${TEST_DATA_DIR}/sunflower.bmp"
# Multiple Edge TPU examples.
while [[ $(count_edgetpus) -lt 2 ]]; do
echo "You need at least two Edge TPU devices plugged in to run the following tests."
echo "Press Enter when ready."
read LINE
done
run_env "${CPP_EXAMPLES_DIR}/two_models_two_tpus_threaded" \
"${TEST_DATA_DIR}/mobilenet_v2_1.0_224_inat_bird_quant_edgetpu.tflite" \
"${TEST_DATA_DIR}/mobilenet_v2_1.0_224_inat_plant_quant_edgetpu.tflite" \
"${TEST_DATA_DIR}/bird.bmp" \
"${TEST_DATA_DIR}/sunflower.bmp"
|
google-coral/edgetpu
|
scripts/run_cpp_examples.sh
|
Shell
|
apache-2.0
| 1,921 |
#!/bin/bash
# Copyright 2022, Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -xe
packer_dir=$(dirname "$(dirname "$0")")
# Check if an official base box is added to vagrant
s3_variables="${packer_dir}/config/s3.json"
config_variables="${packer_dir}/config/variables.json"
S3_CONFIG_FILE=$(jq -r '.s3_config_file' "$s3_variables")
DOWNLOAD_BUCKET=$(jq -r '.download_bucket' "$s3_variables")
DOWNLOAD_PACKER_BOX=$(jq -r '.download_packer_box' "$s3_variables")
OUTPUT_PACKER_BOX_NAME=$(jq -r '.output_packer_box_name' "$config_variables")
if [ "$DOWNLOAD_BUCKET" == "null" ] \
|| [ "$DOWNLOAD_PACKER_BOX" == "null" ] \
|| [ "$OUTPUT_PACKER_BOX_NAME" == "null" ]; then
printf "One or more variables in %s are undefined.\n" "$config_variables"
exit 1
fi
#download base box from s3 storage
if [ "$S3_CONFIG_FILE" == "null" ]; then
s3cmd get --force "${DOWNLOAD_BUCKET}/${DOWNLOAD_PACKER_BOX}" \
"${packer_dir}/download/${DOWNLOAD_PACKER_BOX}"
else
s3cmd -c "${S3_CONFIG_FILE}" get --force \
"${DOWNLOAD_BUCKET}/${DOWNLOAD_PACKER_BOX}" \
"${packer_dir}/download/${DOWNLOAD_PACKER_BOX}"
fi
# add the downloaded box as the output box in vagrant
VAGRANT_VAGRANTFILE=Vagrantfile vagrant box add \
--force \
--clean \
--name "$OUTPUT_PACKER_BOX_NAME" \
file://"${packer_dir}/download/${DOWNLOAD_PACKER_BOX}"
|
bloomberg/chef-bcpc
|
virtual/packer/bin/download-packer-box.sh
|
Shell
|
apache-2.0
| 1,951 |
#!/bin/bash
echo "=== Acquiring datasets ==="
echo "---"
mkdir -p data
cd data
echo "- Downloading ptb Treebank (PTB)"
mkdir -p ptb
cd ptb
wget --quiet --continue https://github.com/pytorch/examples/raw/master/word_language_model/data/penn/train.txt
wget --quiet --continue https://github.com/pytorch/examples/raw/master/word_language_model/data/penn/valid.txt
wget --quiet --continue https://github.com/pytorch/examples/raw/master/word_language_model/data/penn/test.txt
mv train.txt ptb.train.txt
mv valid.txt ptb.valid.txt
mv test.txt ptb.test.txt
cd ..
echo "- Downloading WikiText-2 (WT2)"
wget --quiet --continue https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-2-v1.zip
unzip -q wikitext-2-v1.zip
cd ..
python3 -u pre_process_wikitext.py
cd data/wikitext-2
mv wiki.train.tokens.sents wiki2.train.txt
mv wiki.valid.tokens.sents wiki2.valid.txt
mv wiki.test.tokens.sents wiki2.test.txt
echo "---"
echo "Happy language modeling :)"
|
giancds/attentive_lm
|
get_and_process_data.sh
|
Shell
|
apache-2.0
| 958 |
export XOS_DIR=/opt/xos
python syndicate-backend.py -C $XOS_DIR/syndicate_observer/syndicate_observer_config
|
wathsalav/xos
|
xos/syndicate_observer/run.sh
|
Shell
|
apache-2.0
| 110 |
#!/bin/bash
# Copyright 2015 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Unit tests for docker_build
DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
source ${DIR}/testenv.sh || { echo "testenv.sh not found!" >&2; exit 1; }
readonly PLATFORM="$(uname -s | tr 'A-Z' 'a-z')"
if [ "${PLATFORM}" = "darwin" ]; then
readonly MAGIC_TIMESTAMP="$(date -r 0 "+%b %e %Y")"
else
readonly MAGIC_TIMESTAMP="$(date --date=@0 "+%F %R")"
fi
function EXPECT_CONTAINS() {
local complete="${1}"
local substring="${2}"
local message="${3:-Expected '${substring}' not found in '${complete}'}"
echo "${complete}" | grep -Fsq -- "${substring}" \
|| fail "$message"
}
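# Example (illustrative):
#   EXPECT_CONTAINS "${metadata}" '"RepoTags"' "manifest is missing RepoTags"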
function no_check() {
echo "${@}"
}
function check_property() {
local property="${1}"
local tarball="${2}"
local layer="${3}"
local expected="${4}"
local test_data="${TEST_DATA_DIR}/${tarball}.tar"
local metadata="$(tar xOf "${test_data}" "./${layer}/json")"
# This would be much more accurate if we had 'jq' everywhere.
EXPECT_CONTAINS "${metadata}" "\"${property}\": ${expected}"
}
function check_manifest_property() {
local property="${1}"
local tarball="${2}"
local expected="${3}"
local test_data="${TEST_DATA_DIR}/${tarball}.tar"
local metadata="$(tar xOf "${test_data}" "./manifest.json")"
# This would be much more accurate if we had 'jq' everywhere.
EXPECT_CONTAINS "${metadata}" "\"${property}\": ${expected}"
}
function check_no_property() {
local property="${1}"
local tarball="${2}"
local layer="${3}"
local test_data="${TEST_DATA_DIR}/${tarball}.tar"
tar xOf "${test_data}" "./${layer}/json" >$TEST_log
expect_not_log "\"${property}\":"
# notop variant
test_data="${TEST_DATA_DIR}/notop_${tarball}.tar"
tar xOf "${test_data}" "./${layer}/json" >$TEST_log
expect_not_log "\"${property}\":"
}
function check_size() {
check_property Size "${@}"
}
function check_id() {
check_property id "${@}"
}
function check_parent() {
check_property parent "${@}"
}
function check_entrypoint() {
input="$1"
shift
check_property Entrypoint "${input}" "${@}"
check_property Entrypoint "notop_${input}" "${@}"
}
function check_cmd() {
input="$1"
shift
check_property Cmd "${input}" "${@}"
check_property Cmd "notop_${input}" "${@}"
}
function check_ports() {
input="$1"
shift
check_property ExposedPorts "${input}" "${@}"
check_property ExposedPorts "${input}" "${@}"
}
function check_volumes() {
input="$1"
shift
check_property Volumes "${input}" "${@}"
check_property Volumes "notop_${input}" "${@}"
}
function check_env() {
input="$1"
shift
check_property Env "${input}" "${@}"
check_property Env "notop_${input}" "${@}"
}
function check_label() {
input="$1"
shift
check_property Label "${input}" "${@}"
check_property Label "notop_${input}" "${@}"
}
function check_workdir() {
input="$1"
shift
check_property WorkingDir "${input}" "${@}"
check_property WorkingDir "notop_${input}" "${@}"
}
function check_user() {
input="$1"
shift
check_property User "${input}" "${@}"
check_property User "notop_${input}" "${@}"
}
function check_layers_aux() {
local ancestry_check=${1}
shift 1
local input=${1}
shift 1
local expected_layers=(${*})
local expected_layers_sorted=(
$(for i in ${expected_layers[*]}; do echo $i; done | sort)
)
local test_data="${TEST_DATA_DIR}/${input}.tar"
# Verbose output for testing.
tar tvf "${test_data}"
local actual_layers=(
$(tar tvf ${test_data} | tr -s ' ' | cut -d' ' -f 4- | sort \
| cut -d'/' -f 2 | grep -E '^[0-9a-f]+$' | sort | uniq))
# Verbose output for testing.
echo Expected: ${expected_layers_sorted[@]}
echo Actual: ${actual_layers[@]}
check_eq "${#expected_layers[@]}" "${#actual_layers[@]}"
local index=0
local parent=
while [ "${index}" -lt "${#expected_layers[@]}" ]
do
# Check that the nth sorted layer matches
check_eq "${expected_layers_sorted[$index]}" "${actual_layers[$index]}"
# Grab the ordered layer and check it.
local layer="${expected_layers[$index]}"
# Verbose output for testing.
echo Checking layer: "${layer}"
local listing="$(tar xOf "${test_data}" "./${layer}/layer.tar" | tar tv)"
# Check that all files in the layer, if any, have the magic timestamp
check_eq "$(echo "${listing}" | grep -Fv "${MAGIC_TIMESTAMP}" || true)" ""
check_id "${input}" "${layer}" "\"${layer}\""
# Check that the layer contains its predecessor as its parent in the JSON.
if [[ -n "${parent}" ]]; then
"${ancestry_check}" "${input}" "${layer}" "\"${parent}\""
fi
# Check that the layer's size metadata matches the layer's tarball's size.
local layer_size=$(tar xOf "${test_data}" "./${layer}/layer.tar" | wc -c | xargs)
check_size "${input}" "${layer}" "${layer_size}"
index=$((index + 1))
parent=$layer
done
}
function check_layers() {
local input=$1
shift
check_layers_aux "check_parent" "$input" "$@"
check_layers_aux "check_parent" "notop_$input" "$@"
}
function test_gen_image() {
grep -Fsq "./gen.out" "$TEST_DATA_DIR/gen_image.tar" \
|| fail "'./gen.out' not found in '$TEST_DATA_DIR/gen_image.tar'"
}
function test_dummy_repository() {
local layer="0279f3ce8b08d10506abcf452393b3e48439f5eca41b836fae59a0d509fbafea"
local test_data="${TEST_DATA_DIR}/dummy_repository.tar"
check_layers_aux "check_parent" "dummy_repository" "$layer"
local repositories="$(tar xOf "${test_data}" "./repositories")"
# This would really need to use `jq` instead.
echo "${repositories}" | \
grep -Esq -- "\"gcr.io/dummy/[a-zA-Z_/]*/docker/testdata\": {" \
|| fail "Cannot find image in repository gcr.io/dummy in '${repositories}'"
EXPECT_CONTAINS "${repositories}" "\"dummy_repository\": \"$layer\""
}
function test_files_base() {
check_layers "files_base" \
"82ca3945f7d07df82f274d7fafe83fd664c2154e5c64c988916ccd5b217bb710"
}
function test_files_with_files_base() {
check_layers "files_with_files_base" \
"82ca3945f7d07df82f274d7fafe83fd664c2154e5c64c988916ccd5b217bb710" \
"84c0d09919ae8b06cb6b064d8cd5eab63341a46f11ccc7ecbe270ad3e1f52744"
}
function test_tar_base() {
check_layers "tar_base" \
"8b9e4db9dd4b990ee6d8adc2843ad64702ad9063ae6c22e8ca5f94aa54e71277"
# Check that this layer doesn't have any entrypoint data by looking
# for *any* entrypoint.
check_no_property "Entrypoint" "tar_base" \
"8b9e4db9dd4b990ee6d8adc2843ad64702ad9063ae6c22e8ca5f94aa54e71277"
}
function test_tar_with_tar_base() {
check_layers "tar_with_tar_base" \
"8b9e4db9dd4b990ee6d8adc2843ad64702ad9063ae6c22e8ca5f94aa54e71277" \
"1cc81a2aaec2e3727d98d48bf9ba09d3ac96ef48adf5edae861d15dd0191dc40"
}
function test_directory_with_tar_base() {
check_layers "directory_with_tar_base" \
"8b9e4db9dd4b990ee6d8adc2843ad64702ad9063ae6c22e8ca5f94aa54e71277" \
"e56ddeb8279698484f50d480f71cb5380223ad0f451766b7b9a9348129d02542"
}
function test_files_with_tar_base() {
check_layers "files_with_tar_base" \
"8b9e4db9dd4b990ee6d8adc2843ad64702ad9063ae6c22e8ca5f94aa54e71277" \
"f099727fa58f9b688e77b511b3cc728b86ae0e84d197b9330bd51082ad5589f2"
}
function test_workdir_with_tar_base() {
check_layers "workdir_with_tar_base" \
"8b9e4db9dd4b990ee6d8adc2843ad64702ad9063ae6c22e8ca5f94aa54e71277" \
"f24cbe53bd1b78909c6dba0bd47016354f3488b35b85aeee68ecc423062b927e"
}
function test_tar_with_files_base() {
check_layers "tar_with_files_base" \
"82ca3945f7d07df82f274d7fafe83fd664c2154e5c64c988916ccd5b217bb710" \
"bee1a325e4b51a1dcfd7e447987b4e130590815865ab22e8744878053d525f20"
}
function test_base_with_entrypoint() {
check_layers "base_with_entrypoint" \
"4acbeb0495918726c0107e372b421e1d2a6fd4825d58fc3f0b0b2a719fb3ce1b"
check_entrypoint "base_with_entrypoint" \
"4acbeb0495918726c0107e372b421e1d2a6fd4825d58fc3f0b0b2a719fb3ce1b" \
'["/bar"]'
# Check that the base layer has a port exposed.
check_ports "base_with_entrypoint" \
"4acbeb0495918726c0107e372b421e1d2a6fd4825d58fc3f0b0b2a719fb3ce1b" \
'{"8080/tcp": {}}'
}
function test_derivative_with_shadowed_cmd() {
check_layers "derivative_with_shadowed_cmd" \
"4acbeb0495918726c0107e372b421e1d2a6fd4825d58fc3f0b0b2a719fb3ce1b" \
"e35f57dc6c1e84ae67dcaaf3479a3a3c0f52ac4d194073bd6214e04c05beab42"
}
function test_derivative_with_cmd() {
check_layers "derivative_with_cmd" \
"4acbeb0495918726c0107e372b421e1d2a6fd4825d58fc3f0b0b2a719fb3ce1b" \
"e35f57dc6c1e84ae67dcaaf3479a3a3c0f52ac4d194073bd6214e04c05beab42" \
"186289545131e34510006ac79498078dcf41736a5eb9a36920a6b30d3f45bc01"
check_entrypoint "derivative_with_cmd" \
"186289545131e34510006ac79498078dcf41736a5eb9a36920a6b30d3f45bc01" \
'["/bar"]'
# Check that the middle layer has our shadowed arg.
check_cmd "derivative_with_cmd" \
"e35f57dc6c1e84ae67dcaaf3479a3a3c0f52ac4d194073bd6214e04c05beab42" \
'["shadowed-arg"]'
# Check that our topmost layer excludes the shadowed arg.
check_cmd "derivative_with_cmd" \
"186289545131e34510006ac79498078dcf41736a5eb9a36920a6b30d3f45bc01" \
'["arg1", "arg2"]'
# Check that the topmost layer has the ports exposed by the bottom
# layer, and itself.
check_ports "derivative_with_cmd" \
"186289545131e34510006ac79498078dcf41736a5eb9a36920a6b30d3f45bc01" \
'{"80/tcp": {}, "8080/tcp": {}}'
}
function test_derivative_with_volume() {
check_layers "derivative_with_volume" \
"125e7cfb9d4a6d803a57b88bcdb05d9a6a47ac0d6312a8b4cff52a2685c5c858" \
"08424283ad3a7e020e210bec22b166d7ebba57f7ba2d0713c2fd7bd1e2038f88"
# Check that the topmost layer has the ports exposed by the bottom
# layer, and itself.
check_volumes "derivative_with_volume" \
"125e7cfb9d4a6d803a57b88bcdb05d9a6a47ac0d6312a8b4cff52a2685c5c858" \
'{"/logs": {}}'
check_volumes "derivative_with_volume" \
"08424283ad3a7e020e210bec22b166d7ebba57f7ba2d0713c2fd7bd1e2038f88" \
'{"/asdf": {}, "/blah": {}, "/logs": {}}'
}
function test_generated_tarball() {
check_layers "generated_tarball" \
"54b8328604115255cc76c12a2a51939be65c40bf182ff5a898a5fb57c38f7772"
}
function test_with_env() {
check_layers "with_env" \
"125e7cfb9d4a6d803a57b88bcdb05d9a6a47ac0d6312a8b4cff52a2685c5c858" \
"42a1bd0f449f61a23b8a7776875ffb6707b34ee99c87d6428a7394f5e55e8624"
check_env "with_env" \
"42a1bd0f449f61a23b8a7776875ffb6707b34ee99c87d6428a7394f5e55e8624" \
'["bar=blah blah blah", "foo=/asdf"]'
# We should have a tag in our manifest, otherwise it will be untagged
# when loaded in newer clients.
check_manifest_property "RepoTags" "with_env" \
"[\"bazel/${TEST_DATA_TARGET_BASE}:with_env\"]"
}
function test_with_double_env() {
check_layers "with_double_env" \
"125e7cfb9d4a6d803a57b88bcdb05d9a6a47ac0d6312a8b4cff52a2685c5c858" \
"42a1bd0f449f61a23b8a7776875ffb6707b34ee99c87d6428a7394f5e55e8624" \
"576a9fd9c690be04dc7aacbb9dbd1f14816e32dbbcc510f4d42325bbff7163dd"
# Check both the aggregation and the expansion of embedded variables.
check_env "with_double_env" \
"576a9fd9c690be04dc7aacbb9dbd1f14816e32dbbcc510f4d42325bbff7163dd" \
'["bar=blah blah blah", "baz=/asdf blah blah blah", "foo=/asdf"]'
}
function test_with_label() {
check_layers "with_label" \
"125e7cfb9d4a6d803a57b88bcdb05d9a6a47ac0d6312a8b4cff52a2685c5c858" \
"eba6abda3d259ab6ed5f4d48b76df72a5193fad894d4ae78fbf0a363d8f9e8fd"
check_label "with_label" \
"eba6abda3d259ab6ed5f4d48b76df72a5193fad894d4ae78fbf0a363d8f9e8fd" \
'["com.example.bar={\"name\": \"blah\"}", "com.example.baz=qux", "com.example.foo={\"name\": \"blah\"}"]'
}
function test_with_double_label() {
check_layers "with_double_label" \
"125e7cfb9d4a6d803a57b88bcdb05d9a6a47ac0d6312a8b4cff52a2685c5c858" \
"eba6abda3d259ab6ed5f4d48b76df72a5193fad894d4ae78fbf0a363d8f9e8fd" \
"bfe88fbb5e24fc5bff138f7a1923d53a2ee1bbc8e54b6f5d9c371d5f48b6b023" \
check_label "with_double_label" \
"bfe88fbb5e24fc5bff138f7a1923d53a2ee1bbc8e54b6f5d9c371d5f48b6b023" \
'["com.example.bar={\"name\": \"blah\"}", "com.example.baz=qux", "com.example.foo={\"name\": \"blah\"}", "com.example.qux={\"name\": \"blah-blah\"}"]'
}
function test_with_user() {
check_user "with_user" \
"65664d4d78ff321684e2a8bf165792ce562c5990c9ba992e6288dcb1ec7f675c" \
"\"nobody\""
}
function get_layer_listing() {
local input=$1
local layer=$2
local test_data="${TEST_DATA_DIR}/${input}.tar"
tar xOf "${test_data}" \
"./${layer}/layer.tar" | tar tv | sed -e 's/^.*:00 //'
}
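# Example (illustrative): get_layer_listing "with_env" "42a1bd0f..." prints the
# file paths stored in that layer's tarball, one per line.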
function test_data_path() {
local no_data_path_sha="451d182e5c71840f00ba9726dc0239db73a21b7e89e79c77f677e3f7c5c23d44"
local data_path_sha="9a41c9e1709558f7ef06f28f66e9056feafa7e0f83990801e1b27c987278d8e8"
local absolute_data_path_sha="f196c42ab4f3eb850d9655b950b824db2c99c01527703ac486a7b48bb2a34f44"
local root_data_path_sha="19d7fd26d67bfaeedd6232dcd441f14ee163bc81c56ed565cc20e73311c418b6"
check_layers_aux "check_parent" "no_data_path_image" "${no_data_path_sha}"
check_layers_aux "check_parent" "data_path_image" "${data_path_sha}"
check_layers_aux "check_parent" "absolute_data_path_image" "${absolute_data_path_sha}"
check_layers_aux "check_parent" "root_data_path_image" "${root_data_path_sha}"
# Without data_path = "." the file will be inserted as `./test`
# (since it is the path in the package) and with data_path = "."
# the file will be inserted relatively to the testdata package
# (so `./test/test`).
check_eq "$(get_layer_listing "no_data_path_image" "${no_data_path_sha}")" \
'./
./test'
check_eq "$(get_layer_listing "data_path_image" "${data_path_sha}")" \
'./
./test/
./test/test'
# With an absolute path for data_path, we should strip that prefix
# from the files' paths. Since the testdata images are in
# //tools/build_defs/docker/testdata and data_path is set to
# "/tools/build_defs", we should have `docker` as the top-level
# directory.
check_eq "$(get_layer_listing "absolute_data_path_image" "${absolute_data_path_sha}")" \
'./
./docker/
./docker/testdata/
./docker/testdata/test/
./docker/testdata/test/test'
# With data_path = "/", we expect the entire path from the repository
# root.
check_eq "$(get_layer_listing "root_data_path_image" "${root_data_path_sha}")" \
"./
./tools/
./tools/build_defs/
./tools/build_defs/docker/
./tools/build_defs/docker/testdata/
./tools/build_defs/docker/testdata/test/
./tools/build_defs/docker/testdata/test/test"
}
function test_extras_with_deb() {
local test_data="${TEST_DATA_DIR}/extras_with_deb.tar"
local sha=$(tar xOf ${test_data} ./top)
# The content of the layer should have no duplicate
local layer_listing="$(get_layer_listing "extras_with_deb" "${sha}" | sort)"
check_eq "${layer_listing}" \
"./
./etc/
./etc/nsswitch.conf
./tmp/
./usr/
./usr/bin/
./usr/bin/java -> /path/to/bin/java
./usr/titi"
}
function test_bundle() {
# Check that we have these layers, but ignore the parent check, since
# this is a tree not a list.
check_layers_aux "no_check" "bundle_test" \
"125e7cfb9d4a6d803a57b88bcdb05d9a6a47ac0d6312a8b4cff52a2685c5c858" \
"42a1bd0f449f61a23b8a7776875ffb6707b34ee99c87d6428a7394f5e55e8624" \
"4acbeb0495918726c0107e372b421e1d2a6fd4825d58fc3f0b0b2a719fb3ce1b" \
"576a9fd9c690be04dc7aacbb9dbd1f14816e32dbbcc510f4d42325bbff7163dd" \
"82ca3945f7d07df82f274d7fafe83fd664c2154e5c64c988916ccd5b217bb710" \
"e5cfc312de72ce09488d789f525189a26a686d60fcc1c74249a3d7ce62986a82"
# Our bundle should have the following aliases.
check_manifest_property "RepoTags" "bundle_test" \
"[\"bazel/${TEST_DATA_TARGET_BASE}:base_with_entrypoint\", \"docker.io/ubuntu:latest\"]"
check_manifest_property "RepoTags" "bundle_test" \
"[\"bazel/${TEST_DATA_TARGET_BASE}:link_with_files_base\", \"us.gcr.io/google-appengine/base:fresh\"]"
check_manifest_property "RepoTags" "bundle_test" \
"[\"bazel/${TEST_DATA_TARGET_BASE}:with_double_env\", \"gcr.io/google-containers/pause:2.0\"]"
}
run_suite "build_test"
|
Asana/bazel
|
tools/build_defs/docker/build_test.sh
|
Shell
|
apache-2.0
| 16,655 |
GPU_ID="0"
EVERY=1000
MODEL=PositionalCnnDeepCombineChainModel
MODEL_DIR="../model/positional_cnn_deep_combine_chain"
start=$1
DIR="$(pwd)"
for checkpoint in $(cd $MODEL_DIR && python ${DIR}/training_utils/select.py $EVERY); do
echo $checkpoint;
if [ $checkpoint -gt $start ]; then
echo $checkpoint;
CUDA_VISIBLE_DEVICES=$GPU_ID python eval.py \
--train_dir="$MODEL_DIR" \
--model_checkpoint_path="${MODEL_DIR}/model.ckpt-${checkpoint}" \
--eval_data_pattern="/Youtube-8M/data/frame/validate/validatea*" \
--frame_features=True \
--feature_names="rgb,audio" \
--feature_sizes="1024,128" \
--batch_size=64 \
--model=$MODEL \
--moe_num_mixtures=4 \
--deep_chain_layers=2 \
--deep_chain_relu_cells=128 \
--label_loss=MultiTaskCrossEntropyLoss \
--multitask=True \
--support_type="label,label" \
--support_loss_percent=0.1 \
--run_once=True
fi
done
|
wangheda/youtube-8m
|
youtube-8m-wangheda/eval_scripts/eval-positional-cnn-dcc.sh
|
Shell
|
apache-2.0
| 919 |
SCRIPT_ROOT="$1"
PROJECT_ROOT="$2"
PUPPET_VERSION="$3"
$SCRIPT_ROOT/os-detect-setup.sh $SCRIPT_ROOT $PROJECT_ROOT
$SCRIPT_ROOT/initial-setup.sh $SCRIPT_ROOT $PROJECT_ROOT
$SCRIPT_ROOT/update-puppet.sh $SCRIPT_ROOT $PROJECT_ROOT $PUPPET_VERSION
$SCRIPT_ROOT/librarian-puppet-vagrant.sh $SCRIPT_ROOT $PROJECT_ROOT
|
Spantree/spantree-puppet-bootstrap
|
shell/initialize-all.sh
|
Shell
|
apache-2.0
| 336 |
#!/usr/bin/env bash
# setup docker container
docker run -it --name sap-hana -v /Users/terma/Downloads/sap-hana:/sap-hana-install opensuse bash
# connect to existent container
# in container
cd /sap-hana-install
zypper update
zypper install numactl
./setup-
# ...
# yum install libtool
# yum install libtool-ltdl
# to get centos install GLIBCXX_3.4.20
# yum install centos-release-scl-rh
# yum install devtoolset-3-gcc devtoolset-3-gcc-c++
# scl enable devtoolset-3 bash
|
terma/fast-select
|
prepare-sap-hana.sh
|
Shell
|
apache-2.0
| 474 |
#!/bin/bash
set -e -u
if [ $# -lt 1 -a -z "${PLANET-}" ]; then
echo "This script updates a planet or an extract, processes metro networks in it"
echo "and produses a set of HTML files with validation results."
echo
echo "Usage: $0 <planet.o5m>"
echo
echo "Variable reference:"
echo "- PLANET: path for the source o5m file (the entire planet or an extract)"
echo "- CITY: name of a city to process"
echo "- BBOX: bounding box of an extract; x1,y1,x2,y2"
echo "- DUMP: file name to dump city data"
echo "- MAPSME: file name for maps.me json output"
echo "- OSMCTOOLS: path to osmconvert and osmupdate binaries"
echo "- PYTHON: python 3 executable"
echo "- GIT_PULL: set to 1 to update the scripts"
echo "- TMPDIR: path to temporary files"
echo "- HTML_DIR: target path for generated HTML files"
echo "- SERVER: server name and path to upload HTML files (e.g. [email protected]:/var/www/)"
echo "- SERVER_KEY: rsa key to supply for uploading the files"
echo "- REMOVE_HTML: set to 1 to remove HTML_DIR after uploading"
exit 1
fi
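# Example invocation (hypothetical paths), relying on the defaults above:
# PLANET=~/planet-extract.o5m HTML_DIR=/var/www/subways ./process_subways.sh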
[ -n "${WHAT-}" ] && echo WHAT
PLANET="${PLANET:-${1-}}"
[ ! -f "$PLANET" ] && echo "Cannot find planet file $PLANET" && exit 2
OSMCTOOLS="${OSMCTOOLS:-$HOME/osmctools}"
if [ ! -f "$OSMCTOOLS/osmupdate" ]; then
if which osmupdate > /dev/null; then
OSMCTOOLS="$(dirname "$(which osmupdate)")"
else
echo "Please compile osmctools to $OSMCTOOLS"
exit 3
fi
fi
PYTHON=${PYTHON:-python3}
# This will fail if there is no python
"$PYTHON" --version > /dev/null
SUBWAYS_PATH="$(dirname "$0")/.."
[ ! -f "$SUBWAYS_PATH/process_subways.py" ] && echo "Please clone the subways repo to $SUBWAYS_PATH" && exit 4
TMPDIR="${TMPDIR:-$SUBWAYS_PATH}"
# Downloading the latest version of the subways script
if [ -n "${GIT_PULL-}" ]; then (
cd "$SUBWAYS_PATH"
git pull origin master
) fi
# Updating the planet file
PLANET_ABS="$(cd "$(dirname "$PLANET")"; pwd)/$(basename "$PLANET")"
pushd "$OSMCTOOLS" # osmupdate requires osmconvert in a current directory
OSMUPDATE_ERRORS=$(./osmupdate --drop-author --out-o5m "$PLANET_ABS" ${BBOX+"-b=$BBOX"} "$PLANET_ABS.new.o5m" 2>&1)
if [ -n "$OSMUPDATE_ERRORS" ]; then
echo "osmupdate failed: $OSMUPDATE_ERRORS"
exit 5
fi
popd
mv "$PLANET_ABS.new.o5m" "$PLANET_ABS"
# Filtering it
FILTERED_DATA="$TMPDIR/subways.osm"
QRELATIONS="route=subway =light_rail =monorail =train route_master=subway =light_rail =monorail =train public_transport=stop_area =stop_area_group"
QNODES="railway=station station=subway =light_rail =monorail railway=subway_entrance subway=yes light_rail=yes monorail=yes train=yes"
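# Note: in osmfilter's filter syntax a bare "=value" token reuses the key of
# the preceding "key=value" pair, so "route=subway =light_rail" matches
# route=subway OR route=light_rail.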
"$OSMCTOOLS/osmfilter" "$PLANET" --keep= --keep-relations="$QRELATIONS" --keep-nodes="$QNODES" --drop-author -o="$FILTERED_DATA"
# Running the validation
VALIDATION="$TMPDIR/validation.json"
"$PYTHON" "$SUBWAYS_PATH/process_subways.py" -q -x "$FILTERED_DATA" -l "$VALIDATION" ${MAPSME+-o "$MAPSME"}\
${CITY+-c "$CITY"} ${DUMP+-d "$DUMP"} ${GEOJSON+-j "$GEOJSON"}\
${ELEMENTS_CACHE+-i "$ELEMENTS_CACHE"} ${CITY_CACHE+--cache "$CITY_CACHE"}\
${RECOVERY_PATH+-r "$RECOVERY_PATH"}
rm "$FILTERED_DATA"
# Preparing HTML files
if [ -z "${HTML_DIR-}" ]; then
HTML_DIR="$SUBWAYS_PATH/html"
REMOVE_HTML=1
fi
mkdir -p "$HTML_DIR"
rm -f "$HTML_DIR"/*.html
"$PYTHON" "$SUBWAYS_PATH/validation_to_html.py" "$VALIDATION" "$HTML_DIR"
rm "$VALIDATION"
# Uploading files to the server
if [ -n "${SERVER-}" ]; then
scp -q ${SERVER_KEY+-i "$SERVER_KEY"} "$HTML_DIR"/* "$SERVER"
if [ -n "${REMOVE_HTML-}" ]; then
rm -r "$HTML_DIR"
fi
fi
|
mapsme/subways
|
scripts/process_subways.sh
|
Shell
|
apache-2.0
| 3,566 |
#!/bin/sh
. $(dirname "$0")/driver.sh
test_group "tree tests for fake.dts"
test_result 0 <<EOF
/ (/)
proc0 (/proc0)
fsi@20000 (/proc0/fsi)
pib@20100 (/proc0/pib)
core@10010 (/proc0/pib/core@10010)
thread@0 (/proc0/pib/core@10010/thread@0)
thread@1 (/proc0/pib/core@10010/thread@1)
core@10020 (/proc0/pib/core@10020)
thread@0 (/proc0/pib/core@10020/thread@0)
thread@1 (/proc0/pib/core@10020/thread@1)
core@10030 (/proc0/pib/core@10030)
thread@0 (/proc0/pib/core@10030/thread@0)
thread@1 (/proc0/pib/core@10030/thread@1)
core@10040 (/proc0/pib/core@10040)
thread@0 (/proc0/pib/core@10040/thread@0)
thread@1 (/proc0/pib/core@10040/thread@1)
proc1 (/proc1)
fsi@21000 (/proc1/fsi)
pib@21100 (/proc1/pib)
core@10010 (/proc1/pib/core@10010)
thread@0 (/proc1/pib/core@10010/thread@0)
thread@1 (/proc1/pib/core@10010/thread@1)
core@10020 (/proc1/pib/core@10020)
thread@0 (/proc1/pib/core@10020/thread@0)
thread@1 (/proc1/pib/core@10020/thread@1)
core@10030 (/proc1/pib/core@10030)
thread@0 (/proc1/pib/core@10030/thread@0)
thread@1 (/proc1/pib/core@10030/thread@1)
core@10040 (/proc1/pib/core@10040)
thread@0 (/proc1/pib/core@10040/thread@0)
thread@1 (/proc1/pib/core@10040/thread@1)
proc2 (/proc2)
fsi@22000 (/proc2/fsi)
pib@22100 (/proc2/pib)
core@10010 (/proc2/pib/core@10010)
thread@0 (/proc2/pib/core@10010/thread@0)
thread@1 (/proc2/pib/core@10010/thread@1)
core@10020 (/proc2/pib/core@10020)
thread@0 (/proc2/pib/core@10020/thread@0)
thread@1 (/proc2/pib/core@10020/thread@1)
core@10030 (/proc2/pib/core@10030)
thread@0 (/proc2/pib/core@10030/thread@0)
thread@1 (/proc2/pib/core@10030/thread@1)
core@10040 (/proc2/pib/core@10040)
thread@0 (/proc2/pib/core@10040/thread@0)
thread@1 (/proc2/pib/core@10040/thread@1)
proc3 (/proc3)
fsi@23000 (/proc3/fsi)
pib@23100 (/proc3/pib)
core@10010 (/proc3/pib/core@10010)
thread@0 (/proc3/pib/core@10010/thread@0)
thread@1 (/proc3/pib/core@10010/thread@1)
core@10020 (/proc3/pib/core@10020)
thread@0 (/proc3/pib/core@10020/thread@0)
thread@1 (/proc3/pib/core@10020/thread@1)
core@10030 (/proc3/pib/core@10030)
thread@0 (/proc3/pib/core@10030/thread@0)
thread@1 (/proc3/pib/core@10030/thread@1)
core@10040 (/proc3/pib/core@10040)
thread@0 (/proc3/pib/core@10040/thread@0)
thread@1 (/proc3/pib/core@10040/thread@1)
proc4 (/proc4)
fsi@24000 (/proc4/fsi)
pib@24100 (/proc4/pib)
core@10010 (/proc4/pib/core@10010)
thread@0 (/proc4/pib/core@10010/thread@0)
thread@1 (/proc4/pib/core@10010/thread@1)
core@10020 (/proc4/pib/core@10020)
thread@0 (/proc4/pib/core@10020/thread@0)
thread@1 (/proc4/pib/core@10020/thread@1)
core@10030 (/proc4/pib/core@10030)
thread@0 (/proc4/pib/core@10030/thread@0)
thread@1 (/proc4/pib/core@10030/thread@1)
core@10040 (/proc4/pib/core@10040)
thread@0 (/proc4/pib/core@10040/thread@0)
thread@1 (/proc4/pib/core@10040/thread@1)
proc5 (/proc5)
fsi@25000 (/proc5/fsi)
pib@25100 (/proc5/pib)
core@10010 (/proc5/pib/core@10010)
thread@0 (/proc5/pib/core@10010/thread@0)
thread@1 (/proc5/pib/core@10010/thread@1)
core@10020 (/proc5/pib/core@10020)
thread@0 (/proc5/pib/core@10020/thread@0)
thread@1 (/proc5/pib/core@10020/thread@1)
core@10030 (/proc5/pib/core@10030)
thread@0 (/proc5/pib/core@10030/thread@0)
thread@1 (/proc5/pib/core@10030/thread@1)
core@10040 (/proc5/pib/core@10040)
thread@0 (/proc5/pib/core@10040/thread@0)
thread@1 (/proc5/pib/core@10040/thread@1)
proc6 (/proc6)
fsi@26000 (/proc6/fsi)
pib@26100 (/proc6/pib)
core@10010 (/proc6/pib/core@10010)
thread@0 (/proc6/pib/core@10010/thread@0)
thread@1 (/proc6/pib/core@10010/thread@1)
core@10020 (/proc6/pib/core@10020)
thread@0 (/proc6/pib/core@10020/thread@0)
thread@1 (/proc6/pib/core@10020/thread@1)
core@10030 (/proc6/pib/core@10030)
thread@0 (/proc6/pib/core@10030/thread@0)
thread@1 (/proc6/pib/core@10030/thread@1)
core@10040 (/proc6/pib/core@10040)
thread@0 (/proc6/pib/core@10040/thread@0)
thread@1 (/proc6/pib/core@10040/thread@1)
proc7 (/proc7)
fsi@27000 (/proc7/fsi)
pib@27100 (/proc7/pib)
core@10010 (/proc7/pib/core@10010)
thread@0 (/proc7/pib/core@10010/thread@0)
thread@1 (/proc7/pib/core@10010/thread@1)
core@10020 (/proc7/pib/core@10020)
thread@0 (/proc7/pib/core@10020/thread@0)
thread@1 (/proc7/pib/core@10020/thread@1)
core@10030 (/proc7/pib/core@10030)
thread@0 (/proc7/pib/core@10030/thread@0)
thread@1 (/proc7/pib/core@10030/thread@1)
core@10040 (/proc7/pib/core@10040)
thread@0 (/proc7/pib/core@10040/thread@0)
thread@1 (/proc7/pib/core@10040/thread@1)
EOF
test_run libpdbg_dtree_test tree system /
test_result 0 <<EOF
/ (/)
fsi@20000 (/proc0/fsi)
pib@20100 (/proc0/pib)
core@10010 (/proc0/pib/core@10010)
thread@0 (/proc0/pib/core@10010/thread@0)
thread@1 (/proc0/pib/core@10010/thread@1)
core@10020 (/proc0/pib/core@10020)
thread@0 (/proc0/pib/core@10020/thread@0)
thread@1 (/proc0/pib/core@10020/thread@1)
core@10030 (/proc0/pib/core@10030)
thread@0 (/proc0/pib/core@10030/thread@0)
thread@1 (/proc0/pib/core@10030/thread@1)
core@10040 (/proc0/pib/core@10040)
thread@0 (/proc0/pib/core@10040/thread@0)
thread@1 (/proc0/pib/core@10040/thread@1)
fsi@21000 (/proc1/fsi)
pib@21100 (/proc1/pib)
core@10010 (/proc1/pib/core@10010)
thread@0 (/proc1/pib/core@10010/thread@0)
thread@1 (/proc1/pib/core@10010/thread@1)
core@10020 (/proc1/pib/core@10020)
thread@0 (/proc1/pib/core@10020/thread@0)
thread@1 (/proc1/pib/core@10020/thread@1)
core@10030 (/proc1/pib/core@10030)
thread@0 (/proc1/pib/core@10030/thread@0)
thread@1 (/proc1/pib/core@10030/thread@1)
core@10040 (/proc1/pib/core@10040)
thread@0 (/proc1/pib/core@10040/thread@0)
thread@1 (/proc1/pib/core@10040/thread@1)
fsi@22000 (/proc2/fsi)
pib@22100 (/proc2/pib)
core@10010 (/proc2/pib/core@10010)
thread@0 (/proc2/pib/core@10010/thread@0)
thread@1 (/proc2/pib/core@10010/thread@1)
core@10020 (/proc2/pib/core@10020)
thread@0 (/proc2/pib/core@10020/thread@0)
thread@1 (/proc2/pib/core@10020/thread@1)
core@10030 (/proc2/pib/core@10030)
thread@0 (/proc2/pib/core@10030/thread@0)
thread@1 (/proc2/pib/core@10030/thread@1)
core@10040 (/proc2/pib/core@10040)
thread@0 (/proc2/pib/core@10040/thread@0)
thread@1 (/proc2/pib/core@10040/thread@1)
fsi@23000 (/proc3/fsi)
pib@23100 (/proc3/pib)
core@10010 (/proc3/pib/core@10010)
thread@0 (/proc3/pib/core@10010/thread@0)
thread@1 (/proc3/pib/core@10010/thread@1)
core@10020 (/proc3/pib/core@10020)
thread@0 (/proc3/pib/core@10020/thread@0)
thread@1 (/proc3/pib/core@10020/thread@1)
core@10030 (/proc3/pib/core@10030)
thread@0 (/proc3/pib/core@10030/thread@0)
thread@1 (/proc3/pib/core@10030/thread@1)
core@10040 (/proc3/pib/core@10040)
thread@0 (/proc3/pib/core@10040/thread@0)
thread@1 (/proc3/pib/core@10040/thread@1)
fsi@24000 (/proc4/fsi)
pib@24100 (/proc4/pib)
core@10010 (/proc4/pib/core@10010)
thread@0 (/proc4/pib/core@10010/thread@0)
thread@1 (/proc4/pib/core@10010/thread@1)
core@10020 (/proc4/pib/core@10020)
thread@0 (/proc4/pib/core@10020/thread@0)
thread@1 (/proc4/pib/core@10020/thread@1)
core@10030 (/proc4/pib/core@10030)
thread@0 (/proc4/pib/core@10030/thread@0)
thread@1 (/proc4/pib/core@10030/thread@1)
core@10040 (/proc4/pib/core@10040)
thread@0 (/proc4/pib/core@10040/thread@0)
thread@1 (/proc4/pib/core@10040/thread@1)
fsi@25000 (/proc5/fsi)
pib@25100 (/proc5/pib)
core@10010 (/proc5/pib/core@10010)
thread@0 (/proc5/pib/core@10010/thread@0)
thread@1 (/proc5/pib/core@10010/thread@1)
core@10020 (/proc5/pib/core@10020)
thread@0 (/proc5/pib/core@10020/thread@0)
thread@1 (/proc5/pib/core@10020/thread@1)
core@10030 (/proc5/pib/core@10030)
thread@0 (/proc5/pib/core@10030/thread@0)
thread@1 (/proc5/pib/core@10030/thread@1)
core@10040 (/proc5/pib/core@10040)
thread@0 (/proc5/pib/core@10040/thread@0)
thread@1 (/proc5/pib/core@10040/thread@1)
fsi@26000 (/proc6/fsi)
pib@26100 (/proc6/pib)
core@10010 (/proc6/pib/core@10010)
thread@0 (/proc6/pib/core@10010/thread@0)
thread@1 (/proc6/pib/core@10010/thread@1)
core@10020 (/proc6/pib/core@10020)
thread@0 (/proc6/pib/core@10020/thread@0)
thread@1 (/proc6/pib/core@10020/thread@1)
core@10030 (/proc6/pib/core@10030)
thread@0 (/proc6/pib/core@10030/thread@0)
thread@1 (/proc6/pib/core@10030/thread@1)
core@10040 (/proc6/pib/core@10040)
thread@0 (/proc6/pib/core@10040/thread@0)
thread@1 (/proc6/pib/core@10040/thread@1)
fsi@27000 (/proc7/fsi)
pib@27100 (/proc7/pib)
core@10010 (/proc7/pib/core@10010)
thread@0 (/proc7/pib/core@10010/thread@0)
thread@1 (/proc7/pib/core@10010/thread@1)
core@10020 (/proc7/pib/core@10020)
thread@0 (/proc7/pib/core@10020/thread@0)
thread@1 (/proc7/pib/core@10020/thread@1)
core@10030 (/proc7/pib/core@10030)
thread@0 (/proc7/pib/core@10030/thread@0)
thread@1 (/proc7/pib/core@10030/thread@1)
core@10040 (/proc7/pib/core@10040)
thread@0 (/proc7/pib/core@10040/thread@0)
thread@1 (/proc7/pib/core@10040/thread@1)
proc0 (/proc0)
proc1 (/proc1)
proc2 (/proc2)
proc3 (/proc3)
proc4 (/proc4)
proc5 (/proc5)
proc6 (/proc6)
proc7 (/proc7)
EOF
test_run libpdbg_dtree_test tree backend /
test_result 0 <<EOF
proc1 (/proc1)
fsi@21000 (/proc1/fsi)
pib@21100 (/proc1/pib)
core@10010 (/proc1/pib/core@10010)
thread@0 (/proc1/pib/core@10010/thread@0)
thread@1 (/proc1/pib/core@10010/thread@1)
core@10020 (/proc1/pib/core@10020)
thread@0 (/proc1/pib/core@10020/thread@0)
thread@1 (/proc1/pib/core@10020/thread@1)
core@10030 (/proc1/pib/core@10030)
thread@0 (/proc1/pib/core@10030/thread@0)
thread@1 (/proc1/pib/core@10030/thread@1)
core@10040 (/proc1/pib/core@10040)
thread@0 (/proc1/pib/core@10040/thread@0)
thread@1 (/proc1/pib/core@10040/thread@1)
EOF
test_run libpdbg_dtree_test tree system /proc1
test_result 0 <<EOF
proc1 (/proc1)
EOF
test_run libpdbg_dtree_test tree backend /proc1
test_result 0 <<EOF
fsi@20000 (/proc0/fsi)
EOF
test_run libpdbg_dtree_test tree system /proc0/fsi
test_result 0 <<EOF
fsi@20000 (/proc0/fsi)
pib@20100 (/proc0/pib)
core@10010 (/proc0/pib/core@10010)
thread@0 (/proc0/pib/core@10010/thread@0)
thread@1 (/proc0/pib/core@10010/thread@1)
core@10020 (/proc0/pib/core@10020)
thread@0 (/proc0/pib/core@10020/thread@0)
thread@1 (/proc0/pib/core@10020/thread@1)
core@10030 (/proc0/pib/core@10030)
thread@0 (/proc0/pib/core@10030/thread@0)
thread@1 (/proc0/pib/core@10030/thread@1)
core@10040 (/proc0/pib/core@10040)
thread@0 (/proc0/pib/core@10040/thread@0)
thread@1 (/proc0/pib/core@10040/thread@1)
EOF
test_run libpdbg_dtree_test tree backend /proc0/fsi
test_result 0 <<EOF
pib@22100 (/proc2/pib)
core@10010 (/proc2/pib/core@10010)
thread@0 (/proc2/pib/core@10010/thread@0)
thread@1 (/proc2/pib/core@10010/thread@1)
core@10020 (/proc2/pib/core@10020)
thread@0 (/proc2/pib/core@10020/thread@0)
thread@1 (/proc2/pib/core@10020/thread@1)
core@10030 (/proc2/pib/core@10030)
thread@0 (/proc2/pib/core@10030/thread@0)
thread@1 (/proc2/pib/core@10030/thread@1)
core@10040 (/proc2/pib/core@10040)
thread@0 (/proc2/pib/core@10040/thread@0)
thread@1 (/proc2/pib/core@10040/thread@1)
EOF
test_run libpdbg_dtree_test tree system /proc2/pib
test_result 0 <<EOF
pib@22100 (/proc2/pib)
core@10010 (/proc2/pib/core@10010)
thread@0 (/proc2/pib/core@10010/thread@0)
thread@1 (/proc2/pib/core@10010/thread@1)
core@10020 (/proc2/pib/core@10020)
thread@0 (/proc2/pib/core@10020/thread@0)
thread@1 (/proc2/pib/core@10020/thread@1)
core@10030 (/proc2/pib/core@10030)
thread@0 (/proc2/pib/core@10030/thread@0)
thread@1 (/proc2/pib/core@10030/thread@1)
core@10040 (/proc2/pib/core@10040)
thread@0 (/proc2/pib/core@10040/thread@0)
thread@1 (/proc2/pib/core@10040/thread@1)
EOF
test_run libpdbg_dtree_test tree backend /proc2/pib
test_result 0 <<EOF
thread@1
core@10040
pib@27100
proc7
/
EOF
test_run libpdbg_dtree_test rtree system /proc7/pib/core@10040/thread@1
test_result 0 <<EOF
thread@1
core@10040
pib@27100
proc7
/
EOF
test_run libpdbg_dtree_test rtree system /fsi@27000/pib@27100/core@10040/thread@1
test_result 0 <<EOF
thread@1
core@10040
pib@27100
fsi@27000
/
EOF
test_run libpdbg_dtree_test rtree backend /proc7/pib/core@10040/thread@1
test_result 0 <<EOF
thread@1
core@10040
pib@27100
fsi@27000
/
EOF
test_run libpdbg_dtree_test rtree backend /fsi@27000/pib@27100/core@10040/thread@1
|
open-power/pdbg
|
tests/test_tree.sh
|
Shell
|
apache-2.0
| 14,684 |
source ~/stackrc
rm -f reachable.txt intermittent.txt unreachable.txt > /dev/null 2>&1
for ip in `openstack server list -f value -c Networks | sed s/ctlplane=//`
do
server=$(openstack server list | grep $ip | awk '{print $2}')
node=$(openstack baremetal node list | grep $server | awk '{print $2}')
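# Give each node up to two rounds of 5 pings before declaring it unreachable.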
TRY=0
while [ "$TRY" -lt 2 ]
do
count=$(ping -c 5 $ip | grep received | awk '{print $4}')
if [ "$count" -eq 5 ]
then
echo $node >> reachable.txt
break
elif [ "$count" -lt 5 ] && [ "$count" -gt 0 ]
then
echo $node >> intermittent.txt
break
else
if [ "$TRY" -eq 1 ]
then
echo $node >> unreachable.txt
fi
fi
TRY=$((TRY+1))
done
done
|
smalleni/openstack-testing
|
ping_nodes.sh
|
Shell
|
apache-2.0
| 921 |
#!/bin/bash
debug="false"
timestamp=`date +%Y-%m-%d-%H-%M-%S`
ROOT_DIR=$DOZAT_ROOT
OUT_LOG=$ROOT_DIR/hyperparams/tune-$timestamp
if [[ "$debug" == "false" ]]; then
mkdir -p $OUT_LOG
fi
echo "Writing to $OUT_LOG"
#num_gpus=100
num_gpus=16
lrs="0.04" # 0.06"
mus="0.9"
nus="0.98"
epsilons="1e-12"
warmup_steps="8000"
batch_sizes="5000"
trans_layers="10 12" # "10 8 6" # 3
num_heads="8" #4 8"
head_sizes="25"
relu_hidden_sizes="800"
parents_penalties="0.0"
rels_penalties="0.0"
#grandparents_penalties="0.0 0.1 1.0 0.01 10.0 0.0001"
parents_layers="parents:4 parents:5"
#grandparents_layers="grandparents:2 grandparents:3 no"
predicate_layers="1"
scheduled_sampling="constant=1.0" # constant=0.0 sigmoid=64000 sigmoid=32000"
use_full_parse="True"
one_example_per_predicates="True False"
reps="2"
# 2*2*2*2 = 16
# array to hold all the commands we'll distribute
declare -a commands
i=1
for lr in ${lrs[@]}; do
for mu in ${mus[@]}; do
for nu in ${nus[@]}; do
for epsilon in ${epsilons[@]}; do
for warmup_steps in ${warmup_steps[@]}; do
for trans_layer in ${trans_layers[@]}; do
for num_head in ${num_heads[@]}; do
for head_size in ${head_sizes[@]}; do
for relu_hidden_size in ${relu_hidden_sizes[@]}; do
for batch_size in ${batch_sizes[@]}; do
for parents_penalty in ${parents_penalties[@]}; do
for rel_penalty in ${rels_penalties[@]}; do
for parents_layer in ${parents_layers[@]}; do
for predicate_layer in ${predicate_layers[@]}; do
for full_parse in ${use_full_parse[@]}; do
for ss in ${scheduled_sampling[@]}; do
for one_example_per_predicate in ${one_example_per_predicates[@]}; do
for rep in `seq $reps`; do
# if [[ "$cnn_layer" != "2" || "$trans_layer" != "10" ]]; then
fname_append="$rep-$lr-$mu-$nu-$epsilon-$warmup_steps-$batch_size-$trans_layer-$num_head-$head_size-$relu_hidden_size-$parents_penalty-$rel_penalty-$parents_layer-$predicate_layer-$ss-$full_parse-$one_example_per_predicate"
partition="titanx-long"
ss_arr=(${ss//=/ })
sampling_sched=${ss_arr[0]}
sample_prob=${ss_arr[1]}
commands+=("srun --gres=gpu:1 --partition=$partition --mem=24G python network.py \
--config_file config/trans-conll05-bio-parse-tan-notrain-goldtrigs.cfg \
--save_dir $OUT_LOG/scores-$fname_append \
--save_every 500 \
--train_iters 5000000 \
--train_batch_size $batch_size \
--test_batch_size $batch_size \
--warmup_steps $warmup_steps \
--learning_rate $lr \
--n_recur $trans_layer \
--num_heads $num_head \
--head_size $head_size \
--relu_hidden_size $relu_hidden_size \
--mu $mu \
--nu $nu \
--epsilon $epsilon \
--predicate_layer $predicate_layer \
--multitask_layers \"$parents_layer\" \
--multitask_penalties \"parents:$parents_penalty\" \
--one_example_per_predicate $one_example_per_predicate \
--eval_by_domain False \
--eval_srl True \
--arc_loss_penalty $parents_penalty \
--rel_loss_penalty $rel_penalty \
--sampling_schedule $sampling_sched \
--sample_prob $sample_prob \
--save True \
&> $OUT_LOG/train-$fname_append.log")
i=$((i + 1))
done
done
done
done
done
done
done
done
done
done
done
done
done
done
done
done
done
done
# now distribute them to the gpus
num_jobs=${#commands[@]}
jobs_per_gpu=$((num_jobs / num_gpus))
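# NB: integer division; when num_jobs is not an exact multiple of num_gpus,
# the remainder jobs are never scheduled.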
echo "Distributing $num_jobs jobs to $num_gpus gpus ($jobs_per_gpu jobs/gpu)"
j=0
for (( gpuid=0; gpuid<num_gpus; gpuid++)); do
for (( i=0; i<jobs_per_gpu; i++ )); do
jobid=$((j * jobs_per_gpu + i))
comm="${commands[$jobid]}"
comm=${comm/XX/$gpuid}
# echo "Starting job $jobid on gpu $gpuid"
echo ${comm}
if [[ "$debug" == "false" ]]; then
eval ${comm}
fi
done &
j=$((j + 1))
done
|
strubell/Parser
|
tune/tune-conll05-goldpreds-tan-notrain.sh
|
Shell
|
apache-2.0
| 7,356 |
#!/bin/bash
# ----------------------------------------------------------------------------
#
# Package : kafka
# Version : 1.1.1, 2.2.0, 2.2.1-rc1
# Source repo : https://github.com/apache/kafka
# Tested on : rhel_7.4
# Script License: Apache License, Version 2 or later
# Maintainer : Priya Seth <[email protected]>
#
# Disclaimer: This script has been tested in non-root mode on given
# ========== platform using the mentioned version of the package.
# It may not work as expected with newer versions of the
# package and/or distribution. In such case, please
# contact "Maintainer" of this script.
#
# ----------------------------------------------------------------------------
if [ "$#" -gt 0 ]
then
VERSION=$1
else
VERSION="2.2.0"
fi
#Install dependencies
sudo yum update -y
sudo yum install -y git wget unzip java-1.8.0-openjdk java-1.8.0-openjdk-devel
cd $HOME
wget https://services.gradle.org/distributions/gradle-4.8-bin.zip
unzip gradle-4.8-bin.zip
#Set required environment variables
export PATH=$HOME/gradle-4.8/bin:$PATH
#Build and run unit tests
cd $HOME
git clone https://github.com/apache/kafka
cd kafka
git checkout ${VERSION}
#Note: Downgrading snappy version as version 1.1.7.2 links to GLIBC 2.2, and hence
#does not work on RHEL where we have an older version of GLIBC
#sed -i 's/ snappy: "1.1.7.1",/ snappy: "1.1.4",/g' gradle/dependencies.gradle
sed -i 's/ snappy: "1.1.7.2",/ snappy: "1.1.4",/g' gradle/dependencies.gradle
gradle clean
gradle
./gradlew jar
./gradlew releaseTarGz -x signArchives
#Please uncomment below line if you want to to execute unit tests
#Results are "5031 tests completed, 1 failed, 1 skipped"
#./gradlew unitTest
|
ppc64le/build-scripts
|
k/kafka/kafka_rhel7.6.sh
|
Shell
|
apache-2.0
| 1,743 |
#!/bin/bash
rm -Rf rescue64 uefi64
mkdir -p rescue64 uefi64
mcopy -sv -i RESCUE64.img@@1M ::EFI/boot/\* rescue64
mcopy -sv -i UEFI64.img@@1M ::EFI/boot/\* uefi64
|
schlomo/gss-grub
|
opal/prepare.sh
|
Shell
|
apache-2.0
| 162 |
#!/bin/bash
port=
forwarded_port=
ports=
#http_port=8080
VBOX_MANAGE=/usr/bin/VBoxManage
OCTETS="192.168."
ORIGINAL_OCTETS=${OCTETS}"56"
function replace_port() {
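# Draw random ports from 20000-65000 until one is found that neither already
# appears in the Vagrantfile nor is currently listening (checked via netstat),
# then substitute it into the Vagrantfile.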
res=0
p=
while [ $res -eq 0 ] ; do
p=`shuf -i 20000-65000 -n 1`
# If the port is already in the file, try again
grep $p Vagrantfile
r1=$?
$(netstat -lptn | grep $p 2>&1 > /dev/null)
r2=$?
if [ $r1 -eq 0 ] || [ $r2 -eq 0 ] ; then
p=`shuf -i 20000-65000 -n 1`
else
res=1
fi
done
if [ "$forwarded_port" != "22" ] ; then
perl -pi -e "s/$forwarded_port/$p/g" Vagrantfile
perl -pi -e "s/$p/$forwarded_port/" Vagrantfile
echo "$port -> $p"
else
echo "New port is: $p"
sed "0,/RE/s/10022/$p/" Vagrantfile > Vagrantfile.new
sed "0,/RE/s/10023/$(expr $p + 1)/" Vagrantfile.new > Vagrantfile
sed "0,/RE/s/10024/$(expr $p + 2)/" Vagrantfile > Vagrantfile.new
mv Vagrantfile.new Vagrantfile
fi
}
function parse_ports() {
ports=$(grep forward Vagrantfile | grep -Eo '[0-9]{2,5}' | xargs)
count=0
echo "Found forwarded Ports:"
for i in $ports ; do
odd=$(($count % 2))
if [ $odd -eq 1 ] ; then
echo "$i"
else
echo -n "$i -> "
fi
count=$(($count + 1))
done
}
function change_subnet() {
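# Find the highest third octet among the existing VirtualBox host-only
# networks and move this cluster to the next free 192.168.x subnet, in both
# the Vagrantfile and cluster.yml.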
priv_subnets=($($VBOX_MANAGE list hostonlyifs | grep "IPAddress:" | awk -F' ' '{print $2}' | awk -F'.' '{print $3}'))
if [ ${#priv_subnets[@]} -gt 0 ]; then
SAVEIFS=$IFS
IFS=$'\n'
sorted=($(sort <<<"${priv_subnets[*]}"))
IFS=$SAVEIFS
new_subnet=$((${sorted[-1]} + 1))
new_octets=${OCTETS}${new_subnet}
sed -i "s/${ORIGINAL_OCTETS}/${new_octets}/g" Vagrantfile
sed -i "s/${ORIGINAL_OCTETS}/${new_octets}/g" cluster.yml
fi
}
PORTS=1
parse_ports
count=0
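# The port numbers alternate: even positions are assigned to forwarded_port,
# odd positions to port, before each replace_port call.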
for i in $ports ; do
odd=$(($count % 2))
if [ $odd -eq 1 ] ; then
port=$i
replace_port
else
forwarded_port=$i
fi
count=$(($count + 1))
done
change_subnet
echo "Removing old vendored cookbooks"
rm -rf cookbooks > /dev/null 2>&1
rm -f Berksfile.lock > /dev/null 2>&1
echo "Vendoring cookbooks using 'berks vendor cookbooks'"
berks vendor cookbooks
echo "Running the Vagrantfile using 'vagrant up'"
vagrant up
|
SirOibaf/karamel-chef
|
test.sh
|
Shell
|
apache-2.0
| 2,312 |
scan() {
	# Ping $1.$2 once and report the host as alive if it answers.
ping -c 1 $1.$2 > /dev/null && echo "$1.$2 is alive"
}
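# Usage (example): ./scan.sh 192.168.1
# pings 192.168.1.1 through 192.168.1.254 in parallel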
for i in `seq 1 254`
do
scan $1 $i &
done
|
momomoxiaoxi/security
|
Scripts/scan.sh
|
Shell
|
apache-2.0
| 134 |
#VAGRANT specific functions and globals
#global variables for vagrant, this need to coincide with the ones in Vagrantfile
#TODO, make the Vagrant file read these variables from node and cluster definitions
VAGRANT_HOST="127.0.0.1"
VAGRANT_WEB_IP="192.168.99.2" #do not use .1 to avoid some vagrant warnings
VAGRANT_ipAddrPrefix="192.168.99.1" # IP Address Base for private network
VAGRANT_sshPortPrefix=2222 #prefix port for the different VMs
#### start provider customizations
# $1 vm name
vm_exists() {
logger "INFO: Checking if VM $1 exists..."
if inside_vagrant || [ ! -z "$(vagrant global-status |grep " $1 ")" ] ; then
logger "INFO: vagrant $vm_name exists."
return 0
else
return 1
fi
}
# $1 vm name
vm_start() {
if ! inside_vagrant ; then
logger "Starting vagrant VM $1"
cd $CONF_DIR/../../; vagrant up "$1"; cd -;
else
logger "INFO: vagrant VM already started"
fi
}
# $1 vm name $2 ssh port
vm_create() {
if ! inside_vagrant ; then
logger "Starting vagrant VM $1"
cd $CONF_DIR/../../; vagrant up "$1" --provision; cd -;
else
die "called vm_create from inside vagrant box"
fi
}
# $1 vm name
vm_reboot() {
if ! inside_vagrant ; then
logger "INFO: Reloading vagrant VM $1"
cd $CONF_DIR/../../; vagrant reload "$1"; cd -;
else
die "ERROR: cannot reboot/reload vagrant VM $1 from inside the VM"
fi
}
#1 $vm_name
node_delete() {
if ! inside_vagrant ; then
logger "WARNING: Forcing delete of vagrant VM $1"
cd $CONF_DIR/../../; vagrant destroy -f "$1"; cd -;
else
die "ERROR: cannot destroy vagrant VM $1 from inside the VM"
fi
}
#1 $vm_name
node_stop() {
if ! inside_vagrant ; then
logger "INFO: Suspending vagrant VM $1"
cd $CONF_DIR/../../; vagrant suspend "$1"; cd -;
else
die "ERROR: cannot suspend vagrant VM $1 from inside the VM"
fi
}
#1 $vm_name
node_start() {
if ! inside_vagrant ; then
logger "INFO: Starting vagrant VM $1"
cd $CONF_DIR/../../; vagrant up "$1"; cd -;
else
die "cannot start vagrant VM $1 from inside the VM"
fi
}
#$1 vm_name
vm_get_status(){
if ! inside_vagrant ; then
echo "$(vagrant global-status |grep " $1 "|cut -d " " -f 5 )"
else
die "cannot start vagrant VM $1 from inside the VM"
fi
}
get_OK_status() {
echo "running"
}
#the default SSH host override to avoid using hosts file, we translate aloja-web to the internal IP
get_ssh_host() {
echo "$VAGRANT_HOST"
}
#overwrite for vagrant
get_repo_path(){
echo "/vagrant"
}
#vm_name must be set, override when needed ie., azure,...
get_vm_ssh_port() {
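  # A cluster node NN (zero-padded) maps to port "${VAGRANT_sshPortPrefix}N"
  # with the leading zero stripped, e.g. node 01 -> 22221 for the 2222 prefix.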
local node_ssh_port=""
local vagrant_cluster_prefix="$VAGRANT_sshPortPrefix"
#for nodes
if [ "$type" == "node" ] || [ "$vm_name" == 'aloja-web' ] ; then
local node_ssh_port="$vm_ssh_port"
#for clusters
else
for vm_id in $(seq -f '%02g' 0 "$numberOfNodes") ; do #pad the sequence with 0s
local vm_name_tmp="${clusterName}-${vm_id}"
if [ ! -z "$vm_name" ] && [ "$vm_name" == "$vm_name_tmp" ] ; then
local node_ssh_port="$vagrant_cluster_prefix${vm_id:1}"
break #just return on match
fi
done
fi
echo "$node_ssh_port"
}
#default port, override to change i.e. in Azure
get_ssh_port() {
local vm_ssh_port_tmp=""
if inside_vagrant ; then
local vm_ssh_port_tmp="22" #from inside the vagrant box
else
local vm_ssh_port_tmp="$(get_vm_ssh_port)" #default port for the vagrant vm
fi
if [ "$vm_ssh_port_tmp" ] ; then
echo "$vm_ssh_port_tmp"
else
die "cannot get SSH port for VM $vm_name"
fi
}
vagrant_link_repo(){
logger "INFO: Making sure /var/www is linked for the vagrant VM"
vm_execute "
if [ ! -d '/var/www/aloja-web' ] ; then
sudo ln -fs /vagrant /var/www
fi"
}
vagrant_link_share(){
logger "INFO: Making sure ~/share is linked in the vagrant VM"
vm_execute "
if [ ! -L '/home/vagrant/share' ] ; then
mv /home/vagrant/share /home/vagrant/share_backup 2> /dev/null
sudo ln -fs /vagrant/blobs /home/vagrant/share;
touch /home/vagrant/share/safe_store;
fi"
if [ "$type" == "cluster" ] ; then
logger "INFO: Making sure we have scratch folders for bench runs"
vm_execute "
if [ ! -d '/scratch' ] ; then
sudo mkdir -p /scratch/{local,ssd} /scratch/attached/{1..3};
sudo chown -R $userAloja: /scratch;
fi
"
fi
}
make_hosts_file() {
local hosts_file="$VAGRANT_WEB_IP\taloja-web"
#for the aloja-web to know about the cluster IPs
# TODO needs to be dynamic
if [ "$type" == "node" ] ; then
for vm_id in $(seq -f '%02g' 0 "1") ; do #pad the sequence with 0s
local vm_name_tmp="vagrant-99-${vm_id}"
local hosts_file="$hosts_file
${VAGRANT_ipAddrPrefix}${vm_id}\t${vm_name_tmp}"
done
#for clusters (from config file)
else
for vm_id in $(seq -f '%02g' 0 "$numberOfNodes") ; do #pad the sequence with 0s
local vm_name_tmp="${clusterName}-${vm_id}"
local hosts_file="$hosts_file
${VAGRANT_ipAddrPrefix}${vm_id}\t${vm_name_tmp}"
done
fi
echo -e "$hosts_file"
}
vm_final_bootstrap() {
logger "INFO: Finalizing VM $vm_name bootstrap"
#currently is run everytime it is executed
vm_update_hosts_file
vagrant_link_share
}
vm_initialize_disks() {
: #not needed
}
vm_mount_disks() {
: #not needed
}
### cluster functions
cluster_final_boostrap() {
: #not necessary for vagrant (yet)
}
# for bscaloja, has a special /public dir
get_nginx_conf(){
echo -e '
server {
listen 80 default_server;
root /var/www/aloja-web/;
index index.html index.php;
autoindex on;
location / {
index index.php;
try_files $uri $uri/ /index.php?q=$uri&$args;
autoindex on;
}
location /slides {
alias /var/www/aloja-web/presentations/aloja-web;
index template.html;
}
location /ganglia {
root /var/www/;
location ~ \.php$ {
fastcgi_pass unix:/var/run/php5-fpm.sock;
fastcgi_index index.php;
include fastcgi_params;
}
}
location ~ \.php$ {
# try_files $uri =404;
try_files $uri /index.php?c=404&q=$uri&$args;
fastcgi_pass unix:/var/run/php5-fpm.sock;
fastcgi_index index.php;
include fastcgi_params;
fastcgi_read_timeout 600; # Set fairly high for debugging
fastcgi_intercept_errors on;
}
error_page 404 /index.php?c=404&q=$uri&$args;
include /etc/nginx/mime.types;
default_type application/octet-stream;
#keepalive_timeout ;
#avoid caches
sendfile off;
expires off;
# allow the server to close the connection after a client stops responding. Frees up socket-associated memory.
reset_timedout_connection on;
#perf optimizations
tcp_nodelay on;
gzip on;
gzip_comp_level 2;
gzip_proxied any;
gzip_types text/plain text/css text/javascript application/json application/x-javascript text/xml application/xml application/xml+rss;
gzip_disable "msie6";
}'
}
|
Aloja/aloja
|
aloja-deploy/providers/vagrant.sh
|
Shell
|
apache-2.0
| 6,859 |
#!/bin/sh
OS_LIST='sl510-x86_64 sl520-x86_64 sl530-x86_64 sl540-x86_64 sl550-x86_64 sl560-x86_64 sl570-i386 sl580-x86_64 sl590-x86_64 sl610-x86_64 sl620-x86_64 sl630-x86_64 sl640-x86_64'
DATE=`date +%Y%m%d`
# Verify current working directory
if [ ! -d cfg/sites/grif/repository ]; then
echo "ERROR: this should be run from the base dir"
exit 1
fi
# Chose versioning software
vs="echo unknown"
if [ -d .svn ]; then
vs=svn
fi
if [ -d .git ]; then
vs=git
fi
for os in $OS_LIST ; do
if [ -d cfg/os/${os}/rpms/errata ]; then
echo $os
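# The awk invocations below split the errata paths on "/", "." and "-" and
# pull out the YYYYMMDD date component of the newest template.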
LAST_ERRATA_FIX=`ls -1 cfg/os/$os/rpms/errata/*-fix.tpl 2>/dev/null | grep errata/[0-9]*-fix | tail -n1 | awk 'BEGIN{FS="[/.-]"}{print $7}'`
if [ -f cfg/os/${os}/rpms/errata/$LAST_ERRATA_FIX-fix.tpl ]; then
echo " Copy errata-fix from last errata ($LAST_ERRATA_FIX)"
cp cfg/os/${os}/rpms/errata/$LAST_ERRATA_FIX-fix.tpl cfg/os/${os}/rpms/errata/$DATE-fix.tpl
$vs add cfg/os/${os}/rpms/errata/$DATE-fix.tpl
# echo ' Fix errata-fix name'
sed -i -e "s/$LAST_ERRATA_FIX/$DATE/g" cfg/os/${os}/rpms/errata/$DATE-fix.tpl
fi
LAST_ERRATA_INIT=`ls -1 cfg/os/$os/config/os/errata/*-init.tpl | grep errata/[0-9]*-init | tail -n1 | awk 'BEGIN{FS="[/.-]"}{print $8}'`
if [ -f cfg/os/${os}/config/os/errata/$LAST_ERRATA_INIT-init.tpl ]; then
echo " Copy errata-init from last errata ($LAST_ERRATA_INIT)"
cp cfg/os/${os}/config/os/errata/$LAST_ERRATA_INIT-init.tpl cfg/os/${os}/config/os/errata/$DATE-init.tpl
$vs add cfg/os/${os}/config/os/errata/$DATE-init.tpl
# echo ' Fix errata-init name'
sed -i -e "s/$LAST_ERRATA_INIT/$DATE/g" cfg/os/${os}/config/os/errata/$DATE-init.tpl
fi
echo ' Create errata template'
src/utils/misc/rpmErrata.pl /www/htdocs/packages/os/$os/errata > cfg/os/$os/rpms/errata/$DATE.tpl 2> /dev/null
$vs add cfg/os/$os/rpms/errata/$DATE.tpl
# echo ' Fix template name'
sed -i -e "s/rpms\/errata/rpms\/errata\/$DATE/g" cfg/os/${os}/rpms/errata/$DATE.tpl
echo
fi
done
|
quattor/scdb
|
utils/misc/errata.sh
|
Shell
|
apache-2.0
| 2,164 |
#!/bin/bash
# missing Command
http -f POST 127.0.0.1:50000/cloud_api hello=world AcceptFormat=json
# invalid Command
http -f POST 127.0.0.1:50000/cloud_api Command=hello CloudPlatform=aliyun AcceptFormat=json
# correct Command
http -f POST 127.0.0.1:50000/cloud_api Command=fetchImageFlavor CloudPlatform=aliyun AcceptFormat=json
|
njuallen/noj
|
server/test/aliyun/test_fetch_image_flavor.sh
|
Shell
|
bsd-2-clause
| 334 |