code | repo_name | path | language | license | size
---|---|---|---|---|---
#!/bin/sh
rm -rf mysql_
# BUILD
tar -xf mariadb-10.5.2.tar.gz
mkdir ~/mysql_
cd mariadb-10.5.2/BUILD/
cmake -DCMAKE_INSTALL_PREFIX=$HOME/mysql_ ..
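# NUM_CPU_CORES is normally exported by the Phoronix Test Suite harness; a
# fallback for running this script by hand (assumption, not part of the harness):
NUM_CPU_CORES=${NUM_CPU_CORES:-$(nproc)}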
make -j $NUM_CPU_CORES
echo $? > ~/install-exit-status
make install
# SETUP
cd ~/mysql_
./scripts/mysql_install_db --no-defaults --user=`basename $DEBUG_REAL_HOME` --basedir=$HOME/mysql_ --ldata=$HOME/mysql_/.data
chmod -R 777 .data
cd ~
echo "#!/bin/sh
cd mysql_
./bin/mysqlslap --user=`basename $DEBUG_REAL_HOME` -pphoronix --host=localhost --verbose \$@ > \$LOG_FILE 2>&1
echo \$? > ~/test-exit-status" > mysqlslap
chmod +x mysqlslap
| phoronix-test-suite/phoronix-test-suite | ob-cache/test-profiles/pts/mysqlslap-1.1.1/install.sh | Shell | gpl-3.0 | 589 |
#! /bin/sh
export SEADAS_HOME=${installer:sys.installationDir}
if [ -z "$SEADAS_HOME" ]; then
echo
echo Error: SEADAS_HOME not found in your environment.
echo Please set the SEADAS_HOME variable in your environment to match the
echo location of the SeaDAS 7.x installation
echo
exit 2
fi
. "$SEADAS_HOME/bin/detect_java.sh"
"$app_java_home/bin/java" \
-Xmx${installer:maxHeapSize} \
-Dceres.context=seadas \
"-Dseadas.mainClass=${seadas.mainClass}" \
"-Dseadas.processorClass=${seadas.processorClass}" \
"-Dseadas.home=$SEADAS_HOME" \
"-Dncsa.hdf.hdflib.HDFLibrary.hdflib=$SEADAS_HOME/modules/lib-hdf-${hdf.version}/lib/libjhdf.so" \
"-Dncsa.hdf.hdf5lib.H5.hdf5lib=$SEADAS_HOME/modules/lib-hdf-${hdf.version}/lib/libjhdf5.so" \
-jar "$SEADAS_HOME/bin/ceres-launcher.jar" "$@"
exit $?
| marpet/seadas | src/main/bin/linux/seadas-cli-template.sh | Shell | gpl-3.0 | 845 |
#!/usr/bin/env bash
# This file is part of The RetroPie Project
#
# The RetroPie Project is the legal property of its developers, whose names are
# too numerous to list here. Please refer to the COPYRIGHT.md file distributed with this source.
#
# See the LICENSE.md file at the top-level directory of this distribution and
# at https://raw.githubusercontent.com/RetroPie/RetroPie-Setup/master/LICENSE.md
#
rp_module_id="advmame-0.94"
rp_module_desc="AdvanceMAME v0.94.0"
rp_module_help="ROM Extension: .zip\n\nCopy your AdvanceMAME roms to either $romdir/mame-advmame or\n$romdir/arcade"
rp_module_licence="GPL2 https://raw.githubusercontent.com/amadvance/advancemame/master/COPYING"
rp_module_section="opt"
rp_module_flags="!mali !kms"
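# $romdir, $md_build, $md_inst and $md_conf_root used below are provided by the
# RetroPie-Setup framework that sources this module (they are not defined here).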
function depends_advmame-0.94() {
local depends=(libsdl1.2-dev)
getDepends "${depends[@]}"
}
function sources_advmame-0.94() {
downloadAndExtract "$__archive_url/advancemame-0.94.0.tar.gz" "$md_build" 1
_sources_patch_advmame-1.4
}
function build_advmame-0.94() {
./configure CFLAGS="$CFLAGS -fsigned-char -fno-stack-protector" LDFLAGS="-s -lm -Wl,--no-as-needed" --prefix="$md_inst"
make clean
make
}
function install_advmame-0.94() {
make install
}
function configure_advmame-0.94() {
# move any old configuration file
if [[ -f "$md_conf_root/mame-advmame/advmame-0.94.0.rc" ]]; then
mv "$md_conf_root/mame-advmame/advmame-0.94.0.rc" "$md_conf_root/mame-advmame/advmame-0.94.rc"
fi
# remove old emulators.cfg entries
delEmulator advmame-0.94.0 mame-advmame
delEmulator advmame-0.94.0 arcade
configure_advmame
}
| petrockblog/RetroPie-Setup | scriptmodules/emulators/advmame-0.94.sh | Shell | gpl-3.0 | 1,627 |
#!/bin/sh
#
# Stop Tomcat
#
# Settings
export CATALINA_HOME=/home/openwis/apache-tomcat-6.0.29
export CATALINA_PID=$CATALINA_HOME/openwis-tomcat.pid
# Stop Tomcat
cd $CATALINA_HOME/bin
./shutdown.sh -force
# Ensure PID file is removed
if [ -e "$CATALINA_PID" ]
then
    echo "Cleaning remaining PID file"
    rm "$CATALINA_PID"
fi
| OpenWIS/openwis | openwis-metadataportal/openwis-portal/src/main/scripts/stop_openwis_tomcat.sh | Shell | gpl-3.0 | 327 |
#!/bin/bash
set -e
pegasus_lite_version_major="4"
pegasus_lite_version_minor="7"
pegasus_lite_version_patch="0"
pegasus_lite_enforce_strict_wp_check="true"
pegasus_lite_version_allow_wp_auto_download="true"
. pegasus-lite-common.sh
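# pegasus-lite-common.sh provides the pegasus_lite_* helper functions used below.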
pegasus_lite_init
# cleanup in case of failures
trap pegasus_lite_signal_int INT
trap pegasus_lite_signal_term TERM
trap pegasus_lite_exit EXIT
echo -e "\n################################ Setting up workdir ################################" 1>&2
# work dir
export pegasus_lite_work_dir=$PWD
pegasus_lite_setup_work_dir
echo -e "\n###################### figuring out the worker package to use ######################" 1>&2
# figure out the worker package to use
pegasus_lite_worker_package
echo -e "\n##################### setting the xbit for executables staged #####################" 1>&2
# set the xbit for any executables staged
/bin/chmod +x wikiflow-init_0-1.0
echo -e "\n############################# executing the user tasks #############################" 1>&2
# execute the tasks
set +e
pegasus-kickstart -n wikiflow::init_0:1.0 -N ID0000001 -R condorpool -L example_workflow -T 2017-01-17T19:07:42+00:00 ./wikiflow-init_0-1.0
job_ec=$?
set -e
| elainenaomi/sciwonc-dataflow-examples | dissertation2017/Experiment 2/logs/w-08_1/20170117T190742+0000/00/00/init_0_ID0000001.sh | Shell | gpl-3.0 | 1,198 |
#!/bin/sh
# Test for this fix: 461231f022bdb3ee392622d31dc475034adceeb2.
# Ensure that seq prints exactly two numbers for a 2-number integral
# range at the limit of floating point precision.
# Copyright (C) 2008-2017 Free Software Foundation, Inc.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
. "${srcdir=.}/tests/init.sh"; path_prepend_ ./src
print_ver_ seq
getlimits_
# Run this test only with glibc and sizeof (long double) > sizeof (double).
# Otherwise, there are known failures.
cat <<\EOF > long.c
#include <features.h>
#if defined __GNU_LIBRARY__ && __GLIBC__ >= 2
int foo[sizeof (long double) - sizeof (double) - 1];
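/* fails to compile (invalid array size) unless sizeof (long double) > sizeof (double) */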
#else
"run this test only with glibc"
#endif
EOF
$CC -c long.c \
|| skip_ \
'this test runs only on systems with glibc and long double != double'
a=$INTMAX_MAX
b=$INTMAX_OFLOW
seq $a $b > out || fail=1
printf "$a\n$b\n" > exp || fail=1
compare exp out || fail=1
Exit $fail
| adtools/coreutils | tests/misc/seq-long-double.sh | Shell | gpl-3.0 | 1,501 |
#!/usr/bin/env bash
set -e
ci_dir="$(dirname "$0")"
. "${ci_dir}/ci-common.sh"
git_download mczify
if [ "$DOWNLOAD_ONLY" ]; then exit 0; fi
( cd "${CI_BUILD_DIR}/mczify"
make
)
| coq/coq | dev/ci/ci-mczify.sh | Shell | lgpl-2.1 | 184 |
#!/bin/sh
# Copyright (c) 2016 The Open Source Geospatial Foundation.
# Licensed under the GNU LGPL version >= 2.1.
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 2.1 of the License,
# or any later version. This library is distributed in the hope that
# it will be useful, but WITHOUT ANY WARRANTY, without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Lesser General Public License for more details, either
# in the "LICENSE.LGPL.txt" file distributed with this software or at
# web page "http://www.fsf.org/licenses/lgpl.html".
#
# About:
# =====
# This script will install pywps as follows
# - python-pywps debian package
# - /etc/pywps (configuration, WSGI wrapper, processes)
# - /etc/apache2/sites-available/pywps.conf
# - /usr/share/applications/pywps.desktop
#
# Requires: Apache2, Python, python-pywps
#
# Uninstall:
# ============
# sudo apt-get remove python libapache2-mod-wsgi python-pywps
#
# sudo a2disconf pywps
# sudo a2dismod wsgi
# sudo apache2ctl restart
# sudo rm -fr /usr/local/share/pywps
# sudo rm -f /etc/apache2/conf-available/pywps.conf
# sudo rm -f /usr/share/applications/pywps.desktop
# sudo rm -f /home/$USER_NAME/Desktop/pywps.desktop
./diskspace_probe.sh "`basename $0`" begin
####
# live disc's username is "user"
if [ -z "$USER_NAME" ] ; then
USER_NAME="user"
fi
USER_HOME="/home/$USER_NAME"
echo 'Installing PyWPS ...'
apt-get install --yes pywps
PYWPS_URL=http://localhost/pywps/wps.py
PYWPS_DESKTOP=/usr/share/applications/pywps.desktop
echo 'Downloading logo'
wget -c --progress=dot:mega \
-O /usr/local/share/icons/pywps.png \
"http://pywps.org/images/pywps.png"
echo 'creating desktop launcher'
cat << EOF > "$PYWPS_DESKTOP"
[Desktop Entry]
Type=Application
Encoding=UTF-8
Name=PyWPS
Comment=PyWPS
Categories=Application;Education;Geography;WPS;
Exec=firefox "$PYWPS_URL?service=WPS&version=1.0.0&request=GetCapabilities"
Icon=/usr/local/share/icons/pywps.png
Terminal=false
StartupNotify=false
EOF
cp "$PYWPS_DESKTOP" "$USER_HOME/Desktop"
chown "$USER_NAME.$USER_NAME" "$USER_HOME/Desktop/pywps.desktop"
echo 'enabling Apache wsgi module'
a2enmod wsgi
echo 'enabling Apache configuration'
a2enconf pywps-wsgi
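# Optional sanity check once Apache picks up the new config (illustrative
# command, not part of the original setup):
#   curl -s "$PYWPS_URL?service=WPS&version=1.0.0&request=GetCapabilities" | head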
####
./diskspace_probe.sh "`basename $0`" end
| guygriffiths/OSGeoLive | bin/install_pywps.sh | Shell | lgpl-2.1 | 2,453 |
# Run image to create container.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
set -x
cd "${SCRIPT_DIR}/../../.."
project_dir="${PWD}"
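# Reuse the container if it already exists; `docker start` fails for a
# missing container, in which case `docker run` creates it.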
docker start "php73" ||
docker run \
--detach \
--interactive \
--mount "type=bind,src=${project_dir},dst=/data,readonly=true" \
--name="php73" \
--tty \
"php-curl-class/php73"
| php-curl-class/php-curl-class | tests/dockerfiles/php73/2_start.sh | Shell | unlicense | 379 |
#!/bin/sh
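# Usage (assumption): ./move-to-innodb.sh <mysql-root-password>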
rootPassword=$1
SQLFILE=/tmp/alter_table.sql
checkPassword() {
if [ -z "$rootPassword" ]
then
echo "Please provide a password for mysql root"
exit 1
fi
}
prepare() {
echo "Preparing ..."
rm -f $SQLFILE
}
alter_table() {
echo "Changing engine to InnoDB. Sql is available for review in $SQLFILE"
mysql -u root -p${rootPassword} -Ne "select concat('ALTER TABLE ', table_schema, '.', table_name, ' ENGINE=INNODB;') from information_schema.tables where table_schema in ('openmrs', 'jasperserver') and table_type = 'BASE TABLE';" > $SQLFILE
mysql -u root -p${rootPassword} < $SQLFILE
}
verify_table() {
echo "Verifying ..."
mysqlcheck -uroot -p${rootPassword} openmrs
mysqlcheck -uroot -p${rootPassword} jasperserver
}
checkPassword
prepare
alter_table
verify_table
echo "Done"
| Bhamni/utilities | deprecated/scripts/archived/move-to-innodb.sh | Shell | apache-2.0 | 803 |
#!/bin/bash
# Copyright 2018 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# CD into build dir if possible - allows building from any directory
script_path='.'
if readlink -f "$0" &>/dev/null; then
    script_path=$(readlink -f "$0" 2>/dev/null)
fi
script_dir=`dirname $script_path`
cd $script_dir
# Ensure gradlew exists before starting
if [[ ! -f gradlew ]]; then
echo 'gradlew file not found! Check that you are in the right directory.'
exit 9
fi
# Copy base class library from wa dist
libs_dir=app/libs
base_class=`python3 -c "import os, wa; print(os.path.join(os.path.dirname(wa.__file__), 'framework', 'uiauto', 'uiauto.aar'))"`
mkdir -p $libs_dir
cp $base_class $libs_dir
# Build and return appropriate exit code if failed
# gradle build
./gradlew clean :app:assembleDebug
exit_code=$?
if [[ $exit_code -ne 0 ]]; then
echo "ERROR: 'gradle build' exited with code $exit_code"
exit $exit_code
fi
# If successful move APK file to workload folder (overwrite previous)
package=com.arm.wa.uiauto.googlephotos
rm -f ../$package.apk
if [[ -f app/build/outputs/apk/debug/$package.apk ]]; then
cp app/build/outputs/apk/debug/$package.apk ../$package.apk
else
echo 'ERROR: UiAutomator apk could not be found!'
exit 9
fi
| ARM-software/workload-automation | wa/workloads/googlephotos/uiauto/build.sh | Shell | apache-2.0 | 1,760 |
#!/bin/bash
killall app
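# Always exit 0 so the CodeDeploy ApplicationStop hook does not fail when the
# app is not running (killall returns non-zero if no process matched).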
exit 0
| agu-mh07/testci | codedeploy/ApplicationStop.sh | Shell | apache-2.0 | 31 |
#!/bin/bash
##
# @license Copyright 2020 The Lighthouse Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
##
set -euo pipefail
# Saves the necessary contents of the `latest-run/` folder to a subfolder for easier A/B comparison.
# Restoring the contents to `latest-run/` is just `cp latest-run/latest-run-bak/* latest-run/`.
DIRNAME="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
LH_ROOT="$DIRNAME/../.."
TARGET_DIR=${1:-latest-run-bak}
cd "$LH_ROOT/latest-run"
mkdir -p "$TARGET_DIR"
for file in *.json ; do
echo "Copying $file to $TARGET_DIR..."
cp "$file" "$TARGET_DIR/$file"
done
| GoogleChrome/lighthouse | lighthouse-core/scripts/save-latest-run.sh | Shell | apache-2.0 | 1,100 |
#!/bin/sh
# *****************************************************************************
#
# Pentaho Data Integration
#
# Copyright (C) 2006-2019 by Hitachi Vantara : http://www.hitachivantara.com
#
# *****************************************************************************
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# *****************************************************************************
DIR_REL=`dirname $0`
cd $DIR_REL
DIR=`pwd`
#cd -
. "$DIR/set-pentaho-env.sh"
setPentahoEnv
# uses Java 6 classpath wildcards
# quotes required around classpath to prevent shell expansion
"$_PENTAHO_JAVA" -Xmx2048m -XX:MaxPermSize=256m -classpath "$DIR/plugins/pdi-pur-plugin/*:$DIR/lib/*" com.pentaho.di.purge.RepositoryCleanupUtil "$@"
| lgrill-pentaho/pentaho-kettle | assemblies/static/src/main/resources/purge-utility.sh | Shell | apache-2.0 | 1,248 |
#!/usr/bin/env bash
set -e
# bits of this were adapted from lxc-checkconfig
# see also https://github.com/lxc/lxc/blob/lxc-1.0.2/src/lxc/lxc-checkconfig.in
possibleConfigs=(
'/proc/config.gz'
"/boot/config-$(uname -r)"
"/usr/src/linux-$(uname -r)/.config"
'/usr/src/linux/.config'
)
if [ $# -gt 0 ]; then
CONFIG="$1"
else
: ${CONFIG:="${possibleConfigs[0]}"}
fi
if ! command -v zgrep &> /dev/null; then
zgrep() {
zcat "$2" | grep "$1"
}
fi
kernelVersion="$(uname -r)"
kernelMajor="${kernelVersion%%.*}"
kernelMinor="${kernelVersion#$kernelMajor.}"
kernelMinor="${kernelMinor%%.*}"
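# e.g. kernelVersion=4.9.0-3-amd64 -> kernelMajor=4, kernelMinor=9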
is_set() {
zgrep "CONFIG_$1=[y|m]" "$CONFIG" > /dev/null
}
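# The kernel config contains lines such as "CONFIG_NAMESPACES=y" (built in)
# or "CONFIG_VETH=m" (module); the helpers above and below just grep for them.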
is_set_in_kernel() {
zgrep "CONFIG_$1=y" "$CONFIG" > /dev/null
}
is_set_as_module() {
zgrep "CONFIG_$1=m" "$CONFIG" > /dev/null
}
color() {
local codes=()
if [ "$1" = 'bold' ]; then
codes=( "${codes[@]}" '1' )
shift
fi
if [ "$#" -gt 0 ]; then
local code=
case "$1" in
# see https://en.wikipedia.org/wiki/ANSI_escape_code#Colors
black) code=30 ;;
red) code=31 ;;
green) code=32 ;;
yellow) code=33 ;;
blue) code=34 ;;
magenta) code=35 ;;
cyan) code=36 ;;
white) code=37 ;;
esac
if [ "$code" ]; then
codes=( "${codes[@]}" "$code" )
fi
fi
local IFS=';'
echo -en '\033['"${codes[*]}"'m'
}
wrap_color() {
text="$1"
shift
color "$@"
echo -n "$text"
color reset
echo
}
wrap_good() {
echo "$(wrap_color "$1" white): $(wrap_color "$2" green)"
}
wrap_bad() {
echo "$(wrap_color "$1" bold): $(wrap_color "$2" bold red)"
}
wrap_warning() {
wrap_color >&2 "$*" red
}
check_flag() {
if is_set_in_kernel "$1"; then
wrap_good "CONFIG_$1" 'enabled'
elif is_set_as_module "$1"; then
wrap_good "CONFIG_$1" 'enabled (as module)'
else
wrap_bad "CONFIG_$1" 'missing'
fi
}
check_flags() {
for flag in "$@"; do
echo "- $(check_flag "$flag")"
done
}
check_command() {
if command -v "$1" >/dev/null 2>&1; then
wrap_good "$1 command" 'available'
else
wrap_bad "$1 command" 'missing'
fi
}
check_device() {
if [ -c "$1" ]; then
wrap_good "$1" 'present'
else
wrap_bad "$1" 'missing'
fi
}
check_distro_userns() {
source /etc/os-release 2>/dev/null || /bin/true
if [[ "${ID}" =~ ^(centos|rhel)$ && "${VERSION_ID}" =~ ^7 ]]; then
# this is a CentOS7 or RHEL7 system
grep -q "user_namespace.enable=1" /proc/cmdline || {
# no user namespace support enabled
wrap_bad " (RHEL7/CentOS7" "User namespaces disabled; add 'user_namespace.enable=1' to boot command line)"
}
fi
}
if [ ! -e "$CONFIG" ]; then
wrap_warning "warning: $CONFIG does not exist, searching other paths for kernel config ..."
for tryConfig in "${possibleConfigs[@]}"; do
if [ -e "$tryConfig" ]; then
CONFIG="$tryConfig"
break
fi
done
if [ ! -e "$CONFIG" ]; then
wrap_warning "error: cannot find kernel config"
wrap_warning " try running this script again, specifying the kernel config:"
wrap_warning " CONFIG=/path/to/kernel/.config $0 or $0 /path/to/kernel/.config"
exit 1
fi
fi
wrap_color "info: reading kernel config from $CONFIG ..." white
echo
echo 'Generally Necessary:'
echo -n '- '
cgroupSubsystemDir="$(awk '/[, ](cpu|cpuacct|cpuset|devices|freezer|memory)[, ]/ && $3 == "cgroup" { print $2 }' /proc/mounts | head -n1)"
cgroupDir="$(dirname "$cgroupSubsystemDir")"
if [ -d "$cgroupDir/cpu" -o -d "$cgroupDir/cpuacct" -o -d "$cgroupDir/cpuset" -o -d "$cgroupDir/devices" -o -d "$cgroupDir/freezer" -o -d "$cgroupDir/memory" ]; then
echo "$(wrap_good 'cgroup hierarchy' 'properly mounted') [$cgroupDir]"
else
if [ "$cgroupSubsystemDir" ]; then
echo "$(wrap_bad 'cgroup hierarchy' 'single mountpoint!') [$cgroupSubsystemDir]"
else
echo "$(wrap_bad 'cgroup hierarchy' 'nonexistent??')"
fi
echo " $(wrap_color '(see https://github.com/tianon/cgroupfs-mount)' yellow)"
fi
if [ "$(cat /sys/module/apparmor/parameters/enabled 2>/dev/null)" = 'Y' ]; then
echo -n '- '
if command -v apparmor_parser &> /dev/null; then
echo "$(wrap_good 'apparmor' 'enabled and tools installed')"
else
echo "$(wrap_bad 'apparmor' 'enabled, but apparmor_parser missing')"
echo -n ' '
if command -v apt-get &> /dev/null; then
echo "$(wrap_color '(use "apt-get install apparmor" to fix this)')"
elif command -v yum &> /dev/null; then
echo "$(wrap_color '(your best bet is "yum install apparmor-parser")')"
else
echo "$(wrap_color '(look for an "apparmor" package for your distribution)')"
fi
fi
fi
flags=(
NAMESPACES {NET,PID,IPC,UTS}_NS
DEVPTS_MULTIPLE_INSTANCES
CGROUPS CGROUP_CPUACCT CGROUP_DEVICE CGROUP_FREEZER CGROUP_SCHED CPUSETS MEMCG
KEYS
VETH BRIDGE BRIDGE_NETFILTER
NF_NAT_IPV4 IP_NF_FILTER IP_NF_TARGET_MASQUERADE
NETFILTER_XT_MATCH_{ADDRTYPE,CONNTRACK,IPVS}
NF_NAT NF_NAT_NEEDED
# required for bind-mounting /dev/mqueue into containers
POSIX_MQUEUE
)
check_flags "${flags[@]}"
echo
echo 'Optional Features:'
{
check_flags USER_NS
check_distro_userns
}
{
check_flags SECCOMP
}
{
check_flags CGROUP_PIDS
}
{
check_flags MEMCG_SWAP MEMCG_SWAP_ENABLED
if is_set MEMCG_SWAP && ! is_set MEMCG_SWAP_ENABLED; then
echo " $(wrap_color '(note that cgroup swap accounting is not enabled in your kernel config, you can enable it by setting boot option "swapaccount=1")' bold black)"
fi
}
if [ "$kernelMajor" -lt 4 ] || [ "$kernelMajor" -eq 4 -a "$kernelMinor" -le 5 ]; then
check_flags MEMCG_KMEM
fi
if [ "$kernelMajor" -lt 3 ] || [ "$kernelMajor" -eq 3 -a "$kernelMinor" -le 18 ]; then
check_flags RESOURCE_COUNTERS
fi
if [ "$kernelMajor" -lt 3 ] || [ "$kernelMajor" -eq 3 -a "$kernelMinor" -le 13 ]; then
netprio=NETPRIO_CGROUP
else
netprio=CGROUP_NET_PRIO
fi
flags=(
BLK_CGROUP BLK_DEV_THROTTLING IOSCHED_CFQ CFQ_GROUP_IOSCHED
CGROUP_PERF
CGROUP_HUGETLB
NET_CLS_CGROUP $netprio
CFS_BANDWIDTH FAIR_GROUP_SCHED RT_GROUP_SCHED
IP_VS
IP_VS_NFCT
IP_VS_RR
)
check_flags "${flags[@]}"
check_flags EXT3_FS EXT3_FS_XATTR EXT3_FS_POSIX_ACL EXT3_FS_SECURITY
if ! is_set EXT3_FS || ! is_set EXT3_FS_XATTR || ! is_set EXT3_FS_POSIX_ACL || ! is_set EXT3_FS_SECURITY; then
echo " $(wrap_color '(enable these ext3 configs if you are using ext3 as backing filesystem)' bold black)"
fi
check_flags EXT4_FS EXT4_FS_POSIX_ACL EXT4_FS_SECURITY
if ! is_set EXT4_FS || ! is_set EXT4_FS_POSIX_ACL || ! is_set EXT4_FS_SECURITY; then
echo " $(wrap_color 'enable these ext4 configs if you are using ext4 as backing filesystem' bold black)"
fi
echo '- Network Drivers:'
{
echo '- "'$(wrap_color 'overlay' blue)'":'
check_flags VXLAN | sed 's/^/ /'
echo ' Optional (for secure networks):'
check_flags XFRM_ALGO XFRM_USER | sed 's/^/ /'
echo '- "'$(wrap_color 'ipvlan' blue)'":'
check_flags IPVLAN | sed 's/^/ /'
echo '- "'$(wrap_color 'macvlan' blue)'":'
check_flags MACVLAN DUMMY | sed 's/^/ /'
} | sed 's/^/ /'
echo '- Storage Drivers:'
{
echo '- "'$(wrap_color 'aufs' blue)'":'
check_flags AUFS_FS | sed 's/^/ /'
if ! is_set AUFS_FS && grep -q aufs /proc/filesystems; then
echo " $(wrap_color '(note that some kernels include AUFS patches but not the AUFS_FS flag)' bold black)"
fi
echo '- "'$(wrap_color 'btrfs' blue)'":'
check_flags BTRFS_FS | sed 's/^/ /'
echo '- "'$(wrap_color 'devicemapper' blue)'":'
check_flags BLK_DEV_DM DM_THIN_PROVISIONING | sed 's/^/ /'
echo '- "'$(wrap_color 'overlay' blue)'":'
check_flags OVERLAY_FS | sed 's/^/ /'
echo '- "'$(wrap_color 'zfs' blue)'":'
echo " - $(check_device /dev/zfs)"
echo " - $(check_command zfs)"
echo " - $(check_command zpool)"
} | sed 's/^/ /'
echo
check_limit_over()
{
if [ $(cat "$1") -le "$2" ]; then
wrap_bad "- $1" "$(cat $1)"
wrap_color " This should be set to at least $2, for example set: sysctl -w kernel/keys/root_maxkeys=1000000" bold black
else
wrap_good "- $1" "$(cat $1)"
fi
}
echo 'Limits:'
check_limit_over /proc/sys/kernel/keys/root_maxkeys 10000
echo
| medallia/docker | contrib/check-config.sh | Shell | apache-2.0 | 7,870 |
#!/bin/bash
source config.sh
if [ -z $db_name ] || [ -z $db_user ] || [ -z $db_password ] || [ -z $db_host ] || [ -z $db_root_pass ] || [ -z $daytona_install_dir ] || [ -z $daytona_data_dir ] || [ -z $ui_admin_pass ] || [ -z $email_user ] || [ -z $email_domain ] || [ -z $smtp_server ] || [ -z $smtp_port ]; then
echo 'one or more variables are undefined in config.sh'
echo 'Please configure config.sh'
echo 'For details that are unknown, enter some dummy values'
exit 1
fi
ip=`hostname`
echo "Updating default exechost"
echo ""
echo "update HostAssociationType set default_value='${ip}' where frameworkid=51 and name='execution';" >> fix_exec.sql
mysql -u ${db_user} -p${db_password} ${db_name} < ./fix_exec.sql
sudo rm -rf fix_exec.sql
| deepeshmittal/daytona | InstallScripts/Ubuntu/fix_sample_framework_ip.sh | Shell | apache-2.0 | 763 |
#!/usr/bin/env bash
# ******************************************************
# DESC :
# AUTHOR : Alex Stocks
# VERSION : 1.0
# LICENCE : LGPL V3
# EMAIL : [email protected]
# MOD : 2016-03-30 19:25
# FILE : client-load.sh
# ******************************************************
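# Usage (assumption): ./client-load.sh <redis-port>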
./bin/redis-cli -h 0.0.0.0 -p $1
| penguin-diors/Exocet | redis_cluster/client/client-load.sh | Shell | apache-2.0 | 332 |
#!/bin/sh
ansible-playbook setup-ec2-scylla.yaml -e "@inventories/ec2/group_vars/all.yaml" -e "setup_name=$ANSIBLE_EC2_PREFIX" "$@"
| cloudius-systems/cassandra-test-and-deploy | ec2-setup-scylla.sh | Shell | apache-2.0 | 140 |
#!/bin/bash
set -o errexit
./mvnw clean
./mvnw -Prelease release:prepare
./mvnw -Prelease release:perform
| lightoze/jOOQ-extras-postgresql | release.sh | Shell | apache-2.0 | 108 |
#!/usr/bin/env bash
if [ -z "$1" ]; then
echo "Supply single argument -- version to update CSL to"
exit
fi
newVersion=$1
function updateVersion() {
sed -E -i -e "s/^(version\\:\\s+)(.+)/\\1$newVersion/" "$1"
}
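# e.g. (illustrative values) a cabal file line "version: 1.2.0" becomes
# "version: 2.0.0" when this script is run with argument 2.0.0.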
for CB in $(git ls-files '*/cardano-*.cabal'); do
echo " ${CB}"
updateVersion "${CB}"
done
echo "Updated to version $newVersion"
| input-output-hk/cardano-sl | scripts/haskell/update-cabal-versions.sh | Shell | apache-2.0 | 360 |
_go() {
cur="${COMP_WORDS[COMP_CWORD]}"
case "${COMP_WORDS[COMP_CWORD-1]}" in
"go")
comms="build clean doc env fix fmt generate get install list run test tool version vet"
COMPREPLY=($(compgen -W "${comms}" -- ${cur}))
;;
*)
files=$(find ${PWD} -mindepth 1 -maxdepth 1 -type f -iname "*.go" -exec basename {} \;)
dirs=$(find ${PWD} -mindepth 1 -maxdepth 1 -type d -not -name ".*" -exec basename {} \;)
repl="${files} ${dirs}"
COMPREPLY=($(compgen -W "${repl}" -- ${cur}))
;;
esac
return 0
}
complete -F _go go goapp
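# After sourcing this file, e.g. "go bu<TAB>" completes to "go build"
# (assumes bash programmable completion is enabled).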
_aeremote()
{
local cur prev opts
COMPREPLY=()
cur="${COMP_WORDS[COMP_CWORD]}"
prev="${COMP_WORDS[COMP_CWORD-1]}"
opts="-host -port -dump -load -pretty -batch-size -debug"
if [[ ${prev} == -*load ]]; then
COMPREPLY=( $(compgen -f -- ${cur}) )
elif [[ ${prev} == -*port ]]; then
COMPREPLY=( $(compgen -W "80 443" -- ${cur}) )
else
COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
fi
}
complete -F _aeremote aeremote aeremote.sh
| filipenos/dotfiles | completions.sh | Shell | apache-2.0 | 1,038 |
#!/bin/bash
# Rultor release versioning script for Maven projects.
#
# It looks for the project’s version, which MUST respect the pattern
# [0-9]*\.[0-9]*\.[0-9]*-SNAPSHOT and BE THE FIRST MATCH in pom.xml
#
# What it does: updates the pom.xml version of the project according to
# the variable ${tag} provided to rultor. Specifically, it increments the
# 3rd digit and adds '-SNAPSHOT' to it.
#
# IMPORTANT:
# the given tag has to contain 3 numbers separated by dots!
#
# e.g. tag = 1.0.1 or tag = 3.2.53 will result in new versions of 1.0.2-SNAPSHOT
# or 3.2.54-SNAPSHOT
set -e
set -o pipefail
CURRENT_VERSION=$(grep -o '[0-9]*\.[0-9]*\.[0-9]*-SNAPSHOT' -m 1 pom.xml)
NUMBERS=($(echo $tag | grep -o -E '[0-9]+'))
echo "CURRENT VERSION IS"
echo $CURRENT_VERSION
NEXT_VERSION=${NUMBERS[0]}'.'${NUMBERS[1]}'.'$((${NUMBERS[2]}+1))'-SNAPSHOT'
echo "RELEASE VERSION IS"
echo $tag
echo "NEXT VERSION IS"
echo $NEXT_VERSION
#Right after the project's <version> tag there has to be the comment <!--rrv-sed-flag-->, which simplifies the sed regex below.
#Without the flag comment we would have to write a more complicated regex that also matches the artifactId on the line above,
#because a regex for the version tag alone would change every matching version tag in the file.
sed -i "s/<version>${CURRENT_VERSION}<\/version><\!--rrv-sed-flag-->/<version>${tag}<\/version><\!--rrv-sed-flag-->/" pom.xml
mvn clean deploy -Prelease --settings /home/r/settings.xml
sed -i "s/<version>${tag}<\/version><\!--rrv-sed-flag-->/<version>${NEXT_VERSION}<\/version><\!--rrv-sed-flag-->/" pom.xml
sed -i "s/<version>.*<\/version>/<version>${tag}<\/version>/" README.md
sed -i "s/<a.*>fat<\/a>/<a href=\"https:\/\/oss\.sonatype\.org\/service\/local\/repositories\/releases\/content\/com\/amihaiemil\/ai\/eva\/${tag}\/eva-${tag}-jar-with-dependencies\.jar\">fat<\/a>/" README.md
git commit -am "${NEXT_VERSION}"
git checkout master
git merge __rultor
git checkout __rultor
| decorators-squad/eva | rrv.sh | Shell | bsd-3-clause | 1,983 |
#!/bin/bash
echo "enter project name:"
read -r projectName
echo "clone boilerplate into $projectName"
git clone [email protected]:ircam-jstools/es-next-prototyping-client.git "$projectName"
cd "$projectName"
echo "delete .git project"
rm -Rf .git
rm README.md
echo "npm install"
npm install
npm install --save waves-loaders
npm install --save @ircam/basic-controllers
npm install --save waves-audio
rm package-lock.json
echo "link waves-audio"
npm link waves-audio
echo "copy assets"
cp ../assets/common.css ./css/common.css
cp -R ../assets/audio ./assets/audio
mkdir js
cp ../assets/insert-code.js ./js/insert-code.js
cp ../assets/prism.js ./js/prism.js
cp ../assets/prism.css ./css/prism.css
| wavesjs/audio | examples/create-example.sh | Shell | bsd-3-clause | 698 |
#!/bin/sh
export DESTDIR=${DESTDIR-"${HOME}/prefix"}
export PATH="DESTDIR/bin:$DESTDIR/usr/bin:$PATH"
export LD_LIBRARY_PATH="$DESTDIR/usr/lib" #FIXME should be lib64 for a 64bit build
export DYLD_LIBRARY_PATH="$LD_LIBRARY_PATH" # OSX
export PYTHONPATH="../python:$DESTDIR/var/libcrange/python"
#rm python/*.pyc
cd t
for i in *.t; do echo "Testing: $i"; ./$i || exit 1; done
export range_config="
dns_data_file=::CONFIG_BASE::/etc/dns_data.tinydns
yst_ip_list=::CONFIG_BASE::/etc/yst-ip-list
yaml_path=::CONFIG_BASE::/rangedata/range.db
loadmodule ::BUILD_ROOT::/usr/lib/libcrange/sqlite
loadmodule ::BUILD_ROOT::/usr/lib/libcrange/ip
loadmodule ::BUILD_ROOT::/usr/lib/libcrange/yst-ip-list
"
#for i in *.t; do echo "Testing: $i"; ./$i || exit 1; done
| square/libcrange | source/testit.sh | Shell | bsd-3-clause | 759 |
#
# This file is part of the CernVM File System
# This script takes care of creating, removing, and maintaining repositories
# on a Stratum 0/1 server
#
# Functionality related to SSL
# This file depends on functions implemented in the following files:
# - cvmfs_server_sys.sh
# - cvmfs_server_util.sh
# - cvmfs_server_masterkeycard.sh
create_master_key() {
local name=$1
local user=$2
local master_pub="/etc/cvmfs/keys/$name.pub"
if masterkeycard_cert_available >/dev/null; then
masterkeycard_read_pubkey >$master_pub
else
local master_key="/etc/cvmfs/keys/$name.masterkey"
openssl genrsa -out $master_key 2048 > /dev/null 2>&1
openssl rsa -in $master_key -pubout -out $master_pub > /dev/null 2>&1
chmod 400 $master_key
chown $user $master_key
fi
chmod 444 $master_pub
chown $user $master_pub
}
create_cert() {
local name=$1
local user=$2
local key; key="/etc/cvmfs/keys/$name.key"
local csr; csr="/etc/cvmfs/keys/$name.csr"
local crt; crt="/etc/cvmfs/keys/$name.crt"
# Create self-signed certificate
local cn="$name"
if [ $(echo -n "$cn" | wc -c) -gt 30 ]; then
cn="$(echo -n "$cn" | head -c 30)[...]"
fi
cn="$cn CernVM-FS Release Managers"
openssl genrsa -out $key 2048 > /dev/null 2>&1
openssl req -new -subj "/CN=$cn" \
-key $key -out $csr > /dev/null 2>&1
openssl x509 -req -days 365 -in $csr -signkey $key -out $crt > /dev/null 2>&1
rm -f $csr
chmod 444 $crt
chmod 400 $key
chown $user $crt $key
}
create_whitelist() {
local name=$1
local user=$2
local spooler_definition=$3
local temp_dir=$4
local expire_days=$5
local rewrite_path=$6
local usemasterkeycard=0
local hash_algorithm
local whitelist
whitelist=${temp_dir}/whitelist.$name
local masterkey=/etc/cvmfs/keys/${name}.masterkey
if cvmfs_sys_file_is_regular $masterkey; then
if [ -z "$expire_days" ]; then
expire_days=30
fi
echo -n "Signing $expire_days day whitelist with master key... "
elif masterkeycard_cert_available >/dev/null; then
usemasterkeycard=1
if [ -z "$expire_days" ]; then
expire_days=7
fi
echo -n "Signing $expire_days day whitelist with masterkeycard... "
else
die "Neither masterkey nor masterkeycard is available to sign whitelist!"
fi
echo `date -u "+%Y%m%d%H%M%S"` > ${whitelist}.unsigned
echo "E`date -u --date="+$expire_days days" "+%Y%m%d%H%M%S"`" >> ${whitelist}.unsigned
echo "N$name" >> ${whitelist}.unsigned
if [ -n "$rewrite_path" ]; then
local fingerprint
fingerprint="`cat -v $rewrite_path | awk '/^N/{getline;print;exit}'`"
echo "$fingerprint" >> ${whitelist}.unsigned
hash_algorithm="`echo "$fingerprint"|sed -n 's/.*-//p'|tr '[A-Z]' '[a-z]'`"
hash_algorithm="${hash_algorithm:-sha1}"
else
hash_algorithm="${CVMFS_HASH_ALGORITHM:-sha1}"
openssl x509 -in /etc/cvmfs/keys/${name}.crt -outform der | \
__publish hash -a $hash_algorithm -f >> ${whitelist}.unsigned
fi
local hash;
hash="`cat ${whitelist}.unsigned | __publish hash -a $hash_algorithm`"
echo "--" >> ${whitelist}.unsigned
echo $hash >> ${whitelist}.unsigned
echo -n $hash > ${whitelist}.hash
if [ $usemasterkeycard -eq 1 ]; then
masterkeycard_sign ${whitelist}.hash ${whitelist}.signature
# verify the signature because it is not 100% reliable
local pubkey=/etc/cvmfs/keys/${name}.pub
if [ -f $pubkey ]; then
cp $pubkey ${whitelist}.pub
else
masterkeycard_read_pubkey >${whitelist}.pub
fi
local checkhash="`openssl rsautl -verify -inkey ${whitelist}.pub -pubin -in ${whitelist}.signature 2>/dev/null`"
rm -f ${whitelist}.pub
[ "$hash" = "$checkhash" ] || die "invalid masterkeycard signature"
else
openssl rsautl -inkey $masterkey -sign -in ${whitelist}.hash -out ${whitelist}.signature
fi
cat ${whitelist}.unsigned ${whitelist}.signature > $whitelist
chown $user $whitelist
rm -f ${whitelist}.unsigned ${whitelist}.signature ${whitelist}.hash
if [ -n "$rewrite_path" ]; then
# copy first to a new name in case the filesystem is full
cp -f $whitelist ${rewrite_path}.new
chown $user ${rewrite_path}.new
mv -f ${rewrite_path}.new ${rewrite_path}
else
__swissknife upload -i $whitelist -o .cvmfswhitelist -r $spooler_definition
fi
rm -f $whitelist
syncfs
echo "done"
}
import_keychain() {
local name=$1
local keys_location="$2"
local cvmfs_user=$3
local keys="$4"
local global_key_dir="/etc/cvmfs/keys"
mkdir -p $global_key_dir || return 1
for keyfile in $keys; do
echo -n "importing $keyfile ... "
if [ ! -f "${global_key_dir}/${keyfile}" ]; then
cp "${keys_location}/${keyfile}" $global_key_dir || return 2
fi
local key_mode=400
if echo "$keyfile" | grep -vq '.*key$\|.gw$'; then
key_mode=444
fi
chmod $key_mode "${global_key_dir}/${keyfile}" || return 3
chown $cvmfs_user "${global_key_dir}/${keyfile}" || return 4
echo "done"
done
}
| DrDaveD/cvmfs | cvmfs/server/cvmfs_server_ssl.sh | Shell | bsd-3-clause | 4,984 |
#!/bin/sh
#########################################################################
#                                                                       #
#                                 OCaml                                 #
#                                                                       #
#  Nicolas Pouillard, Berke Durak, projet Gallium, INRIA Rocquencourt   #
#                                                                       #
#  Copyright 2007 Institut National de Recherche en Informatique et     #
#  en Automatique. All rights reserved. This file is distributed        #
#  under the terms of the Q Public License version 1.0.                 #
#                                                                       #
#########################################################################
cd `dirname $0`
set -e
set -x
CMDOTPS="" # -- command args
BUILD="$OCB -I a -I b aa.byte aa.native -no-skip -classic-display $@"
BUILD1="$BUILD $CMDOPTS"
BUILD2="$BUILD -verbose 0 -nothing-should-be-rebuilt $CMDOPTS"
rm -rf _build
$BUILD1
echo check that the libraries are there
ls _build/b/libb.cma _build/b/libb.cmxa _build/b/libb.a
$BUILD2
| ucsd-progsys/ml2 | eval/ocaml/ocamlbuild/test/test11/test.sh | Shell | bsd-3-clause | 1,157 |
#!/usr/bin/env bash
# Copyright 2009 The Go Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
# The syscall package provides access to the raw system call
# interface of the underlying operating system. Porting Go to
# a new architecture/operating system combination requires
# some manual effort, though there are tools that automate
# much of the process. The auto-generated files have names
# beginning with z.
#
# This script runs or (given -n) prints suggested commands to generate z files
# for the current system. Running those commands is not automatic.
# This script is documentation more than anything else.
#
# * asm_${GOOS}_${GOARCH}.s
#
# This hand-written assembly file implements system call dispatch.
# There are three entry points:
#
# func Syscall(trap, a1, a2, a3 uintptr) (r1, r2, err uintptr);
# func Syscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr);
# func RawSyscall(trap, a1, a2, a3 uintptr) (r1, r2, err uintptr);
#
# The first and second are the standard ones; they differ only in
# how many arguments can be passed to the kernel.
# The third is for low-level use by the ForkExec wrapper;
# unlike the first two, it does not call into the scheduler to
# let it know that a system call is running.
#
# * syscall_${GOOS}.go
#
# This hand-written Go file implements system calls that need
# special handling and lists "//sys" comments giving prototypes
# for ones that can be auto-generated. Mksyscall reads those
# comments to generate the stubs.
#
# * syscall_${GOOS}_${GOARCH}.go
#
# Same as syscall_${GOOS}.go except that it contains code specific
# to ${GOOS} on one particular architecture.
#
# * types_${GOOS}.c
#
# This hand-written C file includes standard C headers and then
# creates typedef or enum names beginning with a dollar sign
# (use of $ in variable names is a gcc extension). The hardest
# part about preparing this file is figuring out which headers to
# include and which symbols need to be #defined to get the
# actual data structures that pass through to the kernel system calls.
# Some C libraries present alternate versions for binary compatibility
# and translate them on the way in and out of system calls, but
# there is almost always a #define that can get the real ones.
# See types_darwin.c and types_linux.c for examples.
#
# * zerror_${GOOS}_${GOARCH}.go
#
# This machine-generated file defines the system's error numbers,
# error strings, and signal numbers. The generator is "mkerrors.sh".
# Usually no arguments are needed, but mkerrors.sh will pass its
# arguments on to godefs.
#
# * zsyscall_${GOOS}_${GOARCH}.go
#
# Generated by mksyscall.pl; see syscall_${GOOS}.go above.
#
# * zsysnum_${GOOS}_${GOARCH}.go
#
# Generated by mksysnum_${GOOS}.
#
# * ztypes_${GOOS}_${GOARCH}.go
#
# Generated by godefs; see types_${GOOS}.c above.
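# For example, to only print the suggested generation commands for the current
# system instead of running them (per the -n flag documented above):
#   ./mkall.sh -n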
GOOSARCH="${GOOS}_${GOARCH}"
# defaults
mksyscall="./mksyscall.pl"
mkerrors="./mkerrors.sh"
zerrors="zerrors_$GOOSARCH.go"
mksysctl=""
zsysctl="zsysctl_$GOOSARCH.go"
run="sh"
case "$1" in
-syscalls)
for i in zsyscall*go
do
sed 1q $i | sed 's;^// ;;' | sh > _$i && gofmt < _$i > $i
rm _$i
done
exit 0
;;
-n)
run="cat"
shift
esac
case "$#" in
0)
;;
*)
echo 'usage: mkall.sh [-n]' 1>&2
exit 2
esac
case "$GOOSARCH" in
_* | *_ | _)
echo 'undefined $GOOS_$GOARCH:' "$GOOSARCH" 1>&2
exit 1
;;
darwin_386)
mkerrors="$mkerrors -m32"
mksyscall="./mksyscall.pl -l32"
mksysnum="./mksysnum_darwin.pl /usr/include/sys/syscall.h"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
;;
darwin_amd64)
mkerrors="$mkerrors -m64"
mksysnum="./mksysnum_darwin.pl /usr/include/sys/syscall.h"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
;;
dragonfly_386)
mkerrors="$mkerrors -m32"
mksyscall="./mksyscall.pl -l32 -dragonfly"
mksysnum="curl -s 'http://gitweb.dragonflybsd.org/dragonfly.git/blob_plain/HEAD:/sys/kern/syscalls.master' | ./mksysnum_dragonfly.pl"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
;;
dragonfly_amd64)
mkerrors="$mkerrors -m64"
mksyscall="./mksyscall.pl -dragonfly"
mksysnum="curl -s 'http://gitweb.dragonflybsd.org/dragonfly.git/blob_plain/HEAD:/sys/kern/syscalls.master' | ./mksysnum_dragonfly.pl"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
;;
freebsd_386)
mkerrors="$mkerrors -m32"
mksyscall="./mksyscall.pl -l32"
mksysnum="curl -s 'http://svn.freebsd.org/base/head/sys/kern/syscalls.master' | ./mksysnum_freebsd.pl"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
;;
freebsd_amd64)
mkerrors="$mkerrors -m64"
mksysnum="curl -s 'http://svn.freebsd.org/base/head/sys/kern/syscalls.master' | ./mksysnum_freebsd.pl"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
;;
freebsd_arm)
mkerrors="$mkerrors"
mksyscall="./mksyscall.pl -l32 -arm"
mksysnum="curl -s 'http://svn.freebsd.org/base/head/sys/kern/syscalls.master' | ./mksysnum_freebsd.pl"
# Let the type of C char be signed to make the bare syscall
# API consistent across platforms.
mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
;;
linux_386)
mkerrors="$mkerrors -m32"
mksyscall="./mksyscall.pl -l32"
mksysnum="./mksysnum_linux.pl /usr/include/asm/unistd_32.h"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
;;
linux_amd64)
unistd_h=$(ls -1 /usr/include/asm/unistd_64.h /usr/include/x86_64-linux-gnu/asm/unistd_64.h 2>/dev/null | head -1)
if [ "$unistd_h" = "" ]; then
echo >&2 cannot find unistd_64.h
exit 1
fi
mkerrors="$mkerrors -m64"
mksysnum="./mksysnum_linux.pl $unistd_h"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
;;
linux_arm)
mkerrors="$mkerrors"
mksyscall="./mksyscall.pl -l32 -arm"
mksysnum="curl -s 'http://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/plain/arch/arm/include/uapi/asm/unistd.h' | ./mksysnum_linux.pl"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
;;
netbsd_386)
mkerrors="$mkerrors -m32"
mksyscall="./mksyscall.pl -l32 -netbsd"
mksysnum="curl -s 'http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_netbsd.pl"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
;;
netbsd_amd64)
mkerrors="$mkerrors -m64"
mksyscall="./mksyscall.pl -netbsd"
mksysnum="curl -s 'http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_netbsd.pl"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
;;
openbsd_386)
mkerrors="$mkerrors -m32"
mksyscall="./mksyscall.pl -l32 -openbsd"
mksysctl="./mksysctl_openbsd.pl"
zsysctl="zsysctl_openbsd.go"
mksysnum="curl -s 'http://www.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_openbsd.pl"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
;;
openbsd_amd64)
mkerrors="$mkerrors -m64"
mksyscall="./mksyscall.pl -openbsd"
mksysctl="./mksysctl_openbsd.pl"
zsysctl="zsysctl_openbsd.go"
mksysnum="curl -s 'http://www.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_openbsd.pl"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
;;
plan9_386)
mkerrors=
mksyscall="./mksyscall.pl -l32 -plan9"
mksysnum="./mksysnum_plan9.sh /n/sources/plan9/sys/src/libc/9syscall/sys.h"
mktypes="XXX"
;;
windows_386)
mksyscall="./mksyscall_windows.pl -l32"
mksysnum=
mktypes=
mkerrors="./mkerrors_windows.sh -m32"
zerrors="zerrors_windows.go"
;;
windows_amd64)
mksyscall="./mksyscall_windows.pl"
mksysnum=
mktypes=
mkerrors="./mkerrors_windows.sh -m32"
zerrors="zerrors_windows.go"
;;
*)
echo 'unrecognized $GOOS_$GOARCH: ' "$GOOSARCH" 1>&2
exit 1
;;
esac
(
if [ -n "$mkerrors" ]; then echo "$mkerrors |gofmt >$zerrors"; fi
syscall_goos="syscall_$GOOS.go"
case "$GOOS" in
darwin | dragonfly | freebsd | netbsd | openbsd)
syscall_goos="syscall_bsd.go $syscall_goos"
;;
windows)
syscall_goos="$syscall_goos security_windows.go"
;;
esac
if [ -n "$mksysctl" ]; then echo "$mksysctl |gofmt >$zsysctl"; fi
if [ -n "$mksyscall" ]; then echo "$mksyscall $syscall_goos syscall_$GOOSARCH.go |gofmt >zsyscall_$GOOSARCH.go"; fi
if [ -n "$mksysnum" ]; then echo "$mksysnum |gofmt >zsysnum_$GOOSARCH.go"; fi
if [ -n "$mktypes" ]; then echo "$mktypes types_$GOOS.go |gofmt >ztypes_$GOOSARCH.go"; fi
) | $run
| TomHoenderdos/go-sunos | src/pkg/syscall/mkall.sh | Shell | bsd-3-clause | 8,154 |
export INSTALL_SCLS=rh-php72
export ENABLE_SCLS=rh-php72
export PHP_HTTPD_PKGS="httpd24 rh-php72-php-fpm"
export HTTPD_SERVICE_NAME="httpd24-httpd rh-php72-php-fpm"
export STATIC_DATA_DIR=/opt/rh/httpd24/root/var/www/html
| khardix/sclo-ci-tests | collections/rh-php72-rh/include.sh | Shell | bsd-3-clause | 222 |
#!/usr/bin/env bash
#
# Copyright (c) 2008-2016 the Urho3D project.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# Determine source tree and build tree
if [ "$1" ] && [[ ! "$1" =~ ^- ]]; then BUILD=$1; shift; elif [ -f $(pwd)/CMakeCache.txt ]; then BUILD=$(pwd); else caller=$(ps -o args= $PPID |cut -d' ' -f2); if [[ ! "$caller" =~ cmake_.*\.sh$ ]]; then caller=$0; fi; echo "Usage: ${caller##*/} /path/to/build-tree [build-options]"; exit 1; fi
SOURCE=$(cd ${0%/*}; pwd)
if [ "$BUILD" == "." ]; then BUILD=$(pwd); fi
# Define helpers
. "$SOURCE"/.bash_helpers.sh
# Detect CMake toolchains directory if it is not provided explicitly
[ "$TOOLCHAINS" == "" ] && TOOLCHAINS="$SOURCE"/CMake/Toolchains
[ ! -d "$TOOLCHAINS" -a -d "$URHO3D_HOME"/share/Urho3D/CMake/Toolchains ] && TOOLCHAINS="$URHO3D_HOME"/share/Urho3D/CMake/Toolchains
# Default to native generator and toolchain if none is specified explicitly
IFS=#
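# IFS is set to '#' (presumably) so the unquoted $@ below does not split
# arguments containing spaces, e.g. the "Eclipse CDT4 - Unix Makefiles" generator name.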
OPTS=
for a in $@; do
case $a in
--fix-scm)
FIX_SCM=1
;;
Eclipse\ CDT4\ -\ Unix\ Makefiles)
ECLIPSE=1
;;
-DIOS=1)
IOS=1
;;
-DANDROID=1)
ANDROID=1 && OPTS="-DCMAKE_TOOLCHAIN_FILE=$TOOLCHAINS/android.toolchain.cmake"
;;
-DRPI=1)
if [[ ! $(uname -m) =~ ^arm ]]; then OPTS="-DCMAKE_TOOLCHAIN_FILE=$TOOLCHAINS/raspberrypi.toolchain.cmake"; fi
;;
-DARM=1)
if [[ ! $(uname -m) =~ ^(arm|aarch64) ]]; then OPTS="-DCMAKE_TOOLCHAIN_FILE=$TOOLCHAINS/arm-linux.toolchain.cmake"; fi
;;
-DWIN32=1)
OPTS="-DCMAKE_TOOLCHAIN_FILE=$TOOLCHAINS/mingw.toolchain.cmake"
;;
-DWEB=1)
OPTS="-DCMAKE_TOOLCHAIN_FILE=$TOOLCHAINS/emscripten.toolchain.cmake"
;;
esac
done
# Create project with the chosen CMake generator and toolchain
cmake -E make_directory "$BUILD" && cmake -E chdir "$BUILD" cmake $OPTS $@ "$SOURCE" && post_cmake
# vi: set ts=4 sw=4 expandtab:
| gawag/Spooky-Urho-Sample | cmake_generic.sh | Shell | mit | 3,032 |
#!/bin/bash
#
# provision.sh
#
# This file is specified in Vagrantfile and is loaded by Vagrant as the primary
# provisioning script whenever the commands `vagrant up`, `vagrant provision`,
# or `vagrant reload` are used. It provides all of the default packages and
# configurations included with Varying Vagrant Vagrants.
# By storing the date now, we can calculate the duration of provisioning at the
# end of this script.
start_seconds="$(date +%s)"
# Network Detection
#
# Make an HTTP request to google.com to determine if outside access is available
# to us. If 3 attempts with a timeout of 5 seconds are not successful, then we'll
# skip a few things further in provisioning rather than create a bunch of errors.
if [[ "$(wget --tries=3 --timeout=5 --spider http://google.com 2>&1 | grep 'connected')" ]]; then
echo "Network connection detected..."
ping_result="Connected"
else
echo "Network connection not detected. Unable to reach google.com..."
ping_result="Not Connected"
fi
# PACKAGE INSTALLATION
#
# Build a bash array to pass all of the packages we want to install to a single
# apt-get command. This avoids doing all the leg work each time a package is
# set to install. It also allows us to easily comment out or add single
# packages. We set the array as empty to begin with so that we can append
# individual packages to it as required.
apt_package_install_list=()
# Start with a bash array containing all packages we want to install in the
# virtual machine. We'll then loop through each of these and check individual
# status before adding them to the apt_package_install_list array.
apt_package_check_list=(
# PHP5
#
# Our base packages for php5. As long as php5-fpm and php5-cli are
# installed, there is no need to install the general php5 package, which
# can sometimes install apache as a requirement.
php5-fpm
php5-cli
# Common and dev packages for php
php5-common
php5-dev
# Extra PHP modules that we find useful
php5-memcache
php5-imagick
php5-mcrypt
php5-mysql
php5-imap
php5-curl
php-pear
php5-gd
# nginx is installed as the default web server
nginx
# memcached is made available for object caching
memcached
# mysql is the default database
mysql-server
# other packages that come in handy
imagemagick
subversion
git-core
zip
unzip
ngrep
curl
make
vim
colordiff
postfix
# ntp service to keep clock current
ntp
# Req'd for i18n tools
gettext
# Req'd for Webgrind
graphviz
# dos2unix
# Allows conversion of DOS style line endings to something we'll have less
# trouble with in Linux.
dos2unix
# nodejs for use by grunt
g++
nodejs
)
echo "Check for apt packages to install..."
# Loop through each of our packages that should be installed on the system. If
# not yet installed, it should be added to the array of packages to install.
for pkg in "${apt_package_check_list[@]}"; do
package_version="$(dpkg -s $pkg 2>&1 | grep 'Version:' | cut -d " " -f 2)"
if [[ -n "${package_version}" ]]; then
space_count="$(expr 20 - "${#pkg}")" #11
pack_space_count="$(expr 30 - "${#package_version}")"
real_space="$(expr ${space_count} + ${pack_space_count} + ${#package_version})"
printf " * $pkg %${real_space}.${#package_version}s ${package_version}\n"
else
echo " *" $pkg [not installed]
apt_package_install_list+=($pkg)
fi
done
# MySQL
#
# Use debconf-set-selections to specify the default password for the root MySQL
# account. This runs on every provision, even if MySQL has been installed. If
# MySQL is already installed, it will not affect anything.
echo mysql-server mysql-server/root_password password root | debconf-set-selections
echo mysql-server mysql-server/root_password_again password root | debconf-set-selections
# Postfix
#
# Use debconf-set-selections to specify the selections in the postfix setup. Set
# up as an 'Internet Site' with the host name 'vvv'. Note that if your current
# Internet connection does not allow communication over port 25, you will not be
# able to send mail, even with postfix installed.
echo postfix postfix/main_mailer_type select Internet Site | debconf-set-selections
echo postfix postfix/mailname string vvv | debconf-set-selections
# Disable ipv6 as some ISPs/mail servers have problems with it
echo "inet_protocols = ipv4" >> /etc/postfix/main.cf
# Provide our custom apt sources before running `apt-get update`
ln -sf /srv/config/apt-source-append.list /etc/apt/sources.list.d/vvv-sources.list
echo "Linked custom apt sources"
if [[ $ping_result == "Connected" ]]; then
# If there are any packages to be installed in the apt_package_list array,
# then we'll run `apt-get update` and then `apt-get install` to proceed.
if [[ ${#apt_package_install_list[@]} = 0 ]]; then
echo -e "No apt packages to install.\n"
else
# Before running `apt-get update`, we should add the public keys for
# the packages that we are installing from non standard sources via
# our appended apt source.list
# Retrieve the Nginx signing key from nginx.org
echo "Applying Nginx signing key..."
wget --quiet http://nginx.org/keys/nginx_signing.key -O- | apt-key add -
# Apply the nodejs signing key
echo "Applying nodejs signing key..."
apt-key adv --quiet --keyserver hkp://keyserver.ubuntu.com:80 --recv-key C7917B12 2>&1 | grep "gpg:"
apt-key export C7917B12 | apt-key add -
# update all of the package references before installing anything
echo "Running apt-get update..."
apt-get update --assume-yes
# install required packages
echo "Installing apt-get packages..."
apt-get install --assume-yes ${apt_package_install_list[@]}
# Clean up apt caches
apt-get clean
fi
# npm
#
# Make sure we have the latest npm version and the update checker module
npm install -g npm
npm install -g npm-check-updates
# xdebug
#
# XDebug 2.2.3 is provided with the Ubuntu install by default. The PECL
# installation allows us to use a later version. Not specifying a version
# will load the latest stable.
pecl install xdebug
# ack-grep
#
# Install ack-grep from the version hosted at beyondgrep.com as the
# PPAs for Ubuntu Precise are not available yet.
if [[ -f /usr/bin/ack ]]; then
echo "ack-grep already installed"
else
echo "Installing ack-grep as ack"
curl -s http://beyondgrep.com/ack-2.04-single-file > /usr/bin/ack && chmod +x /usr/bin/ack
fi
# COMPOSER
#
# Install Composer if it is not yet available.
if [[ ! -n "$(composer --version --no-ansi | grep 'Composer version')" ]]; then
echo "Installing Composer..."
curl -sS https://getcomposer.org/installer | php
chmod +x composer.phar
mv composer.phar /usr/local/bin/composer
fi
# Update both Composer and any global packages. Updates to Composer are direct from
# the master branch on its GitHub repository.
if [[ -n "$(composer --version --no-ansi | grep 'Composer version')" ]]; then
echo "Updating Composer..."
COMPOSER_HOME=/usr/local/src/composer composer self-update
COMPOSER_HOME=/usr/local/src/composer composer -q global require --no-update phpunit/phpunit:4.3.*
COMPOSER_HOME=/usr/local/src/composer composer -q global require --no-update phpunit/php-invoker:1.1.*
COMPOSER_HOME=/usr/local/src/composer composer -q global require --no-update mockery/mockery:0.9.*
COMPOSER_HOME=/usr/local/src/composer composer -q global require --no-update d11wtq/boris:v1.0.8
COMPOSER_HOME=/usr/local/src/composer composer -q global config bin-dir /usr/local/bin
COMPOSER_HOME=/usr/local/src/composer composer global update
fi
# Grunt
#
# Install or Update Grunt based on current state. Updates are direct
# from NPM
if [[ "$(grunt --version)" ]]; then
echo "Updating Grunt CLI"
npm update -g grunt-cli &>/dev/null
npm update -g grunt-sass &>/dev/null
npm update -g grunt-cssjanus &>/dev/null
else
echo "Installing Grunt CLI"
npm install -g grunt-cli &>/dev/null
npm install -g grunt-sass &>/dev/null
npm install -g grunt-cssjanus &>/dev/null
fi
# Graphviz
#
# Set up a symlink between the Graphviz path defined in the default Webgrind
# config and actual path.
echo "Adding graphviz symlink for Webgrind..."
ln -sf /usr/bin/dot /usr/local/bin/dot
else
echo -e "\nNo network connection available, skipping package installation"
fi
# Configuration for nginx
if [[ ! -e /etc/nginx/server.key ]]; then
echo "Generate Nginx server private key..."
vvvgenrsa="$(openssl genrsa -out /etc/nginx/server.key 2048 2>&1)"
echo "$vvvgenrsa"
fi
if [[ ! -e /etc/nginx/server.csr ]]; then
echo "Generate Certificate Signing Request (CSR)..."
openssl req -new -batch -key /etc/nginx/server.key -out /etc/nginx/server.csr
fi
if [[ ! -e /etc/nginx/server.crt ]]; then
echo "Sign the certificate using the above private key and CSR..."
vvvsigncert="$(openssl x509 -req -days 365 -in /etc/nginx/server.csr -signkey /etc/nginx/server.key -out /etc/nginx/server.crt 2>&1)"
echo "$vvvsigncert"
fi
echo -e "\nSetup configuration files..."
# Used to ensure proper services are started on `vagrant up`
cp /srv/config/init/vvv-start.conf /etc/init/vvv-start.conf
echo " * Copied /srv/config/init/vvv-start.conf to /etc/init/vvv-start.conf"
# Copy nginx configuration from local
cp /srv/config/nginx-config/nginx.conf /etc/nginx/nginx.conf
cp /srv/config/nginx-config/nginx-wp-common.conf /etc/nginx/nginx-wp-common.conf
if [[ ! -d /etc/nginx/custom-sites ]]; then
mkdir /etc/nginx/custom-sites/
fi
rsync -rvzh --delete /srv/config/nginx-config/sites/ /etc/nginx/custom-sites/
echo " * Copied /srv/config/nginx-config/nginx.conf to /etc/nginx/nginx.conf"
echo " * Copied /srv/config/nginx-config/nginx-wp-common.conf to /etc/nginx/nginx-wp-common.conf"
echo " * Rsync'd /srv/config/nginx-config/sites/ to /etc/nginx/custom-sites"
# Copy php-fpm configuration from local
cp /srv/config/php5-fpm-config/php5-fpm.conf /etc/php5/fpm/php5-fpm.conf
cp /srv/config/php5-fpm-config/www.conf /etc/php5/fpm/pool.d/www.conf
cp /srv/config/php5-fpm-config/php-custom.ini /etc/php5/fpm/conf.d/php-custom.ini
cp /srv/config/php5-fpm-config/opcache.ini /etc/php5/fpm/conf.d/opcache.ini
cp /srv/config/php5-fpm-config/xdebug.ini /etc/php5/mods-available/xdebug.ini
# Find the path to Xdebug and prepend it to xdebug.ini
XDEBUG_PATH=$( find /usr -name 'xdebug.so' | head -1 )
sed -i "1izend_extension=\"$XDEBUG_PATH\"" /etc/php5/mods-available/xdebug.ini
echo " * Copied /srv/config/php5-fpm-config/php5-fpm.conf to /etc/php5/fpm/php5-fpm.conf"
echo " * Copied /srv/config/php5-fpm-config/www.conf to /etc/php5/fpm/pool.d/www.conf"
echo " * Copied /srv/config/php5-fpm-config/php-custom.ini to /etc/php5/fpm/conf.d/php-custom.ini"
echo " * Copied /srv/config/php5-fpm-config/opcache.ini to /etc/php5/fpm/conf.d/opcache.ini"
echo " * Copied /srv/config/php5-fpm-config/xdebug.ini to /etc/php5/mods-available/xdebug.ini"
# Copy memcached configuration from local
cp /srv/config/memcached-config/memcached.conf /etc/memcached.conf
echo " * Copied /srv/config/memcached-config/memcached.conf to /etc/memcached.conf"
# Copy custom dotfiles and bin file for the vagrant user from local
cp /srv/config/bash_profile /home/vagrant/.bash_profile
cp /srv/config/bash_aliases /home/vagrant/.bash_aliases
cp /srv/config/vimrc /home/vagrant/.vimrc
if [[ ! -d /home/vagrant/.subversion ]]; then
mkdir /home/vagrant/.subversion
fi
cp /srv/config/subversion-servers /home/vagrant/.subversion/servers
if [[ ! -d /home/vagrant/bin ]]; then
mkdir /home/vagrant/bin
fi
rsync -rvzh --delete /srv/config/homebin/ /home/vagrant/bin/
echo " * Copied /srv/config/bash_profile to /home/vagrant/.bash_profile"
echo " * Copied /srv/config/bash_aliases to /home/vagrant/.bash_aliases"
echo " * Copied /srv/config/vimrc to /home/vagrant/.vimrc"
echo " * Copied /srv/config/subversion-servers to /home/vagrant/.subversion/servers"
echo " * rsync'd /srv/config/homebin to /home/vagrant/bin"
# If a bash_prompt file exists in the VVV config/ directory, copy to the VM.
if [[ -f /srv/config/bash_prompt ]]; then
cp /srv/config/bash_prompt /home/vagrant/.bash_prompt
echo " * Copied /srv/config/bash_prompt to /home/vagrant/.bash_prompt"
fi
# RESTART SERVICES
#
# Make sure the services we expect to be running are running.
echo -e "\nRestart services..."
service nginx restart
service memcached restart
# Disable PHP Xdebug module by default
php5dismod xdebug
# Enable PHP mcrypt module by default
php5enmod mcrypt
service php5-fpm restart
# If MySQL is installed, go through the various imports and service tasks.
exists_mysql="$(service mysql status)"
if [[ "mysql: unrecognized service" != "${exists_mysql}" ]]; then
echo -e "\nSetup MySQL configuration file links..."
# Copy mysql configuration from local
cp /srv/config/mysql-config/my.cnf /etc/mysql/my.cnf
cp /srv/config/mysql-config/root-my.cnf /home/vagrant/.my.cnf
echo " * Copied /srv/config/mysql-config/my.cnf to /etc/mysql/my.cnf"
echo " * Copied /srv/config/mysql-config/root-my.cnf to /home/vagrant/.my.cnf"
# MySQL gives us an error if we restart a non running service, which
# happens after a `vagrant halt`. Check to see if it's running before
# deciding whether to start or restart.
if [[ "mysql stop/waiting" == "${exists_mysql}" ]]; then
echo "service mysql start"
service mysql start
else
echo "service mysql restart"
service mysql restart
fi
# IMPORT SQL
#
# Create the databases (unique to system) that will be imported with
# the mysqldump files located in database/backups/
if [[ -f /srv/database/init-custom.sql ]]; then
mysql -u root -proot < /srv/database/init-custom.sql
echo -e "\nInitial custom MySQL scripting..."
else
echo -e "\nNo custom MySQL scripting found in database/init-custom.sql, skipping..."
fi
# Setup MySQL by importing an init file that creates necessary
# users and databases that our vagrant setup relies on.
mysql -u root -proot < /srv/database/init.sql
echo "Initial MySQL prep..."
# Process each mysqldump SQL file in database/backups to import
# an initial data set for MySQL.
/srv/database/import-sql.sh
else
echo -e "\nMySQL is not installed. No databases imported."
fi
# Run wp-cli, tar, and npm as `vagrant` user instead of `root`
if (( $EUID == 0 )); then
wp() { sudo -EH -u vagrant -- wp "$@"; }
tar() { sudo -EH -u vagrant -- tar "$@"; }
npm() { sudo -EH -u vagrant -- npm "$@"; }
fi
if [[ $ping_result == "Connected" ]]; then
# WP-CLI Install
if [[ ! -d /srv/www/wp-cli ]]; then
echo -e "\nDownloading wp-cli, see http://wp-cli.org"
git clone https://github.com/wp-cli/wp-cli.git /srv/www/wp-cli
cd /srv/www/wp-cli
composer install
else
echo -e "\nUpdating wp-cli..."
cd /srv/www/wp-cli
git pull --rebase origin master
composer update
fi
# Link `wp` to the `/usr/local/bin` directory
ln -sf /srv/www/wp-cli/bin/wp /usr/local/bin/wp
# Download and extract phpMemcachedAdmin to provide a dashboard view and
# admin interface to the goings on of memcached when running
if [[ ! -d /srv/www/default/memcached-admin ]]; then
echo -e "\nDownloading phpMemcachedAdmin, see https://code.google.com/p/phpmemcacheadmin/"
cd /srv/www/default
wget -q -O phpmemcachedadmin.tar.gz 'https://phpmemcacheadmin.googlecode.com/files/phpMemcachedAdmin-1.2.2-r262.tar.gz'
mkdir memcached-admin
tar -xf phpmemcachedadmin.tar.gz --directory memcached-admin
rm phpmemcachedadmin.tar.gz
else
echo "phpMemcachedAdmin already installed."
fi
# Checkout Opcache Status to provide a dashboard for viewing statistics
# about PHP's built in opcache.
if [[ ! -d /srv/www/default/opcache-status ]]; then
echo -e "\nDownloading Opcache Status, see https://github.com/rlerdorf/opcache-status/"
cd /srv/www/default
git clone https://github.com/rlerdorf/opcache-status.git opcache-status
else
echo -e "\nUpdating Opcache Status"
cd /srv/www/default/opcache-status
git pull --rebase origin master
fi
# Webgrind install (for viewing callgrind/cachegrind files produced by
# xdebug profiler)
if [[ ! -d /srv/www/default/webgrind ]]; then
echo -e "\nDownloading webgrind, see https://github.com/jokkedk/webgrind"
git clone https://github.com/jokkedk/webgrind.git /srv/www/default/webgrind
else
echo -e "\nUpdating webgrind..."
cd /srv/www/default/webgrind
git pull --rebase origin master
fi
# PHP_CodeSniffer (for running WordPress-Coding-Standards)
if [[ ! -d /srv/www/phpcs ]]; then
echo -e "\nDownloading PHP_CodeSniffer (phpcs), see https://github.com/squizlabs/PHP_CodeSniffer"
git clone -b master https://github.com/squizlabs/PHP_CodeSniffer.git /srv/www/phpcs
else
cd /srv/www/phpcs
if [[ $(git rev-parse --abbrev-ref HEAD) == 'master' ]]; then
echo -e "\nUpdating PHP_CodeSniffer (phpcs)..."
git pull --no-edit origin master
else
echo -e "\nSkipped updating PHP_CodeSniffer since not on master branch"
fi
fi
# Sniffs WordPress Coding Standards
if [[ ! -d /srv/www/phpcs/CodeSniffer/Standards/WordPress ]]; then
echo -e "\nDownloading WordPress-Coding-Standards, sniffs for PHP_CodeSniffer, see https://github.com/WordPress-Coding-Standards/WordPress-Coding-Standards"
git clone -b master https://github.com/WordPress-Coding-Standards/WordPress-Coding-Standards.git /srv/www/phpcs/CodeSniffer/Standards/WordPress
else
cd /srv/www/phpcs/CodeSniffer/Standards/WordPress
if [[ $(git rev-parse --abbrev-ref HEAD) == 'master' ]]; then
echo -e "\nUpdating PHP_CodeSniffer WordPress Coding Standards..."
git pull --no-edit origin master
else
echo -e "\nSkipped updating PHPCS WordPress Coding Standards since not on master branch"
fi
fi
# Install the standards in PHPCS
/srv/www/phpcs/scripts/phpcs --config-set installed_paths ./CodeSniffer/Standards/WordPress/
/srv/www/phpcs/scripts/phpcs -i
# Install and configure the latest stable version of WordPress
if [[ ! -d /srv/www/wordpress-default ]]; then
echo "Downloading WordPress Stable, see http://wordpress.org/"
cd /srv/www/
curl -L -O https://wordpress.org/latest.tar.gz
tar -xvf latest.tar.gz
mv wordpress wordpress-default
rm latest.tar.gz
cd /srv/www/wordpress-default
echo "Configuring WordPress Stable..."
wp core config --dbname=wordpress_default --dbuser=wp --dbpass=wp --quiet --extra-php <<PHP
// Match any requests made via xip.io.
if ( isset( \$_SERVER['HTTP_HOST'] ) && preg_match('/^(local.wordpress.)\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}(.xip.io)\z/', \$_SERVER['HTTP_HOST'] ) ) {
define( 'WP_HOME', 'http://' . \$_SERVER['HTTP_HOST'] );
define( 'WP_SITEURL', 'http://' . \$_SERVER['HTTP_HOST'] );
}
define( 'WP_DEBUG', true );
PHP
echo "Installing WordPress Stable..."
wp core install --url=local.wordpress.dev --quiet --title="Local WordPress Dev" --admin_name=admin --admin_email="[email protected]" --admin_password="password"
else
echo "Updating WordPress Stable..."
cd /srv/www/wordpress-default
wp core upgrade
fi
# Test to see if an svn upgrade is needed
svn_test=$( svn status -u /srv/www/wordpress-develop/ 2>&1 );
if [[ $svn_test == *"svn upgrade"* ]]; then
# If the wordpress-develop svn repo needed an upgrade, they probably all need it
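# ${repo/%\.svn/} strips the trailing ".svn" from each match so that
# `svn upgrade` receives the working copy root rather than its metadata dir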
for repo in $(find /srv/www -maxdepth 5 -type d -name '.svn'); do
svn upgrade "${repo/%\.svn/}"
done
fi;
# Checkout, install and configure WordPress trunk via core.svn
if [[ ! -d /srv/www/wordpress-trunk ]]; then
echo "Checking out WordPress trunk from core.svn, see https://core.svn.wordpress.org/trunk"
svn checkout https://core.svn.wordpress.org/trunk/ /srv/www/wordpress-trunk
cd /srv/www/wordpress-trunk
echo "Configuring WordPress trunk..."
wp core config --dbname=wordpress_trunk --dbuser=wp --dbpass=wp --quiet --extra-php <<PHP
// Match any requests made via xip.io.
if ( isset( \$_SERVER['HTTP_HOST'] ) && preg_match('/^(local.wordpress-trunk.)\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}(.xip.io)\z/', \$_SERVER['HTTP_HOST'] ) ) {
define( 'WP_HOME', 'http://' . \$_SERVER['HTTP_HOST'] );
define( 'WP_SITEURL', 'http://' . \$_SERVER['HTTP_HOST'] );
}
define( 'WP_DEBUG', true );
PHP
echo "Installing WordPress trunk..."
wp core install --url=local.wordpress-trunk.dev --quiet --title="Local WordPress Trunk Dev" --admin_name=admin --admin_email="[email protected]" --admin_password="password"
else
echo "Updating WordPress trunk..."
cd /srv/www/wordpress-trunk
svn up
fi
# Checkout, install and configure WordPress trunk via develop.svn
if [[ ! -d /srv/www/wordpress-develop ]]; then
echo "Checking out WordPress trunk from develop.svn, see https://develop.svn.wordpress.org/trunk"
svn checkout https://develop.svn.wordpress.org/trunk/ /srv/www/wordpress-develop
cd /srv/www/wordpress-develop/src/
echo "Configuring WordPress develop..."
wp core config --dbname=wordpress_develop --dbuser=wp --dbpass=wp --quiet --extra-php <<PHP
// Match any requests made via xip.io.
if ( isset( \$_SERVER['HTTP_HOST'] ) && preg_match('/^(src|build)(.wordpress-develop.)\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}(.xip.io)\z/', \$_SERVER['HTTP_HOST'] ) ) {
define( 'WP_HOME', 'http://' . \$_SERVER['HTTP_HOST'] );
define( 'WP_SITEURL', 'http://' . \$_SERVER['HTTP_HOST'] );
} else if ( 'build' === basename( dirname( __FILE__ ) ) ) {
// Allow (src|build).wordpress-develop.dev to share the same Database
define( 'WP_HOME', 'http://build.wordpress-develop.dev' );
define( 'WP_SITEURL', 'http://build.wordpress-develop.dev' );
}
define( 'WP_DEBUG', true );
PHP
echo "Installing WordPress develop..."
wp core install --url=src.wordpress-develop.dev --quiet --title="WordPress Develop" --admin_name=admin --admin_email="[email protected]" --admin_password="password"
cp /srv/config/wordpress-config/wp-tests-config.php /srv/www/wordpress-develop/
cd /srv/www/wordpress-develop/
echo "Running npm install for the first time, this may take several minutes..."
npm install &>/dev/null
else
echo "Updating WordPress develop..."
cd /srv/www/wordpress-develop/
if [[ -e .svn ]]; then
svn up
else
if [[ $(git rev-parse --abbrev-ref HEAD) == 'master' ]]; then
git pull --no-edit git://develop.git.wordpress.org/ master
else
echo "Skip auto git pull on develop.git.wordpress.org since not on master branch"
fi
fi
echo "Updating npm packages..."
npm install &>/dev/null
fi
if [[ ! -d /srv/www/wordpress-develop/build ]]; then
echo "Initializing grunt in WordPress develop... This may take a few moments."
cd /srv/www/wordpress-develop/
grunt
fi
# Download phpMyAdmin
if [[ ! -d /srv/www/default/database-admin ]]; then
echo "Downloading phpMyAdmin 4.2.13.1..."
cd /srv/www/default
wget -q -O phpmyadmin.tar.gz 'http://sourceforge.net/projects/phpmyadmin/files/phpMyAdmin/4.2.13.1/phpMyAdmin-4.2.13.1-all-languages.tar.gz/download'
tar -xf phpmyadmin.tar.gz
mv phpMyAdmin-4.2.13.1-all-languages database-admin
rm phpmyadmin.tar.gz
else
echo "PHPMyAdmin already installed."
fi
cp /srv/config/phpmyadmin-config/config.inc.php /srv/www/default/database-admin/
else
echo -e "\nNo network available, skipping network installations"
fi
# Find new sites to setup.
# Kill previously symlinked Nginx configs
# We can't know what sites have been removed, so we have to remove all
# the configs and add them back in again.
find /etc/nginx/custom-sites -name 'vvv-auto-*.conf' -exec rm {} \;
# Look for site setup scripts
for SITE_CONFIG_FILE in $(find /srv/www -maxdepth 5 -name 'vvv-init.sh'); do
DIR="$(dirname $SITE_CONFIG_FILE)"
(
cd "$DIR"
source vvv-init.sh
)
done
# Look for Nginx vhost files, symlink them into the custom sites dir
for SITE_CONFIG_FILE in $(find /srv/www -maxdepth 5 -name 'vvv-nginx.conf'); do
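# Derive a unique destination filename from the source path; for a
# hypothetical /srv/www/example-site/vvv-nginx.conf this yields
# vvv-auto-example-site-<md5>.conf (the md5 of the full source path keeps
# same-named folders from colliding)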
DEST_CONFIG_FILE=${SITE_CONFIG_FILE//\/srv\/www\//}
DEST_CONFIG_FILE=${DEST_CONFIG_FILE//\//\-}
DEST_CONFIG_FILE=${DEST_CONFIG_FILE/%-vvv-nginx.conf/}
DEST_CONFIG_FILE="vvv-auto-$DEST_CONFIG_FILE-$(md5sum <<< "$SITE_CONFIG_FILE" | cut -c1-32).conf"
# We allow the replacement of the {vvv_path_to_folder} token with
# whatever you want, allowing flexible placement of the site folder
# while still having an Nginx config which works.
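# For example, a (hypothetical) vvv-nginx.conf line such as
#   root {vvv_path_to_folder}/htdocs;
# would be rewritten to
#   root /srv/www/example-site/htdocs;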
DIR="$(dirname $SITE_CONFIG_FILE)"
sed "s#{vvv_path_to_folder}#$DIR#" "$SITE_CONFIG_FILE" > /etc/nginx/custom-sites/"$DEST_CONFIG_FILE"
done
# Parse any vvv-hosts file located in www/ or subdirectories of www/
# for domains to be added to the virtual machine's hosts file so that it is
# self-aware.
#
# Domains should be entered on new lines.
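# A vvv-hosts file might contain, for example:
#   local.wordpress.dev
#   src.wordpress-develop.dev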
echo "Cleaning the virtual machine's /etc/hosts file..."
sed -n '/# vvv-auto$/!p' /etc/hosts > /tmp/hosts
mv /tmp/hosts /etc/hosts
echo "Adding domains to the virtual machine's /etc/hosts file..."
find /srv/www/ -maxdepth 5 -name 'vvv-hosts' | \
while read hostfile; do
while IFS='' read -r line || [ -n "$line" ]; do
if [[ "#" != ${line:0:1} ]]; then
if ! grep -q "^127.0.0.1 $line$" /etc/hosts; then
echo "127.0.0.1 $line # vvv-auto" >> /etc/hosts
echo " * Added $line from $hostfile"
fi
fi
done < "$hostfile"
done
end_seconds="$(date +%s)"
echo "-----------------------------"
echo "Provisioning complete in "$(expr $end_seconds - $start_seconds)" seconds"
if [[ $ping_result == "Connected" ]]; then
echo "External network connection established, packages up to date."
else
echo "No external network available. Package installation and maintenance skipped."
fi
echo "For further setup instructions, visit http://vvv.dev"
|
digitalmediaproduction/JAMAVagrant
|
provision/provision.sh
|
Shell
|
mit
| 25,582 |
#!/bin/sh -e
if [ -f codecs/ilbc/iLBC_define.h ]; then
echo "***"
echo "The iLBC source code appears to already be present and does not"
echo "need to be downloaded."
echo "***"
exit 1
fi
echo "***"
echo "This script will download the Global IP Solutions iLBC encoder/decoder"
echo "source code from http://ilbcfreeware.org. Use of this code requires"
echo "agreeing to the license agreement present at that site."
echo ""
echo "This script assumes that you have already agreed to the license agreement."
echo "If you have not done so, you can abort the script now."
echo "***"
read tmp
wget -P codecs/ilbc http://www.ietf.org/rfc/rfc3951.txt
wget -q -O - http://www.ilbcfreeware.org/documentation/extract-cfile.txt | tr -d '\r' > codecs/ilbc/extract-cfile.awk
(cd codecs/ilbc && awk -f extract-cfile.awk rfc3951.txt)
echo "***"
echo "The iLBC source code download is complete."
echo "***"
exit 0
|
sangoma/gsm_asterisk
|
contrib/scripts/get_ilbc_source.sh
|
Shell
|
gpl-2.0
| 926 |
#!/bin/bash
###########################################################################################
## Copyright 2003, 2015 IBM Corp ##
## ##
## Redistribution and use in source and binary forms, with or without modification, ##
## are permitted provided that the following conditions are met: ##
## 1.Redistributions of source code must retain the above copyright notice, ##
## this list of conditions and the following disclaimer. ##
## 2.Redistributions in binary form must reproduce the above copyright notice, this ##
## list of conditions and the following disclaimer in the documentation and/or ##
## other materials provided with the distribution. ##
## ##
## THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND ANY EXPRESS ##
## OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF ##
## MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ##
## THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, ##
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ##
## SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) ##
## HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, ##
## OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ##
## SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ##
############################################################################################
### File : perl-Encode-Locale.sh ##
##
### Description: This testcase tests perl-Encode-Locale package ##
##
### Author: Hariharan T.S. <[email protected]> ##
###########################################################################################
######cd $(dirname $0)
#LTPBIN=${LTPBIN%/shared}/perl_Encode_Locale
MAPPER_FILE="$LTPBIN/mapper_file"
source $LTPBIN/tc_utils.source
source $MAPPER_FILE
TESTS_DIR="${LTPBIN%/shared}/perl_Encode_Locale"
required="perl"
function tc_local_setup()
{
# check installation and environment
tc_exec_or_break $required
# install check
tc_check_package "$PERL_ENCODE_LOCALE"
tc_break_if_bad $? "$PERL_ENCODE_LOCALE not installed"
}
################################################################################
# testcase functions #
################################################################################
#
# Function: runtests
#
# Description: - test perl-Encode-Locale
#
# Parameters: - none
#
# Return - zero on success
# - return value from commands on failure
################################################################################
function runtests()
{
pushd $TESTS_DIR &>/dev/null
TESTS=`ls t/*.t`
TST_TOTAL=`echo $TESTS | wc -w`
for test in $TESTS; do
tc_register "Test $test"
perl $test >$stdout 2>$stderr
tc_pass_or_fail $? "Test $test failed"
done
popd &>/dev/null
}
##############################################
#MAIN #
##############################################
TST_TOTAL=1
tc_setup
runtests
|
rajashreer7/autotest-client-tests
|
linux-tools/perl_Encode_Locale/perl-Encode-Locale.sh
|
Shell
|
gpl-2.0
| 3,785 |
. ./build.conf
LD_LIBRARY_PATH=build:$JDK_HOME/jre/lib/i386/server ./javafuse -f -o big_writes -o auto_cache -Cbrowsefs/BrowseFS:browsefs/selenium-server-standalone-2.7.0.jar -Ffs/browsefs/browsefs.config BrowseFS_Mount/
|
leloulight/javafuse-read-only
|
browsefs_mount.sh
|
Shell
|
gpl-3.0
| 222 |
#!/bin/bash
#
#
# make use of Studio 11 or 12 compiler
#
export CC=cc
export CXX=CC
INSTALL_BASE=/opt/bacula
SBIN_DIR=$INSTALL_BASE/sbin
MAN_DIR=$INSTALL_BASE/man
SYSCONF_DIR=$INSTALL_BASE/etc
SCRIPT_DIR=$INSTALL_BASE/etc
WORKING_DIR=/var/bacula
VERSION=2.2.5
CWD=`pwd`
# Try to guess the distribution base
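# e.g. when run from <distro-root>/platforms/solaris, dirname yields
# <distro-root>/platforms and the sed strips the trailing "/platforms"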
DISTR_BASE=`dirname \`pwd\` | sed -e 's@/platforms$@@'`
echo "Distribution base: $DISTR_BASE"
TMPINSTALLDIR=/tmp/`basename $DISTR_BASE`-build
echo "Temp install dir: $TMPINSTALLDIR"
echo "Install directory: $INSTALL_BASE"
cd $DISTR_BASE
if [ "x$1" = "xbuild" ]; then
./configure --prefix=$INSTALL_BASE \
--sbindir=$SBIN_DIR \
--sysconfdir=$SYSCONF_DIR \
--mandir=$MAN_DIR \
--with-scriptdir=$SCRIPT_DIR \
--with-working-dir=$WORKING_DIR \
--with-subsys-dir=/var/lock/subsys \
--with-pid-dir=/var/run \
--enable-smartalloc \
--enable-conio \
--enable-readline \
--enable-client-only \
--disable-ipv6
make
fi
if [ -d $TMPINSTALLDIR ]; then
rm -rf $TMPINSTALLDIR
fi
mkdir $TMPINSTALLDIR
make DESTDIR=$TMPINSTALLDIR install
# copy additional files to install-dir
#
# change conf-files so that they won't be overwritten by install
#
cd $TMPINSTALLDIR/$SYSCONF_DIR
for x in *.conf; do
mv ${x} ${x}-dist
done
# cd back to my start-dir
#
cd $CWD
#cp prototype.master prototype
sed -e "s|__PKGSOURCE__|$CWD|" prototype.master > prototype
pkgproto $TMPINSTALLDIR/$INSTALL_BASE=. >> prototype
pkgmk -o -d /tmp -b $TMPINSTALLDIR/$INSTALL_BASE -f prototype
if [ $? = 0 ]; then
pkgtrans /tmp bacula-$VERSION.pkg Bacula
echo "Package has been created in /tmp"
fi
|
gagoel/bacula
|
platforms/solaris/makepkg.sh
|
Shell
|
agpl-3.0
| 1,748 |
# to declare your own command you use the keyword cmd
cmd test {
print("test")
}
# so, you can use your command like any other
test
# the commands declared can handle arguments, like any other command
# all arguments passed by the user are elements of the args array
# args is a variable that is created inside the command's symbol table
cmd test_with_args {
print("args type: ", type(args))
print("args len: ", len(args))
for arg in args {
print("> ", arg)
}
}
# here is the test of the command with arguments
test_with_args --a arg1 --b arg2
# you can pass an array as arguments to a command too
test_with_args $@{["arg1", "arg2"]}
# commands can write to stdout or stderr
# to print to stderr, just use print_err
cmd test_out {
print("stdout: ", args.join(","))
print_err("stderr: ", args.join(","))
}
# print stderr to file test_err.txt and discard stdout
test_out 2> test_err.txt > /dev/null
# print stdout to file test_out.txt and discard stderr
test_out > test_out.txt 2> /dev/null
# show file test_err.txt
cat < test_err.txt
# show file test_out.txt
cat < test_out.txt
# remove files
rm test_err.txt
rm test_out.txt
# commands can be used with pipeline too
cmd test_lines {
echo first line
echo second line
echo third
echo other
}
# in this case grep will receive the stdout from the command test_lines
# grep will get the lines 'first line' and 'second line'
test_lines | grep line
# alias is a keyword used to give a nickname to a command with some arguments
alias grep = grep --color=auto
# now grep will show the lines with color
test_lines | grep line
cmd test_a {
echo test_a executed
exit 0
}
cmd test_b {
echo test_b executed
exit 0
}
cmd test_c {
echo test_c executed
exit 1
}
# as test_a returns 0 (no error) and test_b returns 0 (no error),
# && executes both test_a and test_b
test_a && test_b
# as test_a returns 0 (no error), || executes only test_a;
# test_b would be executed only if test_a had exited with an error
# (different from 0)
test_a || test_b
# as test_c returns 1 (error) and test_a returns 0 (no error),
# && executes test_c, but it won't execute test_a
test_c && test_a
# as test_c returns 1 (error) and test_a returns 0 (no error),
# || executes test_c, and since test_c exited with an error, test_a
# will be executed
test_c || test_a
|
alexst07/seti
|
samples/language/12-cmd.sh
|
Shell
|
apache-2.0
| 2,350 |
# Copyright (c) 2003, 2004, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation. Oracle designates this
# particular file as subject to the "Classpath" exception as provided
# by Oracle in the LICENSE file that accompanied this code.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
# @test
# @bug 6466476
# @summary Compatibility test for the old JDK ID mapping and Olson IDs
# @build OldIDMappingTest
# @run shell OldIDMappingTest.sh
: ${TESTJAVA:=${JAVA_HOME}}
: ${TESTCLASSES:="`pwd`"}
JAVA="${TESTJAVA}/bin/java"
STATUS=0
# Expecting the new (Olson compatible) mapping (default)
for I in "" " " no No NO false False FALSE Hello
do
if [ x"$I" != x ]; then
D="-Dsun.timezone.ids.oldmapping=${I}"
fi
if ! ${JAVA} ${D} -cp ${TESTCLASSES} OldIDMappingTest -new; then
STATUS=1
fi
done
# Expecting the old mapping
for I in true True TRUE yes Yes YES
do
if [ "x$I" != x ]; then
D="-Dsun.timezone.ids.oldmapping=${I}"
fi
if ! ${JAVA} ${D} -cp ${TESTCLASSES} OldIDMappingTest -old; then
STATUS=1
fi
done
exit ${STATUS}
|
andreagenso/java2scala
|
test/J2s/java/openjdk-6-src-b27/jdk/test/java/util/TimeZone/OldIDMappingTest.sh
|
Shell
|
apache-2.0
| 1,958 |
#!/usr/bin/env bash
set -e
if [ -n "$SKIP_TESTS" ]; then
exit 0
fi
SOURCE_DIR=${SOURCE_DIR:-$( cd "$( dirname "${BASH_SOURCE[0]}" )" && dirname $( pwd ) )}
BUILD_DIR=$(pwd)
TMPDIR=${TMPDIR:-/tmp}
USER=${USER:-$(whoami)}
SUCCESS=1
VALGRIND="valgrind --leak-check=full --show-reachable=yes --error-exitcode=125 --num-callers=50 --suppressions=\"$SOURCE_DIR/libgit2_clar.supp\""
LEAKS="MallocStackLogging=1 MallocScribble=1 MallocLogFile=/dev/null CLAR_AT_EXIT=\"leaks -quiet \$PPID\""
cleanup() {
echo "Cleaning up..."
if [ ! -z "$GITDAEMON_DIR" -a -f "${GITDAEMON_DIR}/pid" ]; then
echo "Stopping git daemon..."
kill $(cat "${GITDAEMON_DIR}/pid")
fi
if [ ! -z "$SSHD_DIR" -a -f "${SSHD_DIR}/pid" ]; then
echo "Stopping SSH..."
kill $(cat "${SSHD_DIR}/pid")
fi
echo "Done."
}
failure() {
echo "Test exited with code: $1"
SUCCESS=0
}
# Ask ctest what it would run if we were to invoke it directly. This lets
# us manage the test configuration in a single place (tests/CMakeLists.txt)
# instead of running clar here as well. It also allows us to wrap our test
# harness with a leak checker like valgrind. Append the option to write
# JUnit-style XML files.
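# For reference, the line we parse out of `ctest -N -V -R "^online$"` looks
# roughly like the following (illustrative; the exact path depends on the build):
#   1: Test command: /build/libgit2_clar -sonline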
run_test() {
TEST_CMD=$(ctest -N -V -R "^${1}$" | sed -n 's/^[0-9]*: Test command: //p')
if [ -z "$TEST_CMD" ]; then
echo "Could not find tests: $1"
exit 1
fi
TEST_CMD="${TEST_CMD} -r${BUILD_DIR}/results_${1}.xml"
if [ "$LEAK_CHECK" = "valgrind" ]; then
RUNNER="$VALGRIND $TEST_CMD"
elif [ "$LEAK_CHECK" = "leaks" ]; then
RUNNER="$LEAKS $TEST_CMD"
else
RUNNER="$TEST_CMD"
fi
eval $RUNNER || failure
}
# Configure the test environment; start the support daemons early so that
# we're certain they're running by the time we need them.
echo "##############################################################################"
echo "## Configuring test environment"
echo "##############################################################################"
if [ -z "$SKIP_GITDAEMON_TESTS" ]; then
echo "Starting git daemon..."
GITDAEMON_DIR=`mktemp -d ${TMPDIR}/gitdaemon.XXXXXXXX`
git init --bare "${GITDAEMON_DIR}/test.git"
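	# --export-all serves every repo under --base-path without needing a
	# git-daemon-export-ok file; --enable=receive-pack additionally allows pushes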
git daemon --listen=localhost --export-all --enable=receive-pack --pid-file="${GITDAEMON_DIR}/pid" --base-path="${GITDAEMON_DIR}" "${GITDAEMON_DIR}" 2>/dev/null &
fi
if [ -z "$SKIP_PROXY_TESTS" ]; then
echo "Starting HTTP proxy..."
curl -L https://github.com/ethomson/poxyproxy/releases/download/v0.4.0/poxyproxy-0.4.0.jar >poxyproxy.jar
java -jar poxyproxy.jar -d --address 127.0.0.1 --port 8080 --credentials foo:bar --quiet &
fi
if [ -z "$SKIP_SSH_TESTS" ]; then
echo "Starting ssh daemon..."
HOME=`mktemp -d ${TMPDIR}/home.XXXXXXXX`
SSHD_DIR=`mktemp -d ${TMPDIR}/sshd.XXXXXXXX`
git init --bare "${SSHD_DIR}/test.git"
cat >"${SSHD_DIR}/sshd_config" <<-EOF
Port 2222
ListenAddress 0.0.0.0
Protocol 2
HostKey ${SSHD_DIR}/id_rsa
PidFile ${SSHD_DIR}/pid
AuthorizedKeysFile ${HOME}/.ssh/authorized_keys
LogLevel DEBUG
RSAAuthentication yes
PasswordAuthentication yes
PubkeyAuthentication yes
ChallengeResponseAuthentication no
StrictModes no
# Required here as sshd will simply close connection otherwise
UsePAM no
EOF
ssh-keygen -t rsa -f "${SSHD_DIR}/id_rsa" -N "" -q
/usr/sbin/sshd -f "${SSHD_DIR}/sshd_config" -E "${SSHD_DIR}/log"
# Set up keys
mkdir "${HOME}/.ssh"
ssh-keygen -t rsa -f "${HOME}/.ssh/id_rsa" -N "" -q
cat "${HOME}/.ssh/id_rsa.pub" >>"${HOME}/.ssh/authorized_keys"
while read algorithm key comment; do
echo "[localhost]:2222 $algorithm $key" >>"${HOME}/.ssh/known_hosts"
done <"${SSHD_DIR}/id_rsa.pub"
# Get the fingerprint for localhost and remove the colons so we can
# parse it as a hex number. Older versions have a different output
# format.
if [[ $(ssh -V 2>&1) == OpenSSH_6* ]]; then
SSH_FINGERPRINT=$(ssh-keygen -F '[localhost]:2222' -f "${HOME}/.ssh/known_hosts" -l | tail -n 1 | cut -d ' ' -f 2 | tr -d ':')
else
SSH_FINGERPRINT=$(ssh-keygen -E md5 -F '[localhost]:2222' -f "${HOME}/.ssh/known_hosts" -l | tail -n 1 | cut -d ' ' -f 3 | cut -d : -f2- | tr -d :)
fi
fi
# Run the tests that do not require network connectivity.
if [ -z "$SKIP_OFFLINE_TESTS" ]; then
echo ""
echo "##############################################################################"
echo "## Running (offline) tests"
echo "##############################################################################"
run_test offline
fi
if [ -n "$RUN_INVASIVE_TESTS" ]; then
echo ""
echo "Running invasive tests"
echo ""
export GITTEST_INVASIVE_FS_SIZE=1
export GITTEST_INVASIVE_MEMORY=1
export GITTEST_INVASIVE_SPEED=1
run_test invasive
unset GITTEST_INVASIVE_FS_SIZE
unset GITTEST_INVASIVE_MEMORY
unset GITTEST_INVASIVE_SPEED
fi
if [ -z "$SKIP_ONLINE_TESTS" ]; then
# Run the various online tests. The "online" test suite only includes the
# default online tests that do not require additional configuration. The
# "proxy" and "ssh" test suites require further setup.
echo ""
echo "##############################################################################"
echo "## Running (online) tests"
echo "##############################################################################"
run_test online
fi
if [ -z "$SKIP_GITDAEMON_TESTS" ]; then
echo ""
echo "Running gitdaemon tests"
echo ""
export GITTEST_REMOTE_URL="git://localhost/test.git"
run_test gitdaemon
unset GITTEST_REMOTE_URL
fi
if [ -z "$SKIP_PROXY_TESTS" ]; then
echo ""
echo "Running proxy tests"
echo ""
export GITTEST_REMOTE_PROXY_HOST="localhost:8080"
export GITTEST_REMOTE_PROXY_USER="foo"
export GITTEST_REMOTE_PROXY_PASS="bar"
run_test proxy
unset GITTEST_REMOTE_PROXY_HOST
unset GITTEST_REMOTE_PROXY_USER
unset GITTEST_REMOTE_PROXY_PASS
fi
if [ -z "$SKIP_SSH_TESTS" ]; then
echo ""
echo "Running ssh tests"
echo ""
export GITTEST_REMOTE_URL="ssh://localhost:2222/$SSHD_DIR/test.git"
export GITTEST_REMOTE_USER=$USER
export GITTEST_REMOTE_SSH_KEY="${HOME}/.ssh/id_rsa"
export GITTEST_REMOTE_SSH_PUBKEY="${HOME}/.ssh/id_rsa.pub"
export GITTEST_REMOTE_SSH_PASSPHRASE=""
export GITTEST_REMOTE_SSH_FINGERPRINT="${SSH_FINGERPRINT}"
run_test ssh
unset GITTEST_REMOTE_URL
unset GITTEST_REMOTE_USER
unset GITTEST_REMOTE_SSH_KEY
unset GITTEST_REMOTE_SSH_PUBKEY
unset GITTEST_REMOTE_SSH_PASSPHRASE
unset GITTEST_REMOTE_SSH_FINGERPRINT
fi
if [ -z "$SKIP_FUZZERS" ]; then
echo ""
echo "##############################################################################"
echo "## Running fuzzers"
echo "##############################################################################"
for fuzzer in fuzzers/*_fuzzer; do
"${fuzzer}" "${SOURCE_DIR}/fuzzers/corpora/$(basename "${fuzzer%_fuzzer}")" || failure
done
fi
cleanup
if [ "$SUCCESS" -ne "1" ]; then
echo "Some tests failed."
exit 1
fi
echo "Success."
exit 0
|
chigraph/chigraph
|
third_party/libgit2/ci/test.sh
|
Shell
|
apache-2.0
| 6,799 |
#!/bin/bash
# Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
# To set up in e.g. Eclipse, run a separate shell and pipe the output from the
# test into this script.
#
# In Eclipse, that amounts to creating a Run Configuration which starts
# "/bin/bash" with the arguments "-c [trunk_path]/out/Debug/modules_unittests
# --gtest_filter=*BweTest* | [trunk_path]/webrtc/modules/
# remote_bitrate_estimator/bwe_plot.
# bwe_plot.sh supports multiple figures (windows); the figure is specified as an
# identifier in the first argument after the PLOT command. Each figure has a
# single y axis and a dual y axis mode. If any line specifies an axis by ending
# with "#<axis number (1 or 2)>" two y axis will be used, the first will be
# assumed to represent bitrate (in kbps) and the second will be assumed to
# represent time deltas (in ms).
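# Input lines are tab-separated; a hypothetical dual-axis log might contain:
#   PLOT <tab> 1 <tab> estimate#1 <tab> 12.5 <tab> 350
#   PLOT <tab> 1 <tab> delay#2   <tab> 12.5 <tab> 4.2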
log=$(</dev/stdin)
function gen_gnuplot_input {
colors=(a7001f 0a60c2 b2582b 21a66c d6604d 4393c3 f4a582 92c5de edcbb7 b1c5d0)
plots=$(echo "$log" | grep "^PLOT")
figures=($(echo "$plots" | cut -f 2 | sort | uniq))
for figure in "${figures[@]}" ; do
data_sets=$(echo "$plots" | grep "^PLOT.$figure" | cut -f 3 | sort | uniq)
linetypes=($(echo "$data_sets" | grep "#" | cut -d '#' -f 2 | \
cut -d ' ' -f 1))
echo -n "reset; "
echo -n "set terminal wxt $figure size 1440,900 font \"Arial,9\"; "
echo -n "set xlabel \"Seconds\"; "
if (( "${#linetypes[@]}" > "0" )); then
echo -n "set ylabel 'bitrate (kbps)';"
echo -n "set ytics nomirror;"
echo -n "set y2label 'time delta (ms)';"
echo -n "set y2tics nomirror;"
fi
echo -n "plot "
i=0
for set in $data_sets ; do
(( i++ )) && echo -n ","
echo -n "'-' with "
echo -n "linespoints "
echo -n "ps 0.5 "
echo -n "lc rgbcolor \"#${colors[$(($i % 10))]}\" "
if (( "${#linetypes[@]}" > "0" )); then
if (( "$i" <= "${#linetypes[@]}" )); then
echo -n "axes x1y${linetypes[$i - 1]} "
else
# If no line type is specified, but line types are used, we will
# default to the bitrate axis.
echo -n "axes x1y1 "
fi
fi
echo -n "title \"$set\" "
done
echo
for set in $data_sets ; do
echo "$log" | grep "^PLOT.$figure.$set" | cut -f 4,5
echo "e"
done
done
}
gen_gnuplot_input | gnuplot -persist
|
guorendong/iridium-browser-ubuntu
|
third_party/webrtc/modules/remote_bitrate_estimator/test/bwe_plot.sh
|
Shell
|
bsd-3-clause
| 2,726 |
#!/bin/sh
uname -a
gcc --version
vips --version
# how large an image do you want to process?
# sample2.v is 290x442 pixels ... replicate this many times horizontally and
# vertically to get a highres image for the benchmark
tile=13
# how complex an operation do you want to run?
# this sets the number of copies of the benchmark we chain together:
# higher values run more slowly and are more likely to be CPU-bound
chain=1
echo building test image ...
echo "tile=$tile"
vips im_replicate sample2.v temp.v $tile $tile
if [ $? != 0 ]; then
echo "build of test image failed -- out of disc space?"
exit 1
fi
echo -n "test image is" `header -f Xsize temp.v`
echo " by" `header -f Ysize temp.v` "pixels"
echo "starting benchmark ..."
echo "chain=$chain"
for cpus in 1 2 3 4 5 6 ; do
export IM_CONCURRENCY=$cpus
echo IM_CONCURRENCY=$IM_CONCURRENCY
echo time -p vips im_benchmarkn temp.v temp2.v $chain
time -p vips im_benchmarkn temp.v temp2.v $chain
time -p vips im_benchmarkn temp.v temp2.v $chain
if [ $? != 0 ]; then
echo "benchmark failed -- install problem?"
exit 1
fi
# find pixel average ... should be the same for all IM_CONCURRENCY settings
# or we have some kind of terrible bug
echo vips im_avg temp2.v
vips im_avg temp2.v
done
|
bamos/parsec-benchmark
|
pkgs/apps/vips/src/benchmark/benchmarkn.sh
|
Shell
|
bsd-3-clause
| 1,280 |
#!/bin/bash
set -eo pipefail
ACS_ENGINE_HOME=${GOPATH}/src/github.com/Azure/acs-engine
usage() {
echo "$0 [-v version] [-p acs_patch_version]"
echo " -v <version>: version"
echo " -p <patched version>: acs_patch_version"
}
while getopts ":v:p:" opt; do
case ${opt} in
v)
version=${OPTARG}
;;
p)
acs_patch_version=${OPTARG}
;;
*)
usage
exit
;;
esac
done
if [ -z "${version}" ] || [ -z "${acs_patch_version}" ]; then
usage
exit 1
fi
if [ -z "${AZURE_STORAGE_CONNECTION_STRING}" ] || [ -z "${AZURE_STORAGE_CONTAINER_NAME}" ]; then
echo '$AZURE_STORAGE_CONNECTION_STRING and $AZURE_STORAGE_CONTAINER_NAME need to be set for upload to Azure Blob Storage.'
exit 1
fi
KUBERNETES_RELEASE=$(echo $version | cut -d'.' -f1,2)
KUBERNETES_TAG_BRANCH=v${version}
ACS_VERSION=${version}-${acs_patch_version}
ACS_BRANCH_NAME=acs-v${ACS_VERSION}
DIST_DIR=${ACS_ENGINE_HOME}/_dist/k8s-windows-v${ACS_VERSION}/k
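# Example (hypothetical values): version=1.7.2 with acs_patch_version=1 gives
# KUBERNETES_RELEASE=1.7, KUBERNETES_TAG_BRANCH=v1.7.2, ACS_VERSION=1.7.2-1,
# ACS_BRANCH_NAME=acs-v1.7.2-1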
fetch_k8s() {
git clone https://github.com/Azure/kubernetes ${GOPATH}/src/k8s.io/kubernetes || true
cd ${GOPATH}/src/k8s.io/kubernetes
git remote add upstream https://github.com/kubernetes/kubernetes || true
git fetch upstream
}
set_git_config() {
git config user.name "ACS CI"
git config user.email "[email protected]"
}
create_version_branch() {
git checkout -b ${ACS_BRANCH_NAME} ${KUBERNETES_TAG_BRANCH} || true
}
k8s_16_cherry_pick() {
# 232fa6e5bc (HEAD -> release-1.6, origin/release-1.6) Fix the delay caused by network setup in POD Infra container
# 02b1c2b9e2 Use dns policy to determine setting DNS servers on the correct NIC in Windows container
# 4c2a2d79aa Fix getting host DNS for Windows node when dnsPolicy is Default
# caa314ccdc Update docker version parsing to allow nonsemantic versions as they have changed how they do their versions
# f18be40948 Fix the issue in unqualified name where DNS client such as ping or iwr validate name in response and original question. Switch to use miekg's DNS library
# c862b583c9 Remove DNS server from NAT network adapter inside container
# f6c27f9375 Merged libCNI-on-Windows changes from CNI release 0.5.0, PRs 359 and 361
# 4f196c6cac Fix the issue that ping uses the incorrect NIC to resolve name sometimes
# 2c9fd27449 Workaround for Outbound Internet traffic in Azure Kubernetes
# 5fa0725025 Use adapter vEthernet (HNSTransparent) on Windows host network to find node IP
# 79cf9963f7 Merge pull request #51126 from chen-anders/anders/port-47991-to-release-1.6
git cherry-pick 79cf9963f7..232fa6e5bc
}
k8s_17_cherry_pick() {
# 72b9c8f519 Add start time for root container spec
# b7c4184821 Fix windows docker stats cpu units issue
# 51fab673e1 Merge pull request #3 from JiangtianLi/release-1.7
# 45ba7bb0fb Implement metrics for Windows Containers
# 76b94898ec Use dns policy to determine setting DNS servers on the correct NIC in Windows container
# 74a2f37447 Fix network config due to the split of start POD sandbox and start container from 1.7.0
# 5fc0a5e4a2 Workaround for Outbound Internet traffic in Azure Kubernetes (*) Connect a Nat Network to the container (Second adapter) (*) Modify the route so that internet traffic goes via Nat network, and POD traffic goes over the CONTAINER_NETWORK (*) Modify getContainerIP to return the IP corresponding to POD network, and ignore Nat Network (*) DNS Fix for ACS Kubernetes in Windows
# adeb88d774 Use adapter vEthernet (HNSTransparent) on Windows host network to find node IP
# 02549d6647 Merge pull request #50914 from shyamjvs/add-logging-to-logdump
git cherry-pick 02549d6647..45ba7bb0fb
git cherry-pick 51fab673e1..72b9c8f519
}
apply_acs_cherry_picks() {
if [ "${KUBERNETES_RELEASE}" == "1.6" ]; then
k8s_16_cherry_pick
elif [ "${KUBERNETES_RELEASE}" == "1.7" ]; then
k8s_17_cherry_pick
else
echo "Unable to apply cherry picks for ${KUBERNETES_RELEASE}."
exit 1
fi
}
create_dist_dir() {
mkdir -p ${DIST_DIR}
}
build_kubelet() {
echo "building kubelet.exe..."
build/run.sh make WHAT=cmd/kubelet KUBE_BUILD_PLATFORMS=windows/amd64
cp ${GOPATH}/src/k8s.io/kubernetes/_output/dockerized/bin/windows/amd64/kubelet.exe ${DIST_DIR}
}
build_kubeproxy() {
echo "building kube-proxy.exe..."
build/run.sh make WHAT=cmd/kube-proxy KUBE_BUILD_PLATFORMS=windows/amd64
cp ${GOPATH}/src/k8s.io/kubernetes/_output/dockerized/bin/windows/amd64/kube-proxy.exe ${DIST_DIR}
}
download_kubectl() {
kubectl="https://storage.googleapis.com/kubernetes-release/release/v${version}/bin/windows/amd64/kubectl.exe"
echo "dowloading ${kubectl} ..."
wget ${kubectl} -P k
curl ${kubectl} -o ${DIST_DIR}/kubectl.exe
chmod 775 ${DIST_DIR}/kubectl.exe
}
download_nssm() {
NSSM_VERSION=2.24
NSSM_URL=https://nssm.cc/release/nssm-${NSSM_VERSION}.zip
echo "downloading nssm ..."
curl ${NSSM_URL} -o /tmp/nssm-${NSSM_VERSION}.zip
unzip -q -d /tmp /tmp/nssm-${NSSM_VERSION}.zip
cp /tmp/nssm-${NSSM_VERSION}/win64/nssm.exe ${DIST_DIR}
chmod 775 ${DIST_DIR}/nssm.exe
rm -rf /tmp/nssm-${NSSM_VERSION}*
}
download_winnat() {
az storage blob download -f ${DIST_DIR}/winnat.sys -c ${AZURE_STORAGE_CONTAINER_NAME} -n winnat.sys
}
copy_dockerfile_and_pause_ps1() {
cp ${ACS_ENGINE_HOME}/windows/* ${DIST_DIR}
}
create_zip() {
cd ${DIST_DIR}/..
zip -r ../v${ACS_VERSION}intwinnat.zip k/*
cd -
}
upload_zip_to_blob_storage() {
az storage blob upload -f ${DIST_DIR}/../../v${ACS_VERSION}intwinnat.zip -c ${AZURE_STORAGE_CONTAINER_NAME} -n v${ACS_VERSION}intwinnat.zip
}
push_acs_branch() {
cd ${GOPATH}/src/k8s.io/kubernetes
git push origin ${ACS_BRANCH_NAME}
}
create_dist_dir
fetch_k8s
set_git_config
create_version_branch
apply_acs_cherry_picks
# Due to what appears to be a bug in the Kubernetes Windows build system, one
# has to first build a linux binary to generate _output/bin/deepcopy-gen.
# Building to Windows w/o doing this will generate an empty deepcopy-gen.
build/run.sh make WHAT=cmd/kubelet KUBE_BUILD_PLATFORMS=linux/amd64
build_kubelet
build_kubeproxy
download_kubectl
download_nssm
download_winnat
copy_dockerfile_and_pause_ps1
create_zip
upload_zip_to_blob_storage
push_acs_branch
|
SitoCH/acs-engine
|
scripts/build-windows-k8s.sh
|
Shell
|
mit
| 6,168 |
# Set the different paths for this activity
# This is sourced by runit.sh
path=$1
activity=hexagon
plugindir=$path/../boards/.libs
pythonplugindir=$path
resourcedir=$path/resources
section="/fun"
|
keshashah/GCompris
|
src/hexagon-activity/init_path.sh
|
Shell
|
gpl-2.0
| 196 |
#! /bin/sh -e
# tup - A file-based build system
#
# Copyright (C) 2012-2015 Mike Shal <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# Try to create an extra tup.config in the root directory after a variant.
. ./tup.sh
check_no_windows variant
tmkdir build
tmkdir sub
cat > Tupfile << HERE
.gitignore
: foreach *.c |> gcc -c %f -o %o |> %B.o
: *.o sub/*.o |> gcc %f -o %o |> prog
HERE
cat > sub/Tupfile << HERE
.gitignore
: foreach bar.c |> gcc -c %f -o %o |> %B.o
HERE
echo "int main(void) {return 0;}" > foo.c
tup touch Tupfile foo.c build/tup.config sub/bar.c
update
check_exist build/foo.o build/sub/bar.o build/prog build/.gitignore build/sub/.gitignore
check_not_exist foo.o sub/bar.o prog
tup touch tup.config
update
check_exist build/foo.o build/sub/bar.o build/prog build/.gitignore build/sub/.gitignore
check_not_exist foo.o sub/bar.o prog
eotup
|
p2rkw/tup
|
test/t8041-extra-tup-config.sh
|
Shell
|
gpl-2.0
| 1,478 |
#!/usr/bin/env bash
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#
# Creates the project file distributions for the TensorFlow Lite Micro test and
# example targets aimed at embedded platforms.
set -e
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
ROOT_DIR=${SCRIPT_DIR}/../../../../..
cd ${ROOT_DIR}
source tensorflow/lite/micro/tools/ci_build/helper_functions.sh
readable_run make -f tensorflow/lite/micro/tools/make/Makefile clean
TARGET=arduino
# TODO(b/143715361): parallel builds do not work with generated files right now.
readable_run make -f tensorflow/lite/micro/tools/make/Makefile \
TARGET=${TARGET} \
TAGS="portable_optimized" \
generate_arduino_zip
readable_run tensorflow/lite/micro/tools/ci_build/install_arduino_cli.sh
readable_run tensorflow/lite/micro/tools/ci_build/test_arduino_library.sh \
tensorflow/lite/micro/tools/make/gen/arduino_x86_64/prj/tensorflow_lite.zip
|
gunan/tensorflow
|
tensorflow/lite/micro/tools/ci_build/test_arduino.sh
|
Shell
|
apache-2.0
| 1,560 |
#!/bin/bash
lib=$(dirname $0)/lib
$lib/ucalls.py -l tcl "$@"
|
mcaleavya/bcc
|
tools/tclcalls.sh
|
Shell
|
apache-2.0
| 61 |
#!/bin/sh
set -e
echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
SWIFT_STDLIB_PATH="${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}"
install_framework()
{
if [ -r "${BUILT_PRODUCTS_DIR}/$1" ]; then
local source="${BUILT_PRODUCTS_DIR}/$1"
elif [ -r "${BUILT_PRODUCTS_DIR}/$(basename "$1")" ]; then
local source="${BUILT_PRODUCTS_DIR}/$(basename "$1")"
elif [ -r "$1" ]; then
local source="$1"
fi
local destination="${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
if [ -L "${source}" ]; then
echo "Symlinked..."
source="$(readlink "${source}")"
fi
# use filter instead of exclude so missing patterns don't throw errors
echo "rsync -av --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${destination}\""
rsync -av --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${destination}"
local basename
basename="$(basename -s .framework "$1")"
binary="${destination}/${basename}.framework/${basename}"
if ! [ -r "$binary" ]; then
binary="${destination}/${basename}"
fi
# Strip invalid architectures so "fat" simulator / device frameworks work on device
if [[ "$(file "$binary")" == *"dynamically linked shared library"* ]]; then
strip_invalid_archs "$binary"
fi
# Resign the code if required by the build settings to avoid unstable apps
code_sign_if_enabled "${destination}/$(basename "$1")"
# Embed linked Swift runtime libraries. No longer necessary as of Xcode 7.
if [ "${XCODE_VERSION_MAJOR}" -lt 7 ]; then
local swift_runtime_libs
swift_runtime_libs=$(xcrun otool -LX "$binary" | grep --color=never @rpath/libswift | sed -E s/@rpath\\/\(.+dylib\).*/\\1/g | uniq -u && exit ${PIPESTATUS[0]})
for lib in $swift_runtime_libs; do
echo "rsync -auv \"${SWIFT_STDLIB_PATH}/${lib}\" \"${destination}\""
rsync -auv "${SWIFT_STDLIB_PATH}/${lib}" "${destination}"
code_sign_if_enabled "${destination}/${lib}"
done
fi
}
# Signs a framework with the provided identity
code_sign_if_enabled() {
if [ -n "${EXPANDED_CODE_SIGN_IDENTITY}" -a "${CODE_SIGNING_REQUIRED}" != "NO" -a "${CODE_SIGNING_ALLOWED}" != "NO" ]; then
# Use the current code_sign_identity
echo "Code Signing $1 with Identity ${EXPANDED_CODE_SIGN_IDENTITY_NAME}"
local code_sign_cmd="/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} ${OTHER_CODE_SIGN_FLAGS} --preserve-metadata=identifier,entitlements '$1'"
if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
code_sign_cmd="$code_sign_cmd &"
fi
echo "$code_sign_cmd"
eval "$code_sign_cmd"
fi
}
# Strip invalid architectures
strip_invalid_archs() {
binary="$1"
# Get architectures for current file
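# `lipo -info` typically prints something like
#   Architectures in the fat file: <binary> are: armv7 arm64
# so the `rev | cut | rev` below keeps only the part after the last colon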
archs="$(lipo -info "$binary" | rev | cut -d ':' -f1 | rev)"
stripped=""
for arch in $archs; do
if ! [[ "${VALID_ARCHS}" == *"$arch"* ]]; then
# Strip non-valid architectures in-place
lipo -remove "$arch" -output "$binary" "$binary" || exit 1
stripped="$stripped $arch"
fi
done
if [[ "$stripped" ]]; then
echo "Stripped $binary of architectures:$stripped"
fi
}
if [[ "$CONFIGURATION" == "Debug" ]]; then
install_framework "$BUILT_PRODUCTS_DIR/TesseractOCRiOS/TesseractOCR.framework"
fi
if [[ "$CONFIGURATION" == "Release" ]]; then
install_framework "$BUILT_PRODUCTS_DIR/TesseractOCRiOS/TesseractOCR.framework"
fi
if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
wait
fi
|
shridharmalimca/iOSDev
|
iOS/Components/OCRTest/Pods/Target Support Files/Pods-OCRTest/Pods-OCRTest-frameworks.sh
|
Shell
|
apache-2.0
| 3,729 |
#!/bin/bash
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
# PREREQUISITES: run `godep restore` in the main repo before calling this script.
RELEASE="1.4"
MAIN_REPO_FROM_SRC="${1:-"k8s.io/kubernetes"}"
MAIN_REPO="${GOPATH%:*}/src/${MAIN_REPO_FROM_SRC}"
CLIENT_REPO_FROM_SRC="${2:-"k8s.io/client-go/${RELEASE}"}"
CLIENT_REPO="${MAIN_REPO}/staging/src/${CLIENT_REPO_FROM_SRC}"
CLIENT_REPO_TEMP="${CLIENT_REPO}"/_tmp
# working in ${CLIENT_REPO_TEMP} so 'godep save' won't complain about a dirty working tree.
echo "creating the _tmp directory"
mkdir -p "${CLIENT_REPO_TEMP}"
cd "${CLIENT_REPO}"
# mkcp copies a file from the main repo to the client repo, creating the directory if it doesn't exist in the client repo.
function mkcp() {
mkdir -p "${CLIENT_REPO_TEMP}/$2" && cp -r "${MAIN_REPO}/$1" "${CLIENT_REPO_TEMP}/$2"
}
echo "copying client packages"
mkcp "pkg/client/clientset_generated/release_1_4" "pkg/client/clientset_generated"
mkcp "/pkg/client/record/" "/pkg/client"
mkcp "/pkg/client/cache/" "/pkg/client"
# TODO: make this test file not depend on pkg/client/unversioned
rm "${CLIENT_REPO_TEMP}"/pkg/client/cache/listwatch_test.go
mkcp "/pkg/client/restclient" "/pkg/client"
mkcp "/pkg/client/testing" "/pkg/client"
# remove this test because it imports the internal clientset
rm "${CLIENT_REPO_TEMP}"/pkg/client/testing/core/fake_test.go
mkcp "/pkg/client/transport" "/pkg/client"
mkcp "/pkg/client/typed" "/pkg/client"
mkcp "/pkg/client/unversioned/auth" "/pkg/client/unversioned"
mkcp "/pkg/client/unversioned/clientcmd" "/pkg/client/unversioned"
mkcp "/pkg/client/unversioned/portforward" "/pkg/client/unversioned"
# remove this test because it imports the internal clientset
rm "${CLIENT_REPO_TEMP}"/pkg/client/unversioned/portforward/portforward_test.go
pushd "${CLIENT_REPO_TEMP}"
echo "generating vendor/"
GO15VENDOREXPERIMENT=1 godep save ./...
popd
echo "move to the client repo"
# clean the ${CLIENT_REPO}
ls "${CLIENT_REPO}" | grep -v '_tmp' | xargs rm -r
mv "${CLIENT_REPO_TEMP}"/* "${CLIENT_REPO}"
rm -r "${CLIENT_REPO_TEMP}"
echo "moving vendor/k8s.io/kuberentes"
cp -rn "${CLIENT_REPO}"/vendor/k8s.io/kubernetes/. "${CLIENT_REPO}"/
rm -rf "${CLIENT_REPO}"/vendor/k8s.io/kubernetes
# client-go will share the vendor of the main repo for now. When client-go
# becomes a standalone repo, it will have its own vendor
mv "${CLIENT_REPO}"/vendor "${CLIENT_REPO}"/_vendor
# remove the pkg/util/net/sets/README.md to silence hack/verify-munge-docs.sh
# TODO: probably we should convert the README.md to a doc.go
find ./ -name "README.md" -delete
echo "rewriting imports"
grep -Rl "\"${MAIN_REPO_FROM_SRC}" ./ | grep ".go" | grep -v "vendor/" | xargs sed -i "s|\"${MAIN_REPO_FROM_SRC}|\"${CLIENT_REPO_FROM_SRC}|g"
echo "converting pkg/client/record to v1"
# need a v1 version of ref.go
cp "${CLIENT_REPO}"/pkg/api/ref.go "${CLIENT_REPO}"/pkg/api/v1/ref.go
gofmt -w -r 'api.a -> v1.a' "${CLIENT_REPO}"/pkg/api/v1/ref.go
gofmt -w -r 'Scheme -> api.Scheme' "${CLIENT_REPO}"/pkg/api/v1/ref.go
# rewriting package name to v1
sed -i 's/package api/package v1/g' "${CLIENT_REPO}"/pkg/api/v1/ref.go
# ref.go refers to api.Scheme, so manually import /pkg/api
sed -i "s,import (,import (\n\"${CLIENT_REPO_FROM_SRC}/pkg/api\",g" "${CLIENT_REPO}"/pkg/api/v1/ref.go
gofmt -w "${CLIENT_REPO}"/pkg/api/v1/ref.go
# rewrite pkg/client/record to v1
gofmt -w -r 'api.a -> v1.a' "${CLIENT_REPO}"/pkg/client/record
# need to call sed to rewrite the strings in test cases...
find "${CLIENT_REPO}"/pkg/client/record -type f -name "*.go" -print0 | xargs -0 sed -i "s/api.ObjectReference/v1.ObjectReference/g"
# rewrite the imports
find "${CLIENT_REPO}"/pkg/client/record -type f -name "*.go" -print0 | xargs -0 sed -i 's,pkg/api",pkg/api/v1",g'
# gofmt the changed files
echo "rewrite conflicting Prometheus registration"
sed -i "s/request_latency_microseconds/request_latency_microseconds_copy/g" "${CLIENT_REPO}"/pkg/client/metrics/metrics.go
sed -i "s/request_status_codes/request_status_codes_copy/g" "${CLIENT_REPO}"/pkg/client/metrics/metrics.go
sed -i "s/kubernetes_build_info/kubernetes_build_info_copy/g" "${CLIENT_REPO}"/pkg/version/version.go
echo "rewrite proto names in proto.RegisterType"
find "${CLIENT_REPO}" -type f -name "generated.pb.go" -print0 | xargs -0 sed -i "s/k8s\.io\.kubernetes/k8s.io.client-go.1.4/g"
echo "rearranging directory layout"
# $1 and $2 are relative to ${CLIENT_REPO}
function mvfolder {
local src=${1%/#/}
local dst=${2%/#/}
# create the parent directory of dst
if [ "${dst%/*}" != "${dst}" ]; then
mkdir -p "${CLIENT_REPO}/${dst%/*}"
fi
# move
mv "${CLIENT_REPO}/${src}" "${CLIENT_REPO}/${dst}"
# rewrite package
local src_package="${src##*/}"
local dst_package="${dst##*/}"
find "${CLIENT_REPO}" -type f -name "*.go" -print0 | xargs -0 sed -i "s,package ${src_package},package ${dst_package},g"
# rewrite imports
# the first rule is to convert import lines like `restclient "k8s.io/client-go/pkg/client/restclient"`,
# where the package alias is the same as the package name.
find "${CLIENT_REPO}" -type f -name "*.go" -print0 | \
xargs -0 sed -i "s,${src_package} \"${CLIENT_REPO_FROM_SRC}/${src},${dst_package} \"${CLIENT_REPO_FROM_SRC}/${dst},g"
find "${CLIENT_REPO}" -type f -name "*.go" -print0 | \
xargs -0 sed -i "s,\"${CLIENT_REPO_FROM_SRC}/${src},\"${CLIENT_REPO_FROM_SRC}/${dst},g"
# rewrite import invocation
if [ "${src_package}" != "${dst_package}" ]; then
find "${CLIENT_REPO}" -type f -name "*.go" -print0 | xargs -0 sed -i "s,\<${src_package}\.\([a-zA-Z]\),${dst_package}\.\1,g"
fi
}
mvfolder pkg/client/clientset_generated/release_1_4 kubernetes
mvfolder pkg/client/typed/discovery discovery
mvfolder pkg/client/typed/dynamic dynamic
mvfolder pkg/client/transport transport
mvfolder pkg/client/record tools/record
mvfolder pkg/client/restclient rest
mvfolder pkg/client/cache tools/cache
mvfolder pkg/client/unversioned/auth tools/auth
mvfolder pkg/client/unversioned/clientcmd tools/clientcmd
mvfolder pkg/client/unversioned/portforward tools/portforward
mvfolder pkg/client/metrics tools/metrics
mvfolder pkg/client/testing/core testing
if [ "$(find "${CLIENT_REPO}"/pkg/client -type f -name "*.go")" ]; then
echo "${CLIENT_REPO}/pkg/client is expected to be empty"
exit 1
else
rm -r "${CLIENT_REPO}"/pkg/client
fi
mvfolder third_party pkg/third_party
mvfolder federation pkg/federation
echo "running gofmt"
find "${CLIENT_REPO}" -type f -name "*.go" -print0 | xargs -0 gofmt -w
|
jszczepkowski/kubernetes
|
staging/src/k8s.io/client-go/copy.sh
|
Shell
|
apache-2.0
| 7,157 |
#Aqueduct - Compliance Remediation Content
#Copyright (C) 2011,2012 Vincent C. Passaro ([email protected])
#
#This program is free software; you can redistribute it and/or
#modify it under the terms of the GNU General Public License
#as published by the Free Software Foundation; either version 2
#of the License, or (at your option) any later version.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 51 Franklin Street, Fifth Floor,
#Boston, MA 02110-1301, USA.
#!/bin/bash
######################################################################
#By Tummy a.k.a Vincent C. Passaro #
#Vincent[.]Passaro[@]gmail[.]com #
#www.vincentpassaro.com #
######################################################################
#_____________________________________________________________________
#| Version | Change Information | Author | Date |
#|__________|_______________________|____________________|____________|
#| 1.0 | Initial Script | Vincent C. Passaro | 20-oct-2011|
#| | Creation | | |
#|__________|_______________________|____________________|____________|
#
# Updated by Vincent C. Passaro:
# Documentation update
#######################DISA INFORMATION###############################
#Group ID (Vulid): V-816
#Group Title: Audit administrative, privileged, security actions
#Rule ID: SV-27302r1_rule
#Severity: CAT II
#Rule Version (STIG-ID): GEN002760
#Rule Title: The audit system must be configured to audit all
#administrative, privileged, and security actions.
#
#Vulnerability Discussion: If the system is not configured to
#audit certain activities and write them to an audit log, it is
#more difficult to detect and track system compromises and
#damages incurred during a system compromise.
#
#
#Responsibility: System Administrator
#IAControls: ECAR-1, ECAR-2, ECAR-3
#
#Check Content:
#Check the auditing configuration of the system.
#
#Procedure:
# cat /etc/audit.rules /etc/audit/audit.rules | grep -i "auditd.conf"
#If no results are returned, or the line does not start with "-w",this is a finding.
#
# cat /etc/audit.rules /etc/audit/audit.rules | grep -i "audit.rules"
#If no results are returned, or the line does not start with "-w",this is a finding.
#
# cat /etc/audit.rules /etc/audit/audit.rules | grep -e "-a always,exit" | grep -i "adjtime"
#If the result does not contain "-S adjtime" and "-k time-change",this is a finding.
#
# cat /etc/audit.rules /etc/audit/audit.rules | grep -e "-a always,exit" | grep -i "settimeofday"
#If the result does not contain "-S settimeofday" and "-k time-change",this is a finding.
#
# cat /etc/audit.rules /etc/audit/audit.rules | grep -e "-a always,exit" | grep -i "stime"
#If the result does not contain "-S stime" and "-k time-change",this is a finding.
#
# cat /etc/audit.rules /etc/audit/audit.rules | grep -e "-a always,exit" | grep -i "clock_settime"
#If the result does not contain "-S clock_settime" and "-k time-change",this is a finding.
#
# cat /etc/audit.rules /etc/audit/audit.rules | grep -e "-a always,exit" | grep -i "sethostname"
#If the result does not contain "-S sethostname" and "-k system-locale",this is a finding.
#
# cat /etc/audit.rules /etc/audit/audit.rules | grep -e "-a always,exit" | grep -i "setdomain"
#If the result does not contain "-S setdomain" and "-k system-locale",this is a finding.
#
# cat /etc/audit.rules /etc/audit/audit.rules | grep -e "-a always,exit" | grep -i "sched_setparam"
#If the result does not contain "-S sched_setparam", this is a finding.
#
# cat /etc/audit.rules /etc/audit/audit.rules | grep -e "-a always,exit" | grep -i "sched_setscheduler"
#If the result does not contain "-S sched_setscheduler", this is a finding.
#
#
#Fix Text: The "-F arch=<ARCH>"restriction is required on dual-architecture
#systems (such as x86_64). On dual-architecture systems, two separate
#rules must exist - one for each architecture supported. Use the g
#eneric architectures “b32” and “b64” for specifying these rules.
#On single architecture systems, the "-F arch=<ARCH>"restriction
#may be omitted, but if present must match either the architecture
#of the system or its corresponding generic architecture. The
#architecture of the system may be determined by running “uname -m”.
#See the auditctl(8) manpage for additional details.
#Any restrictions (such as with “-F”) beyond those provided in the
#example rules are not in strict compliance with this requirement,
#and are a finding unless justified and documented appropriately.
#The use of audit keys consistent with the provided example is
#encouraged to provide for uniform audit logs, however omitting
#the audit key or using an alternate audit key is not a finding.
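#
#For illustration only (GENARCH is an invented helper variable, not part of
#the official fix text), the generic architecture could be picked at runtime:
#
# if [ "$(uname -m)" = "x86_64" ]; then GENARCH="b64"; else GENARCH="b32"; fi
# auditctl -a always,exit -F arch=$GENARCH -S clock_settime -k time-change
#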
#Procedure:
#Add the following lines to the audit.rules file to enable
#auditing of administrative, privileged, and security actions:
#
#-w /etc/auditd.conf
#-w /etc/audit/auditd.conf
#-w /etc/audit.rules
#-w /etc/audit/audit.rules
#-a always,exit -F arch=<ARCH> -S adjtimex -S settimeofday -S stime -k time-change
#-a always,exit -F arch=<ARCH> -S sethostname -S setdomainname -k system-locale
#-a always,exit -F arch=<ARCH> -S clock_settime -k time-change
#
#A Real Time Operating System (RTOS) provides specialized system
#scheduling which causes an inordinate number of messages to be
#produced when the sched_setparam and sched_setscheduler are audited.
#This not only may degrade the system speed to an unusable level
#but obscures any forensic information which may otherwise have been useful.
#Unless the operating system is a Red Hat 5 based RTOS (including
#MRG and AS5300) the following should also be present in /etc/audit/audit.rules
#
#-a always,exit -F arch=<ARCH> -S sched_setparam -S sched_setscheduler
#
#
#Restart the auditd service.
# service auditd restart
#######################DISA INFORMATION###############################
#AND AGAIN DISA STATEMENT IS CORRECT (THIS IS GETTING OLD)
#
#The stime flag is the same as settimeofday, which is only valid for 32bit platforms. SO! We need to remove it, but we're still going to grep for it so we don't throw any false positives.
#
#Global Variables#
PDI=GEN002760
UNAME=$( uname -m )
BIT64='x86_64'
AUDITFILE='/etc/audit/audit.rules'
AUDITCOUNT641=$( grep -c -e "-a always,exit -F arch=b64 -S adjtimex -S settimeofday -S stime -k time-change" $AUDITFILE )
AUDITCOUNT642=$( grep -c -e "-a always,exit -F arch=b64 -S sethostname -S setdomainname -k system-locale" $AUDITFILE )
AUDITCOUNT643=$( grep -c -e "-a always,exit -F arch=b64 -S clock_settime -k time-change" $AUDITFILE )
AUDITCOUNT644=$( grep -c -e "-a always,exit -F arch=b64 -S sched_setparam -S sched_setscheduler" $AUDITFILE )
AUDITCOUNT321=$( grep -c -e "-a always,exit -F arch=b32 -S adjtimex -S settimeofday -S stime -k time-change" $AUDITFILE )
AUDITCOUNT322=$( grep -c -e "-a always,exit -F arch=b32 -S sethostname -S setdomainname -k system-locale" $AUDITFILE )
AUDITCOUNT323=$( grep -c -e "-a always,exit -F arch=b32 -S clock_settime -k time-change" $AUDITFILE )
AUDITCOUNT324=$( grep -c -e "-a always,exit -F arch=b32 -S sched_setparam -S sched_setscheduler" $AUDITFILE )
AUDITCOUNTNOARCH1=$( grep -c -e "-w /etc/auditd.conf" $AUDITFILE )
AUDITCOUNTNOARCH2=$( grep -c -e "-w /etc/audit/auditd.conf" $AUDITFILE )
AUDITCOUNTNOARCH3=$( grep -c -e "-w /etc/audit.rules" $AUDITFILE )
AUDITCOUNTNOARCH4=$( grep -c -e "-w /etc/audit/audit.rules" $AUDITFILE )
#Start-Lockdown
#
#
#THIS BEGINS THE SECTION THAT ISN'T ARCH SPECIFIC
#
#
if [ $AUDITCOUNTNOARCH1 -eq 0 ]
then
echo " " >> $AUDITFILE
echo "#############GEN002760#############" >> $AUDITFILE
echo "-w /etc/auditd.conf" >> $AUDITFILE
service auditd restart
fi
if [ $AUDITCOUNTNOARCH2 -eq 0 ]
then
echo " " >> $AUDITFILE
echo "#############GEN002760#############" >> $AUDITFILE
echo "-w /etc/audit/auditd.conf" >> $AUDITFILE
service auditd restart
fi
if [ $AUDITCOUNTNOARCH3 -eq 0 ]
then
echo " " >> $AUDITFILE
echo "#############GEN002760#############" >> $AUDITFILE
echo "-w /etc/audit.rules" >> $AUDITFILE
service auditd restart
fi
if [ $AUDITCOUNTNOARCH4 -eq 0 ]
then
echo " " >> $AUDITFILE
echo "#############GEN002760#############" >> $AUDITFILE
echo "-w /etc/audit/audit.rules" >> $AUDITFILE
service auditd restart
fi
#END NON-ARCH SPECIFIC
#
#
#THIS BEGINS THE SECTION THAT IS ARCH SPECIFIC
#
#
#THIS SECTION IS FOR THE 64BIT
if [ $UNAME == $BIT64 ]
then
if [ $AUDITCOUNT641 -eq 0 ]
then
echo " " >> $AUDITFILE
echo "#############GEN002760#############" >> $AUDITFILE
echo "-a always,exit -F arch=b64 -S adjtimex -S settimeofday -k time-change" >> $AUDITFILE
service auditd restart
fi
if [ $AUDITCOUNT642 -eq 0 ]
then
echo " " >> $AUDITFILE
echo "#############GEN002760#############" >> $AUDITFILE
echo "-a always,exit -F arch=b64 -S sethostname -S setdomainname -k system-locale" >> $AUDITFILE
service auditd restart
fi
if [ $AUDITCOUNT643 -eq 0 ]
then
echo " " >> $AUDITFILE
echo "#############GEN002760#############" >> $AUDITFILE
echo "-a always,exit -F arch=b64 -S clock_settime -k time-change" >> $AUDITFILE
service auditd restart
fi
if [ $AUDITCOUNT644 -eq 0 ]
then
echo " " >> $AUDITFILE
echo "#############GEN002760#############" >> $AUDITFILE
echo "-a always,exit -F arch=b64 -S sched_setparam -S sched_setscheduler" >> $AUDITFILE
service auditd restart
fi
#THIS SECTION IS FOR THE 64BIT WITH 32BIT FLAG SET FOR AUDITING
if [ $AUDITCOUNT321 -eq 0 ]
then
echo " " >> $AUDITFILE
echo "#############GEN002760#############" >> $AUDITFILE
echo "-a always,exit -F arch=b32 -S adjtimex -S settimeofday -S stime -k time-change" >> $AUDITFILE
service auditd restart
fi
if [ $AUDITCOUNT322 -eq 0 ]
then
echo " " >> $AUDITFILE
echo "#############GEN002760#############" >> $AUDITFILE
echo "-a always,exit -F arch=b32 -S sethostname -S setdomainname -k system-locale" >> $AUDITFILE
service auditd restart
fi
if [ $AUDITCOUNT323 -eq 0 ]
then
echo " " >> $AUDITFILE
echo "#############GEN002760#############" >> $AUDITFILE
echo "-a always,exit -F arch=b32 -S clock_settime -k time-change" >> $AUDITFILE
service auditd restart
fi
if [ $AUDITCOUNT324 -eq 0 ]
then
echo " " >> $AUDITFILE
echo "#############GEN002760#############" >> $AUDITFILE
echo "-a always,exit -F arch=b32 -S sched_setparam -S sched_setscheduler" >> $AUDITFILE
service auditd restart
fi
else
#THIS IS THE SECTION FOR 32BIT ONLY!
if [ $AUDITCOUNT321 -eq 0 ]
then
echo " " >> $AUDITFILE
echo "#############GEN002760#############" >> $AUDITFILE
echo "-a always,exit -F arch=b32 -S adjtimex -S settimeofday -S stime -k time-change" >> $AUDITFILE
service auditd restart
fi
if [ $AUDITCOUNT322 -eq 0 ]
then
echo " " >> $AUDITFILE
echo "#############GEN002760#############" >> $AUDITFILE
echo "-a always,exit -F arch=b32 -S sethostname -S setdomainname -k system-locale" >> $AUDITFILE
service auditd restart
fi
if [ $AUDITCOUNT323 -eq 0 ]
then
echo " " >> $AUDITFILE
echo "#############GEN002760#############" >> $AUDITFILE
echo "-a always,exit -F arch=b32 -S clock_settime -k time-change" >> $AUDITFILE
service auditd restart
fi
if [ $AUDITCOUNT324 -eq 0 ]
then
echo " " >> $AUDITFILE
echo "#############GEN002760#############" >> $AUDITFILE
echo "-a always,exit -F arch=b32 -S sched_setparam -S sched_setscheduler" >> $AUDITFILE
service auditd restart
fi
fi
|
quark-pat/CLIP
|
packages/aqueduct/aqueduct/compliance/Bash/STIG/rhel-5-beta/prod/GEN002760.sh
|
Shell
|
apache-2.0
| 12,044 |
#!/bin/sh
set -e
HPTOOL=hptool/dist/build/hptool/hptool
HPTOOL_ALT=hptool/.cabal-sandbox/bin/hptool
if ( cabal sandbox --help >/dev/null 2>&1 ) ; then
if [ \! -d hptool/.cabal-sandbox ]
then
echo '***'
echo '*** Setting up sandbox for hptool'
echo '***'
cabal update
(cd hptool; cabal sandbox init; cabal install --only-dependencies)
fi
else
if ( cabal install --dry-run --only-dependencies | grep -q 'would be installed' ) ; then
echo '=== pre-requisite packages for hptool are not installed'
echo ' run the following:'
echo ' cd hptool ; cabal install --only-dependencies'
exit 1
fi
fi
echo '***'
echo '*** Building hptool'
echo '***'
(cd hptool; cabal build)
if [ "$HPTOOL_ALT" -nt "$HPTOOL" ] ; then
HPTOOL="$HPTOOL_ALT"
fi
echo '***'
echo '*** Running hptool'
echo '***'
exec $HPTOOL "$@"
|
bgamari/haskell-platform
|
platform.sh
|
Shell
|
bsd-3-clause
| 902 |
export ARCH=arm
export CROSS_COMPILE=/opt/toolchains/arm-eabi-4.4.3/bin/arm-eabi-
export USE_SEC_FIPS_MODE=true
make u1_defconfig
make
|
mozilla-b2g/kernel-android-galaxy-s2-ics
|
build_kernel.sh
|
Shell
|
gpl-2.0
| 135 |
#!/bin/sh
# 4.1.1 and 4.1.2 had a bug whereby some recursive listings
# didn't include a blank line between per-directory groups of files.
# Copyright (C) 2001-2014 Free Software Foundation, Inc.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
. "${srcdir=.}/tests/init.sh"; path_prepend_ ./src
print_ver_ ls
mkdir x y a b c a/1 a/2 a/3 || framework_failure_
touch f a/1/I a/1/II || framework_failure_
# This first example is from Andreas Schwab's bug report.
ls -R1 a b c > out || fail=1
cat <<EOF > exp
a:
1
2
3
a/1:
I
II
a/2:
a/3:
b:
c:
EOF
compare exp out || fail=1
rm -rf out exp
ls -R1 x y f > out || fail=1
cat <<EOF > exp
f
x:
y:
EOF
compare exp out || fail=1
Exit $fail
|
shichao-an/coreutils
|
tests/ls/recursive.sh
|
Shell
|
gpl-3.0
| 1,280 |
#!/bin/bash
# Copyright (c) 2018 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Linux Build Script.
# Fail on any error.
set -e
# Display commands being run.
set -x
SCRIPT_DIR=`dirname "$BASH_SOURCE"`
source $SCRIPT_DIR/../scripts/linux/build.sh RELEASE gcc
|
endlessm/chromium-browser
|
third_party/swiftshader/third_party/SPIRV-Tools/kokoro/linux-gcc-release/build.sh
|
Shell
|
bsd-3-clause
| 775 |
#!/bin/bash
# Please note that this file is here only to help with debugging.
# In a working environment there is no need to create two new files for every
# email received. When done with debugging, please use
# og_mailinglist_postfix_transport.php directly.
# Write message to file
cat >/tmp/message.$$
# Read message from file, pass it to PHP and write output to log file.
(cat /tmp/message.$$ | \
/etc/exim4/exim_og_mailinglist/og_mailinglist_exim4_transport.php $1 > \
/tmp/message.$$.log 2>&1 )
# Show log file for easier debugging.
cat /tmp/message.$$.log
|
markbannister/dev1collaboration
|
sites/all/modules/og_mailinglist/backends/exim_og_mailinglist/og_mailinglist_debug.sh
|
Shell
|
gpl-2.0
| 569 |
# Cocoa Functions
find /System/Library/Frameworks/{AppKit,Foundation}.framework -name \*.h -exec grep '^[A-Z][A-Z_]* [^;]* \**NS[A-Z][A-Za-z]* *(' '{}' \;|perl -pe 's/.*?\s\*?(NS\w+)\s*\(.*/$1/'|sort|uniq|./list_to_regexp.rb >/tmp/functions.txt
# Cocoa Protocols Classes
{ find /System/Library/Frameworks/{AppKit,Foundation}.framework -name \*.h -exec grep '@interface NS[A-Za-z]*' '{}' \;|perl -pe 's/.*?(NS[A-Za-z]+).*/$1/';
find /System/Library/Frameworks/{AppKit,Foundation}.framework -name \*.h -exec grep '@protocol NS[A-Za-z]*' '{}' \;|perl -pe 's/.*?(NS[A-Za-z]+).*/$1/';
}|sort|uniq|./list_to_regexp.rb >/tmp/classes.txt
# Cocoa Types
find /System/Library/Frameworks/{AppKit,Foundation}.framework -name \*.h -exec grep 'typedef .* _*NS[A-Za-z]*' '{}' \;|perl -pe 's/.*?(NS[A-Za-z]+);.*/$1/'|perl -pe 's/typedef .*? _?(NS[A-Za-z0-9]+) \{.*/$1/'|grep -v typedef|sort|uniq|./list_to_regexp.rb >/tmp/types.txt
# Cocoa Constants
find /System/Library/Frameworks/{AppKit,Foundation}.framework -name \*.h -exec awk '/\}/ { pr = 0; } { if(pr) print $0; } /^(typedef )?enum .*\{[^}]*$/ { pr = 1; }' '{}' \;|expand|grep '^ *NS[A-Z]'|perl -pe 's/^\s*(NS[A-Z][A-Za-z0-9_]*).*/$1/'|sort|uniq|./list_to_regexp.rb >/tmp/constants.txt
# Cocoa Notifications
find /System/Library/Frameworks/{AppKit,Foundation}.framework -name \*.h -exec grep '\*NS.*Notification' '{}' \;|perl -pe 's/.*?(NS[A-Za-z]+Notification).*/$1/'|sort|uniq|./list_to_regexp.rb >/tmp/notifications.txt
|
zyqhi/zemacofig
|
vendor/yasnippet/yasmate/bundles/objc-tmbundle/Support/Collect Cocoa Definitions.sh
|
Shell
|
gpl-2.0
| 1,470 |
#!/bin/bash
set -eo pipefail
dir="$(dirname "$(readlink -f "$BASH_SOURCE")")"
image="$1"
dbImage='mysql:5.7'
# ensure the dbImage is ready and available
if ! docker image inspect "$dbImage" &> /dev/null; then
docker pull "$dbImage" > /dev/null
fi
# Create an instance of the container-under-test
mysqlCid="$(docker run -d \
-e MYSQL_RANDOM_ROOT_PASSWORD=true \
-e MYSQL_DATABASE=monica \
-e MYSQL_USER=homestead \
-e MYSQL_PASSWORD=secret \
"$dbImage")"
trap "docker rm -vf $mysqlCid > /dev/null" EXIT
cid="$(docker run -d \
--link "$mysqlCid":mysql \
-e DB_HOST=mysql \
"$image")"
trap "docker rm -vf $cid $mysqlCid > /dev/null" EXIT
_artisan() {
docker exec "$cid" php artisan "$@"
}
# returns success when all database migrations are finished
_migrate_done() {
local status
status="$(_artisan migrate:status)"
if grep -q ' Yes ' <<<"$status" && ! grep -q ' No ' <<<"$status"; then
return 0
fi
return 1
}
# check artisan command for specific output; print and error when not found
_artisan_test() {
local match="$1"; shift
output="$(_artisan "$@")"
if ! grep -iq "$match" <<<"$output"; then
echo "Match: '$match' not found in: $output"
return 1
fi
}
# Give some time to install
. "$dir/../../retry.sh" --tries 30 '_migrate_done'
# Check if installation is complete
_artisan monica:getversion > /dev/null
. "$dir/../../retry.sh" --tries 5 -- _artisan_test 'No scheduled commands are ready to run.' schedule:run
|
neo-technology/docker-official-images
|
test/tests/monica-cli/run.sh
|
Shell
|
apache-2.0
| 1,447 |
install_service() {
mkdir /test-service
mv /app.jar /test-service/spring-boot-app.jar
chmod +x /test-service/spring-boot-app.jar
ln -s /test-service/spring-boot-app.jar /etc/init.d/spring-boot-app
}
install_double_link_service() {
mkdir /test-service
mv /app.jar /test-service/
chmod +x /test-service/app.jar
ln -s /test-service/app.jar /test-service/spring-boot-app.jar
ln -s /test-service/spring-boot-app.jar /etc/init.d/spring-boot-app
}
start_service() {
service spring-boot-app start $@
}
restart_service() {
service spring-boot-app restart
}
status_service() {
service spring-boot-app status
}
stop_service() {
service spring-boot-app stop
}
force_stop_service() {
service spring-boot-app force-stop
}
await_app() {
  if [ -z "$1" ]
then
url=http://127.0.0.1:8080
else
url=$1
fi
end=$(date +%s)
let "end+=600"
until curl -s $url > /dev/null
do
now=$(date +%s)
if [[ $now -ge $end ]]; then
break
fi
sleep 1
done
}
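
# Illustrative usage of the helpers above (assumes an app.jar at / and the
# init.d conventions these functions set up):
#   install_service
#   start_service
#   await_app http://127.0.0.1:8080
#   status_service
#   stop_service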
|
royclarkson/spring-boot
|
spring-boot-tests/spring-boot-integration-tests/spring-boot-launch-script-tests/src/intTest/resources/scripts/test-functions.sh
|
Shell
|
apache-2.0
| 998 |
# store all NETFS* variables
# I don't know why it does not work with the full 'declare -- var=value' syntax;
# experimenting showed that the leading 'declare -- ' prefix must be removed.
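# Illustrative (value invented): 'declare -p NETFS_URL' prints
#   declare -- NETFS_URL="nfs://server/export"
# and the sed below reduces it to
#   NETFS_URL="nfs://server/export"
# which the rescue system can then source directly.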
declare -p ${!NETFS*} | sed -e 's/declare .. //' >>$ROOTFS_DIR/etc/rear/rescue.conf
|
krissi/rear
|
usr/share/rear/rescue/NETFS/default/60_store_NETFS_variables.sh
|
Shell
|
gpl-2.0
| 261 |
#!/bin/bash
SCM_URL=https://github.com/cisco/openh264
SCM_TAG=master
source $(dirname "${BASH_SOURCE[0]}")/android-build-common.sh
function build {
echo "Building architecture $1..."
BASE=$(pwd)
common_run cd $BUILD_SRC
PATH=$ANDROID_NDK:$PATH
MAKE="make PATH=$PATH ENABLEPIC=Yes OS=android NDKROOT=$ANDROID_NDK TARGET=android-$2 NDKLEVEL=$2 ARCH=$1 -j libraries"
common_run git clean -xdf
common_run export QUIET_AR="$CCACHE "
common_run export QUIET_ASM="$CCACHE "
common_run export QUIET_CC="$CCACHE "
common_run export QUIET_CCAR="$CCACHE "
common_run export QUIET_CXX="$CCACHE "
common_run $MAKE
# Install creates a non optimal directory layout, fix that
common_run $MAKE PREFIX=$BUILD_SRC/libs/$1 install
common_run cd $BASE
}
# Run the main program.
common_parse_arguments $@
common_check_requirements
common_update $SCM_URL $SCM_TAG $BUILD_SRC
common_clean $BUILD_DST
for ARCH in $BUILD_ARCH
do
case $ARCH in
"armeabi")
OARCH="arm"
;;
"armeabi-v7a")
OARCH="arm"
;;
"arm64-v8a")
OARCH="arm64"
;;
*)
OARCH=$ARCH
;;
esac
echo "$ARCH=$OARCH"
build $OARCH $NDK_TARGET
if [ ! -d $BUILD_DST/$ARCH/include ];
then
common_run mkdir -p $BUILD_DST/$ARCH/include
fi
common_run cp -L -r $BUILD_SRC/libs/$OARCH/include/ $BUILD_DST/$ARCH/
if [ ! -d $BUILD_DST/$ARCH ];
then
common_run mkdir -p $BUILD_DST/$ARCH
fi
common_run cp -L $BUILD_SRC/libs/$OARCH/lib/*.so $BUILD_DST/$ARCH/
done
|
bmiklautz/FreeRDP
|
scripts/android-build-openh264.sh
|
Shell
|
apache-2.0
| 1,436 |
#! /bin/sh
# Test old-style module crap.
BITNESS=32
rm -rf tests/tmp/*
# Create inputs
MODULE_DIR=tests/tmp/lib/modules/$MODTEST_UNAME
mkdir -p $MODULE_DIR/drivers/type
ln tests/data/$BITNESS/normal/noexport_nodep-$BITNESS.ko \
$MODULE_DIR/drivers/type
mkdir -p $MODULE_DIR/other/type
ln tests/data/$BITNESS/normal/export_nodep-$BITNESS.ko \
$MODULE_DIR/other/type
# Set up modules.dep file.
echo "# A comment" > $MODULE_DIR/modules.dep
echo "/lib/modules/$MODTEST_UNAME/drivers/type/noexport_nodep-$BITNESS.ko:" >> $MODULE_DIR/modules.dep
echo "/lib/modules/$MODTEST_UNAME/other/type/export_nodep-$BITNESS.ko:" >> $MODULE_DIR/modules.dep
SIZE1=`wc -c < tests/data/$BITNESS/normal/noexport_nodep-$BITNESS.ko`
SIZE2=`wc -c < tests/data/$BITNESS/normal/export_nodep-$BITNESS.ko`
# -l lists all of them (either order)
[ "`modprobe -l 2>&1`" = "/lib/modules/$MODTEST_UNAME/drivers/type/noexport_nodep-$BITNESS.ko
/lib/modules/$MODTEST_UNAME/other/type/export_nodep-$BITNESS.ko" ] ||
[ "`modprobe -l 2>&1`" = "/lib/modules/$MODTEST_UNAME/other/type/export_nodep-$BITNESS.ko
/lib/modules/$MODTEST_UNAME/drivers/type/noexport_nodep-$BITNESS.ko" ]
# -l -t foo lists none of them.
[ "`modprobe -l -t foo 2>&1`" = "" ]
# -l -t type lists all of them (either order)
[ "`modprobe -l -t type 2>&1`" = "/lib/modules/$MODTEST_UNAME/drivers/type/noexport_nodep-$BITNESS.ko
/lib/modules/$MODTEST_UNAME/other/type/export_nodep-$BITNESS.ko" ] ||
[ "`modprobe -l -t type 2>&1`" = "/lib/modules/$MODTEST_UNAME/other/type/export_nodep-$BITNESS.ko
/lib/modules/$MODTEST_UNAME/drivers/type/noexport_nodep-$BITNESS.ko" ]
# -l -t drivers lists one.
[ "`modprobe -l -t drivers 2>&1`" = "/lib/modules/$MODTEST_UNAME/drivers/type/noexport_nodep-$BITNESS.ko" ]
# -l -t drivers/type lists one.
[ "`modprobe -l -t drivers/type 2>&1`" = "/lib/modules/$MODTEST_UNAME/drivers/type/noexport_nodep-$BITNESS.ko" ]
# -l -t other lists one.
[ "`modprobe -l -t other 2>&1`" = "/lib/modules/$MODTEST_UNAME/other/type/export_nodep-$BITNESS.ko" ]
# -l -t other/type lists one.
[ "`modprobe -l -t other/type 2>&1`" = "/lib/modules/$MODTEST_UNAME/other/type/export_nodep-$BITNESS.ko" ]
# Wildcard works.
[ "`modprobe -l -t type 'noexport-nodep*' 2>&1`" = "/lib/modules/$MODTEST_UNAME/drivers/type/noexport_nodep-$BITNESS.ko" ]
# -t type without -l not supported
modprobe -t type 2>&1 | grep -q Usage
modprobe -a -t type 2>&1 | grep -q Usage
# -a with one arg succeeds.
[ "`modprobe -a noexport_nodep-$BITNESS 2>&1`" = "INIT_MODULE: $SIZE1 " ]
# ... even with - and _ confused.
[ "`modprobe -a noexport-nodep_$BITNESS 2>&1`" = "INIT_MODULE: $SIZE1 " ]
# With two args succeeds.
[ "`modprobe -a noexport_nodep-$BITNESS export_nodep-$BITNESS 2>&1`" = "INIT_MODULE: $SIZE1
INIT_MODULE: $SIZE2 " ]
# Does second even if first screws up.
[ "`modprobe -a crap export_nodep-$BITNESS 2>&1`" = "WARNING: Module crap not found.
INIT_MODULE: $SIZE2 " ]
|
zhang-xin/kdi
|
usermode/module-init-tools-3.12/tests/test-modprobe/20oldstyle.sh
|
Shell
|
gpl-2.0
| 2,920 |
#
# For a description of the syntax of this configuration file,
# see extra/config/Kconfig-language.txt
#
config TARGET_ARCH
string
default "sh"
config FORCE_OPTIONS_FOR_ARCH
bool
default y
select ARCH_ANY_ENDIAN
config ARCH_CFLAGS
string
choice
prompt "Target Processor Type"
default CONFIG_SH4
help
This is the processor type of your CPU. This information is used for
optimizing purposes, as well as to determine if your CPU has an MMU,
an FPU, etc. If you pick the wrong CPU type, there is no guarantee
that uClibc will work at all....
Here are the available choices:
- "SH2A" Renesas SH-2A (SH7206)
- "SH2" SuperH SH-2
- "SH3" SuperH SH-3
- "SH4" SuperH SH-4
config CONFIG_SH2A
select ARCH_HAS_NO_MMU
select HAVE_NO_PIC
bool "SH2A"
config CONFIG_SH2
select ARCH_HAS_NO_MMU
bool "SH2"
config CONFIG_SH3
select ARCH_HAS_MMU
bool "SH3"
config CONFIG_SH4
select FORCE_SHAREABLE_TEXT_SEGMENTS
bool "SH4"
endchoice
|
kidmaple/CoolWall
|
uClibc/extra/Configs/Config.sh
|
Shell
|
gpl-2.0
| 969 |
#!/usr/bin/env bash
get_counts.sh data/int_20k 3 data/counts_20k_3
mkdir -p data/optimize_20k_3
get_initial_metaparameters.py \
--ngram-order=3 \
--names=data/counts_20k_3/names \
--num-train-sets=$(cat data/counts_20k_3/num_train_sets) > data/optimize_20k_3/0.metaparams
validate_metaparameters.py \
--ngram-order=3 \
--num-train-sets=$(cat data/counts_20k_3/num_train_sets) \
data/optimize_20k_3/0.metaparams
get_objf_and_derivs.py --derivs-out=data/optimize_20k_3/0.derivs \
data/counts_20k_3 data/optimize_20k_3/0.{metaparams,objf} data/optimize_20k_3/work.0
validate_metaparameter_derivs.py \
--ngram-order=3 \
--num-train-sets=$(cat data/counts_20k_3/num_train_sets) \
data/optimize_20k_3/0.{metaparams,derivs}
test_metaparameter_derivs.py \
data/optimize_20k_3/0.metaparams \
data/counts_20k_3 data/optimize_20k_3/temp
# test_metaparameter_derivs.py: analytical and difference-method derivatives agree 98.641527436%
|
keli78/pocolm
|
egs/tedlium/local/self_test.sh
|
Shell
|
apache-2.0
| 968 |
#!/bin/bash
#
# Copyright 2008-2009 LinkedIn, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
base_dir=$(dirname $0)/../../../
for file in $base_dir/dist/*.jar;
do
HADOOP_CLASSPATH=$HADOOP_CLASSPATH:$file
done
for file in $base_dir/lib/*.jar;
do
HADOOP_CLASSPATH=$HADOOP_CLASSPATH:$file
done
HADOOP_CLASSPATH=$HADOOP_CLASSPATH:$base_dir/dist/resources
export HADOOP_CLASSPATH
$HADOOP_HOME/bin/hadoop voldemort.store.readonly.mr.HadoopStoreJobRunner $@
|
birendraa/voldemort
|
contrib/hadoop-store-builder/bin/hadoop-build-readonly-store.sh
|
Shell
|
apache-2.0
| 980 |
echo ===============================================================================
echo \[tabescape_off.sh\]: test for tab escaping off
$srcdir/killrsyslog.sh # kill rsyslogd if it runs for some reason
./nettester -ttabescape_off -iudp
if [ "$?" -ne "0" ]; then
exit 1
fi
echo test via tcp
./nettester -ttabescape_off -itcp
if [ "$?" -ne "0" ]; then
exit 1
fi
|
rangochan/rsyslog
|
tests/tabescape_off.sh
|
Shell
|
gpl-3.0
| 368 |
#!/bin/bash
alias azure=azure.cmd
# Set variables for existing resource group
existingRGName="IaaSStory"
location="westus"
vnetName="WTestVNet"
backendSubnetName="BackEnd"
remoteAccessNSGName="NSG-RemoteAccess"
# Set variables to use for backend resource group
backendRGName="IaaSStory-Backend"
prmStorageAccountName="iaasstoryprmstorage"
avSetName="ASDB"
vmSize="Standard_DS3"
diskSize=127
publisher="Canonical"
offer="UbuntuServer"
sku="14.04.5-LTS"
version="latest"
vmNamePrefix="DB"
osDiskName="osdiskdb"
dataDiskName="datadisk"
nicNamePrefix="NICDB"
ipAddressPrefix="192.168.2."
username='adminuser'
password='adminP@ssw0rd'
numberOfVMs=2
# Retrieve the Ids for resources in the IaaSStory resource group
subnetId="$(azure network vnet subnet show --resource-group $existingRGName \
--vnet-name $vnetName \
--name $backendSubnetName|grep Id)"
subnetId=${subnetId#*/}
nsgId="$(azure network nsg show --resource-group $existingRGName \
--name $remoteAccessNSGName|grep Id)"
nsgId=${nsgId#*/}
# Create necessary resources for VMs
azure group create $backendRGName $location
azure storage account create $prmStorageAccountName \
--resource-group $backendRGName \
--location $location --type PLRS
azure availset create --resource-group $backendRGName \
--location $location \
--name $avSetName
# Loop to create NICs and VMs
for ((suffixNumber=1;suffixNumber<=numberOfVMs;suffixNumber++));
do
# Create NIC for database access
nic1Name=$nicNamePrefix$suffixNumber-DA
x=$((suffixNumber+3))
ipAddress1=$ipAddressPrefix$x
azure network nic create --name $nic1Name \
--resource-group $backendRGName \
--location $location \
--private-ip-address $ipAddress1 \
--subnet-id $subnetId
# Create NIC for remote access
nic2Name=$nicNamePrefix$suffixNumber-RA
x=$((suffixNumber+53))
ipAddress2=$ipAddressPrefix$x
azure network nic create --name $nic2Name \
--resource-group $backendRGName \
--location $location \
--private-ip-address $ipAddress2 \
--subnet-id $subnetId $vnetName \
--network-security-group-id $nsgId
#Create the VM
azure vm create --resource-group $backendRGName \
--name $vmNamePrefix$suffixNumber \
--location $location \
--vm-size $vmSize \
--subnet-id $subnetId \
--availset-name $avSetName \
--nic-names $nic1Name,$nic2Name \
--os-type linux \
--image-urn $publisher:$offer:$sku:$version \
--storage-account-name $prmStorageAccountName \
--storage-account-container-name vhds \
--os-disk-vhd $osDiskName$suffixNumber.vhd \
--admin-username $username \
--admin-password $password
#Create two data disks, and end the loop.
azure vm disk attach-new --resource-group $backendRGName \
--vm-name $vmNamePrefix$suffixNumber \
--storage-account-name $prmStorageAccountName \
--storage-account-container-name vhds \
--vhd-name $dataDiskName$suffixNumber-1.vhd \
--size-in-gb $diskSize \
--lun 0
azure vm disk attach-new --resource-group $backendRGName \
--vm-name $vmNamePrefix$suffixNumber \
--storage-account-name $prmStorageAccountName \
--storage-account-container-name vhds \
--vhd-name $dataDiskName$suffixNumber-2.vhd \
--size-in-gb $diskSize \
--lun 1
done
|
robotechredmond/azure-quickstart-templates
|
IaaS-Story/11-MultiNIC/arm/virtual-network-deploy-multinic-arm-cli.sh
|
Shell
|
mit
| 3,479 |
#!/bin/bash
# GPG support is optional
# called by dracut
check() {
require_binaries gpg || return 1
return 255
}
# called by dracut
depends() {
echo crypt
}
# called by dracut
install() {
inst_multiple gpg
inst "$moddir/crypt-gpg-lib.sh" "/lib/dracut-crypt-gpg-lib.sh"
}
|
matlinuxer2/dracut
|
modules.d/91crypt-gpg/module-setup.sh
|
Shell
|
gpl-2.0
| 295 |
# Aliases
alias g='git'
compdef g=git
alias gst='git status'
compdef _git gst=git-status
alias gl='git pull'
compdef _git gl=git-pull
alias gup='git pull --rebase'
compdef _git gup=git-fetch
alias gp='git push'
compdef _git gp=git-push
alias gd='git diff'
gdv() { git diff -w "$@" | view - }
compdef _git gdv=git-diff
alias gc='git commit -v'
compdef _git gc=git-commit
alias gca='git commit -v -a'
compdef _git gca=git-commit
alias gco='git checkout'
compdef _git gco=git-checkout
alias gcm='git checkout master'
alias gr='git remote'
compdef _git gr=git-remote
alias grv='git remote -v'
compdef _git grv=git-remote
alias grmv='git remote rename'
compdef _git grmv=git-remote
alias grrm='git remote remove'
compdef _git grrm=git-remote
alias grset='git remote set-url'
compdef _git grset=git-remote
alias grup='git remote update'
compdef _git grup=git-remote
alias gb='git branch'
compdef _git gb=git-branch
alias gba='git branch -a'
compdef _git gba=git-branch
alias gcount='git shortlog -sn'
compdef gcount=git
alias gcl='git config --list'
alias gcp='git cherry-pick'
compdef _git gcp=git-cherry-pick
alias glg='git log --stat --max-count=5'
compdef _git glg=git-log
alias glgg='git log --graph --max-count=5'
compdef _git glgg=git-log
alias glgga='git log --graph --decorate --all'
compdef _git glgga=git-log
alias gss='git status -s'
compdef _git gss=git-status
alias ga='git add'
compdef _git ga=git-add
alias gm='git merge'
compdef _git gm=git-merge
alias grh='git reset HEAD'
alias grhh='git reset HEAD --hard'
alias gwc='git whatchanged -p --abbrev-commit --pretty=medium'
alias gf='git ls-files | grep'
alias gpoat='git push origin --all && git push origin --tags'
# Will cd into the top of the current repository
# or submodule.
alias grt='cd $(git rev-parse --show-toplevel || echo ".")'
# Git and svn mix
alias git-svn-dcommit-push='git svn dcommit && git push github master:svntrunk'
compdef git-svn-dcommit-push=git
alias gsr='git svn rebase'
alias gsd='git svn dcommit'
#
# Will return the current branch name
# Usage example: git pull origin $(current_branch)
#
function current_branch() {
ref=$(git symbolic-ref HEAD 2> /dev/null) || \
ref=$(git rev-parse --short HEAD 2> /dev/null) || return
echo ${ref#refs/heads/}
}
function current_repository() {
ref=$(git symbolic-ref HEAD 2> /dev/null) || \
ref=$(git rev-parse --short HEAD 2> /dev/null) || return
echo $(git remote -v | cut -d':' -f 2)
}
# these aliases take advantage of the previous function
alias ggpull='git pull origin $(current_branch)'
compdef ggpull=git
alias ggpush='git push origin $(current_branch)'
compdef ggpush=git
alias ggpnp='git pull origin $(current_branch) && git push origin $(current_branch)'
compdef ggpnp=git
|
bkono/oh-my-zsh
|
plugins/git/git.plugin.zsh
|
Shell
|
mit
| 2,728 |
# Add final information to the script.
cat >> $LAYOUT_CODE <<EOF
set +x
set +e
LogPrint "Disk layout created."
EOF
|
krissi/rear
|
usr/share/rear/layout/prepare/default/55_finalize_script.sh
|
Shell
|
gpl-2.0
| 118 |
#!/bin/bash -eu
# This test measures master replication overhead by running a number of
# autocommit queries with wsrep_on set to 0 and 1.
#
# NOTES:
# - The load was deliberately chosen to produce maximum replication overhead.
# - SQL commands are first dumped into a text file in order to minimize client
# overhead when benchmarking.
declare -r DIST_BASE=$(cd $(dirname $0)/../..; pwd -P)
TEST_BASE=${TEST_BASE:-"$DIST_BASE"}
. $TEST_BASE/conf/main.conf
declare -r SCRIPTS="$DIST_BASE/scripts"
. $SCRIPTS/jobs.sh
. $SCRIPTS/action.sh
declare -r TABLE_NAME="memory"
declare -r TABLE="$DBMS_TEST_SCHEMA.$TABLE_NAME"
declare -r TABLE_DEFINITION="(c1 INT AUTO_INCREMENT PRIMARY KEY, c2 CHAR(255))"
MYSQL="mysql -u$DBMS_TEST_USER -p$DBMS_TEST_PSWD"
MYSQL="$MYSQL -h${NODE_INCOMING_HOST[0]} -P${NODE_INCOMING_PORT[0]} -B"
prepare()
{
stop
start_node -g gcomm:// --mysql-opt --wsrep-new-cluster 0
echo -n "Preparing... "
$MYSQL -e "DROP TABLE IF EXISTS $TABLE;
CREATE TABLE $TABLE $TABLE_DEFINITION;
INSERT INTO $TABLE(c2) VALUES('abc');"
# make sure the latest writeset protocol is used
$MYSQL -e "SET GLOBAL wsrep_provider_options='repl.proto_max=10'"
echo "done"
}
# load() will progressively generate larger and larger writesets in geometric
# progression, by inserting the table into itself. At some point the writeset
# will have to spill to disk and trigger the bug.
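# Illustrative growth assuming the single seed row from prepare(): after
# iteration n the table holds 2^(n+1) rows, so the 21 iterations of
# 'seq 0 20' finish with roughly two million rows.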
load()
{
echo -e "Rows\tSeconds\tRSS"
for rows in $(seq 0 20)
do
echo -en "$(( 1 << $rows ))\t"
begin=$SECONDS
$MYSQL -e "INSERT INTO $TABLE(c2) SELECT c2 FROM $TABLE;"
seconds=$(( $SECONDS - $begin ))
echo -en "$seconds\t"; ps --no-headers -C mysqld -o rss || ps --no-headers -C mysqld-debug -o rss
done
}
prepare
load 1
|
percona/debian-percona-galera-3
|
tests/regressions/lp1255964/run.sh
|
Shell
|
gpl-2.0
| 1,805 |
# -------------------------------------------------------------------------------------------------
# Copyright (c) 2015 zsh-syntax-highlighting contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted
# provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this list of conditions
# and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of
# conditions and the following disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the zsh-syntax-highlighting contributors nor the names of its contributors
# may be used to endorse or promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -------------------------------------------------------------------------------------------------
# -*- mode: zsh; sh-indentation: 2; indent-tabs-mode: nil; sh-basic-offset: 2; -*-
# vim: ft=zsh sw=2 ts=2 et
# -------------------------------------------------------------------------------------------------
BUFFER='() echo hello; () { echo world } "argument"'
expected_region_highlight=(
"1 2 reserved-word" # ()
"4 7 builtin" # echo
"9 13 default" # hello
"14 14 commandseparator" # ;
"16 17 reserved-word" # ()
"19 19 reserved-word" # {
"21 24 builtin" # echo
"26 30 default" # world
"32 32 reserved-word" # }
"34 43 default" # "argument"
"34 43 double-quoted-argument" # "argument"
)
|
codeprimate/arid
|
zsh/zsh-syntax-highlighting/highlighters/main/test-data/anonymous-function.zsh
|
Shell
|
bsd-2-clause
| 2,396 |
#!/usr/bin/env sh
set -e
node ./tablegen-x86.js $@
|
johnparker007/mame
|
3rdparty/asmjit/tools/tablegen.sh
|
Shell
|
gpl-2.0
| 51 |
#!/bin/bash
#
# Script to run gjslint on only the modified or added files in the current
# branch. Should be run from the base git directory with the PR branch checked
# out.
USER_BASE=$(python -c "import site;import sys;sys.stdout.write(site.USER_BASE)")
CURRENT_BRANCH=$(git rev-parse --abbrev-ref HEAD)
CHANGED_FILES=$(git diff --name-only --diff-filter=AM master..$CURRENT_BRANCH |
grep -E "\.js$" | grep -v -E "test\.js$")
if [[ -n "$CHANGED_FILES" ]]; then
set -x
$USER_BASE/bin/gjslint \
--strict \
--jslint_error=all \
--exclude_files=deps.js,alltests.js,protractor.conf.js,protractor_spec.js,browser_capabilities.js \
$CHANGED_FILES;
else
echo "No .js files found to lint in this Pull Request."
fi
|
NROER/drupal-intranet
|
sites/all/libraries/openlayers3/closure-library/scripts/ci/lint_pull_request.sh
|
Shell
|
gpl-2.0
| 735 |
#!/bin/sh
TEST_SCRIPT=./VMake/executableTester.sh
until test -r ${TEST_SCRIPT} ; do
TEST_SCRIPT=../${TEST_SCRIPT}
done
. ${TEST_SCRIPT}
runAndHandleSystemTest "testParallelPipedQuadEL " "$0" "$@"
|
kelchuan/snac_thesis
|
StGermain/Discretisation/Mesh/tests/testParallelPipedQuadEL.2of3.sh
|
Shell
|
gpl-2.0
| 206 |
#!/bin/bash
export CPATH="$PREFIX/include"
export LIBRARY_PATH="$PREFIX/lib"
make
mkdir -p $PREFIX/bin
cp bioawk $PREFIX/bin
|
dmaticzka/bioconda-recipes
|
recipes/bioawk/build.sh
|
Shell
|
mit
| 128 |
PGBIN=/usr/local/pgsql/bin
echo "Creating test cluster... "
mkdir testdata
chmod 700 testdata
$PGBIN/initdb -D testdata > log/test.log 2>&1
echo "Starting test cluster... "
$PGBIN/pg_ctl -l log/backend.log -D testdata start >> log/test.log 2>&1
sleep 2
echo "Inserting test data... "
$PGBIN/psql postgres < sql/test.sql >> log/test.log 2>&1
echo "========================================="
echo "Testing plain dump... "
../xlogdump -T testdata/pg_xlog/000000010000000000000000 > results/dump.result
if [ "`diff results/dump.result expected/dump.result`" = "" ];
then
echo "Test passed"
else
echo "Test failed!"
fi
echo "========================================="
echo "Testing transactions dump..."
../xlogdump -t testdata/pg_xlog/000000010000000000000000 > results/transactions.result
if [ "`diff results/transactions.result expected/transactions.result`" = "" ];
then
echo "Test passed"
else
echo "Test failed!"
fi
echo "========================================="
echo "Testing dump with translated names..."
../xlogdump -T -h localhost testdata/pg_xlog/000000010000000000000000 > results/names.result
if [ "`diff results/names.result expected/names.result`" = "" ];
then
echo "Test passed"
else
echo "Test failed!"
fi
echo "========================================="
echo "Stoping test cluster... "
$PGBIN/pg_ctl -D testdata stop >> log/test.log 2>&1
echo "Removing test cluster... "
rm -rf testdata
echo "Done!"
|
foyzur/gpdb
|
contrib/xlogdump/test/test.sh
|
Shell
|
apache-2.0
| 1,422 |
#
# Copyright (C) 2014 OpenWrt.org
#
platform_get_rootfs() {
local rootfsdev
if read cmdline < /proc/cmdline; then
case "$cmdline" in
*block2mtd=*)
rootfsdev="${cmdline##*block2mtd=}"
rootfsdev="${rootfsdev%%,*}"
;;
*root=*)
rootfsdev="${cmdline##*root=}"
rootfsdev="${rootfsdev%% *}"
;;
esac
echo "${rootfsdev}"
fi
}
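# Illustrative (made-up cmdline): for "console=ttyS0 root=/dev/sda2 rw" the
# function above echoes "/dev/sda2"; for a block2mtd= cmdline it echoes the
# device name in front of the first comma.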
platform_copy_config() {
case "$(board_name)" in
erlite)
mount -t vfat /dev/sda1 /mnt
cp -af "$CONF_TAR" /mnt/
umount /mnt
;;
esac
}
platform_do_flash() {
local tar_file=$1
local board=$2
local kernel=$3
local rootfs=$4
mkdir -p /boot
mount -t vfat /dev/$kernel /boot
[ -f /boot/vmlinux.64 -a ! -L /boot/vmlinux.64 ] && {
mv /boot/vmlinux.64 /boot/vmlinux.64.previous
mv /boot/vmlinux.64.md5 /boot/vmlinux.64.md5.previous
}
echo "flashing kernel to /dev/$kernel"
tar xf $tar_file sysupgrade-$board/kernel -O > /boot/vmlinux.64
md5sum /boot/vmlinux.64 | cut -f1 -d " " > /boot/vmlinux.64.md5
echo "flashing rootfs to ${rootfs}"
tar xf $tar_file sysupgrade-$board/root -O | dd of="${rootfs}" bs=4096
sync
umount /boot
}
platform_do_upgrade() {
local tar_file="$1"
local board=$(board_name)
local rootfs="$(platform_get_rootfs)"
local kernel=
[ -b "${rootfs}" ] || return 1
case "$board" in
erlite)
kernel=sda1
;;
er)
kernel=mmcblk0p1
;;
*)
return 1
esac
platform_do_flash $tar_file $board $kernel $rootfs
return 0
}
platform_check_image() {
local board=$(board_name)
case "$board" in
erlite | \
er)
local tar_file="$1"
local kernel_length=`(tar xf $tar_file sysupgrade-$board/kernel -O | wc -c) 2> /dev/null`
local rootfs_length=`(tar xf $tar_file sysupgrade-$board/root -O | wc -c) 2> /dev/null`
[ "$kernel_length" = 0 -o "$rootfs_length" = 0 ] && {
echo "The upgarde image is corrupt."
return 1
}
return 0
;;
esac
echo "Sysupgrade is not yet supported on $board."
return 1
}
|
greedymouse/openwrt
|
target/linux/octeon/base-files/lib/upgrade/platform.sh
|
Shell
|
gpl-2.0
| 1,926 |
#!/bin/sh
# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
echo "Generating self-signed cert"
mkdir -p /certs
openssl req -x509 -sha256 -nodes -days 365 -newkey rsa:2048 \
-keyout /certs/privateKey.key \
-out /certs/certificate.crt \
-subj "/C=UK/ST=Warwickshire/L=Leamington/O=OrgName/OU=IT Department/CN=example.com"
# If we're running on Windows, skip loading the Linux .so modules.
if [ "$(uname)" = "Windows_NT" ]; then
sed -i -E "s/^(load_module modules\/ndk_http_module.so;)$/#\1/" conf/nginx.conf
sed -i -E "s/^(load_module modules\/ngx_http_lua_module.so;)$/#\1/" conf/nginx.conf
sed -i -E "s/^(load_module modules\/ngx_http_lua_upstream_module.so;)$/#\1/" conf/nginx.conf
# NOTE(claudiub): on Windows, nginx will take the paths in the nginx.conf file as relative paths.
cmd /S /C "mklink /D C:\\openresty\\certs C:\\certs"
fi
echo "Starting nginx"
nginx -g "daemon off;"
|
krmayankk/kubernetes
|
test/images/echoserver/run.sh
|
Shell
|
apache-2.0
| 1,429 |
#!/bin/sh
#
# dex2jar - Tools to work with android .dex and java .class files
# Copyright (c) 2009-2012 Panxiaobo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# copy from $Tomcat/bin/startup.sh
# resolve links - $0 may be a softlink
PRG="$0"
while [ -h "$PRG" ] ; do
ls=`ls -ld "$PRG"`
link=`expr "$ls" : '.*-> \(.*\)$'`
if expr "$link" : '/.*' > /dev/null; then
PRG="$link"
else
PRG=`dirname "$PRG"`/"$link"
fi
done
PRGDIR=`dirname "$PRG"`
#
# call d2j_invoke.sh to setup java environment
"$PRGDIR/d2j_invoke.sh" "com.googlecode.dex2jar.tools.DeObfInitCmd" "$@"
|
gianina-ingenuity/titanium-branch-deep-linking
|
testbed/x/plugins/com.appcelerator.test/versions/8244.293/MakeAppTouchTestable/android/dex2jar/d2j-init-deobf.sh
|
Shell
|
mit
| 1,091 |
#!/bin/bash
clear
echo "Classes generation thru schemas XSD - JAXB"
echo ""
echo "Working dir"
pwd
echo ""
export DEFAULT_XSD_DIR="./schemas/nfe/PL_009_V4_V1.30"
export DEFAULT_PACKAGE_NAME="io.echosystem.ebr.nfe.bind.v4.sv130"
export DEFAULT_DESTINATION_DIR="./classes-geradas/v4130"
read -p "XSD files directory [$DEFAULT_XSD_DIR]: " XSD_DIR
read -p "Package name [$DEFAULT_PACKAGE_NAME]: " PACKAGE_NAME
read -p "Destination dir [$DEFAULT_DESTINATION_DIR]: " DESTINATION_DIR
if [ "$XSD_DIR" == "" ]; then
export XSD_DIR=$DEFAULT_XSD_DIR
fi
if [ "$PACKAGE_NAME" == "" ]; then
export PACKAGE_NAME=$DEFAULT_PACKAGE_NAME
fi
if [ "$DESTINATION_DIR" == "" ]; then
export DESTINATION_DIR=$DEFAULT_DESTINATION_DIR
fi
echo ""
echo "Working arguments:"
echo ""
echo "XSD files directory: $XSD_DIR"
echo "Package name: $PACKAGE_NAME"
echo "Destination dir: $DESTINATION_DIR"
echo ""
if [ -f $DESTINATION_DIR ]; then
echo "Destination dir already is a regular file"
exit -1
fi
if [ -d $DESTINATION_DIR ]; then
export OVERWRITE="?"
while [[ "$OVERWRITE" != "y" && "$OVERWRITE" != "n" ]]
do
read -p "Destination dir already exists, overwrite?[y/n]: " OVERWRITE
if [[ "$OVERWRITE" != "y" && "$OVERWRITE" != "Y" ]]; then
echo "Generation aborted."
exit -1;
fi
done
rm $DESTINATION_DIR/*
else
mkdir $DESTINATION_DIR
fi
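# xjc flags used below: -d sets the output directory, -p the target package,
# -npa suppresses package-info.java annotation generation, and -nv skips
# strict validation of the input schemas (see 'xjc -help').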
for XSD_FILE in $XSD_DIR/*; do
echo $XSD_FILE
xjc -d $DESTINATION_DIR -p $PACKAGE_NAME -encoding UTF-8 -npa -nv $XSD_FILE
done
tree $DESTINATION_DIR
|
rodolphopicolo/e-BR
|
geracao-classes/gerar-classes.sh
|
Shell
|
mit
| 1,492 |
#!/usr/bin/env bash
echo "You need JQ and the AWS CLI installed in your machine to be able to run this script"
echo "Choose the db instance identifier:"
read -e dbInstanceIdentifier
aws rds describe-db-log-files --db-instance-identifier ${dbInstanceIdentifier} > /tmp/${dbInstanceIdentifier}-describe-db-log-files
cat /tmp/${dbInstanceIdentifier}-describe-db-log-files | jq '[.DescribeDBLogFiles][0][].LogFileName' > /tmp/${dbInstanceIdentifier}-production-logs-filename
#cat /tmp/${dbInstanceIdentifier}-production-logs-filename | xargs -I{} sh -c "xxx() { if test "$1" != "${1%/*}"; then mkdir -p ${1%/*}; fi && command tee "$1"; }; aws rds download-db-log-file-portion --db-instance-identifier ${dbInstanceIdentifier} --log-file-name {} | xxx {}"
cat /tmp/${dbInstanceIdentifier}-production-logs-filename | awk -F "/" '{print substr($1,2,length($1))}' | xargs -I{} sh -c 'mkdir -p {}'
cat /tmp/${dbInstanceIdentifier}-production-logs-filename | xargs -I{} sh -c "aws rds download-db-log-file-portion --db-instance-identifier ${dbInstanceIdentifier} --log-file-name {} > {}"
|
albertocubeddu/serverUtility
|
rdsLogExporter.sh
|
Shell
|
mit
| 1,082 |
#!/bin/sh
set -ex
PYI_PATH=$(python -c "import PyInstaller as _; print(_.__path__[0])")
PYI_RTH_PKGUTIL=$PYI_PATH/hooks/rthooks/pyi_rth_pkgutil.py
if [ "$RUNNER_OS" == "Windows" ]; then
unix2dos ./pyi_rth_pkgutil.py.patch
fi
patch $PYI_RTH_PKGUTIL < ./pyi_rth_pkgutil.py.patch
if [ "$RUNNER_OS" == "Windows" ]; then
dos2unix ./pyi_rth_pkgutil.py.patch
fi
|
sunoru/pokemon_tournament
|
scripts/patch.sh
|
Shell
|
mit
| 364 |
#!/bin/bash
dotnet ../src/SizePhotos/bin/Debug/net5.0/SizePhotos.dll -i
|
AerisG222/SizePhotos
|
test/test_invalid.sh
|
Shell
|
mit
| 72 |
#!/bin/bash
source ${0%/*}/config.sh
set -e
echo
echo '---------------------------------'
echo 'Generate packages'
echo '---------------------------------'
pushd $ROOT_DIR
if [ ! -d $PCK_DIR ] ; then
mkdir $PCK_DIR
fi
rm -f $PCK_DIR/qtrb_device_$PCK_VERSION.zip
rm -f $PCK_DIR/qtrb_host_$PCK_VERSION.zip
rm -f $PCK_DIR/qtrb_sysroot_$PCK_VERSION.zip
zip -r $PCK_DIR/qtrb_device_$PCK_VERSION.zip $QT_OUTPUT_DIR
zip -r $PCK_DIR/qtrb_host_$PCK_VERSION.zip $QT_HOST_DIR
zip -r $PCK_DIR/qtrb_sysroot_$PCK_VERSION.zip $SYSROOT_IMG_FILEPATH
du -sh $PCK_DIR/qtrb_host_$PCK_VERSION.zip
du -sh $PCK_DIR/qtrb_device_$PCK_VERSION.zip
du -sh $PCK_DIR/qtrb_sysroot_$PCK_VERSION.zip
popd
|
GuillaumeLazar/qtrb
|
70_generate_packages.sh
|
Shell
|
mit
| 680 |
#!/usr/bin/env bash
# @author xiaofeng
# @usage sudo ztrace php_pid
ztrace_php7()
{
cat>$FILE<<EOF
import operator
import gdb
import time
import os
def ztrace():
so_path = os.path.abspath("$1")
print so_path
gdb.execute("set \$dl_handle = (void *)dlopen(\"" + so_path + "\", 1)")
gdb.execute("print \$dl_handle")
gdb.execute("set zend_compile_file = ztrace_compile_file")
gdb.execute("set zend_execute_ex = ztrace_execute_ex")
gdb.execute("set zend_execute_internal = ztrace_execute_internal")
gdb.execute("set zend_throw_exception_hook = ztrace_throw_exception_hook")
gdb.execute("c")
gdb.execute("set \$t = (int)dlclose(\$dl_handle)")
gdb.execute("print \$t")
return
if __name__ == "__main__":
gdb.execute("info proc")
ztrace()
EOF
}
env_check()
{
command -v php >/dev/null 2>&1 || { echo >&2 "php required"; exit 1; }
command -v python >/dev/null 2>&1 || { echo >&2 "python required"; exit 1; }
command -v gdb >/dev/null 2>&1 || { echo >&2 "gdb required"; exit 1; }
if [[ $EUID -ne 0 ]]; then
echo >&2 "root required"
exit 1
fi
    # Too slow!
# if [[ $(rpm -qa|grep '.*php.*debuginfo.*'|wc -l) == 0 ]]; then
# echo >&2 "php debuginfo required"
# exit 1
# fi
}
# echo "Usage: \"sudo $0 php_pid\"";
env_check
PHP_BIN=`which php`
PHP_VER=$(${PHP_BIN} -r 'echo PHP_MAJOR_VERSION;')
FILE=$(dirname $0)/zobjdump.py
if [ ${PHP_VER} == 5 ]; then
echo >&2 "php7 support only"
exit 1
fi
if [ $# -ge 2 ]; then
ztrace_php7 "/tmp/ztrace.so"
eval "gdb --batch -nx $PHP_BIN $1 -ex \"source ${FILE}\" 2>/dev/null" # | tail -n +6
fi
rm -f ${FILE}
|
goghcrow/php-minimalism
|
src/tools/ztrace_TODO/ztrace.sh
|
Shell
|
mit
| 1,679 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for USN-1599-1
#
# Security announcement date: 2012-10-09 00:00:00 UTC
# Script generation date: 2017-01-01 21:03:00 UTC
#
# Operating System: Ubuntu 12.04 LTS
# Architecture: i386
#
# Vulnerable packages fix on version:
# - linux-image-3.2.0-1420-omap4:3.2.0-1420.27
#
# Last versions recommended by security team:
# - linux-image-3.2.0-1420-omap4:3.2.0-1420.27
#
# CVE List:
# - CVE-2012-3520
# - CVE-2012-6539
# - CVE-2012-6540
# - CVE-2012-6541
# - CVE-2012-6542
# - CVE-2012-6544
# - CVE-2012-6545
# - CVE-2012-6546
# - CVE-2012-6689
# - CVE-2013-1827
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo apt-get install --only-upgrade linux-image-3.2.0-1420-omap4=3.2.0-1420.27 -y
|
Cyberwatch/cbw-security-fixes
|
Ubuntu_12.04_LTS/i386/2012/USN-1599-1.sh
|
Shell
|
mit
| 851 |
#!/bin/bash -eo pipefail
[ -d "$RUNDIR" ] || mkdir "$RUNDIR"
chown haproxy:haproxy "$RUNDIR"
chmod 2775 "$RUNDIR"
if [ ! -z "$PIPEWORK"]
then
/usr/bin/pipework --wait
fi
export ETCD_PORT=${ETCD_PORT:-4001}
export HOST_IP=${HOST_IP:-172.17.42.1}
export ETCD=$HOST_IP:4001
echo "[haproxy-confd] booting container. ETCD: $ETCD"
# Loop until confd has updated the haproxy config
until confd -onetime -node $ETCD -config-file /etc/confd/conf.d/haproxy.toml; do
echo "[haproxy-confd] waiting for confd to refresh haproxy.cfg"
sleep 5
done
# Run confd in the background to watch the upstream servers
confd -interval 10 -node $ETCD -config-file /etc/confd/conf.d/haproxy.toml
|
nicolerenee/docker-haproxy-confd
|
start.sh
|
Shell
|
mit
| 680 |
TITLE="Internet Outage Logger Script v5.1 - 2014/02/02"
# Written By: [email protected]
# sh "/mnt/usb8gb/active_system/scripts/logging/internet_outage_logger.sh" "$(nvram get wan_gateway)" "5g"
# ====================================================================================================
# [# Global Static Variables #]
SCRIPT_DIRECTORY="$(dirname $0)"
SCRIPTS_DIRECTORY="$(dirname "$SCRIPT_DIRECTORY")" # Scripts Root Directory
SCRIPTS_DEPENDENCIES_DIRECTORY="$SCRIPTS_DIRECTORY/dependencies" # All Scripts General Dependencies
# - - - - - - - - - - - - - - - - -
ShortLogSeconds=60 #Shorter log notation for anything less than this time.
DoNotLogSeconds=3 #Will not log if down for less than X seconds.
LogDirectoy="$(dirname "$(dirname "$SCRIPT_DIRECTORY")")/archives/internet_outage_logs" #"$(nvram get usb_disk_main)/active_system/archives/internet_outage_logs"
LogFilename="internet_outage_log"
ISP="COX Business 1year - 15mb Down / 3mb Up"
ZipCode="73013"
# - - - - - - - - - - - - - - - - -
LogLocation=$LogDirectoy"/"$LogFilename"_"$(date +%Y-%m_%B)".txt"
ADDRESS="$1"
LED="$2"
DownStateTimeSec=1 # DownStateTimeSec=$(date +%s)
DownStateTime="Start of Script"
lastConState="Up"
# [# Included Libraries & Scripts #] --------------------------------------------------------------------------------------
source "$SCRIPTS_DEPENDENCIES_DIRECTORY/color_text_functions.sh" # Include Color Functions
#source "$SCRIPTS_DEPENDENCIES_DIRECTORY/color_text_functions-nocolor.sh" # Include Non-Color Functions
# Functions ------------------------------------
checkFolder() { [ -d "$@" ] && echo "$(BLU "Folder Exists: $@")" || (echo "$(MAG "Making Folder: $@")"; mkdir -p "$@"); } #CheckFolder v3 - 2011/02/20
logPrepair()
{
#Check if Log Location needs Updating
if [ "$(echo $LogDirectoy"/"$LogFilename"_"$(date +%Y-%m_%B)".txt")" != "$LogLocation" ]; then
LogLocation=$LogDirectoy"/"$LogFilename"_"$(date +%Y-%m_%B)".txt"
BLK "Updating Log Location: "$LogFilename"_"$(date +%Y-%m_%B)".txt"
fi
# echo "$(GRN "Setting up Log File:") $(WHT "$LogLocation")"
if [ -f "$LogLocation" ]; then
#echo "$(RED "Deleting Log File:") $(BLK "$LogDirectoy/$dstLogFilename")"
#rm -f "$LogDirectoy/$dstLogFilename"
echo "$(GRN "Log File Exsist, using it:") $(BLK "$LogLocation")"
else
checkFolder "$LogDirectoy"
echo "$(YEL "Creating Log File:") $(BLK "$LogLocation")"
echo "# $TITLE" >> "$LogLocation"
echo "# Internet Service Provider: $ISP | Zip Code: $ZipCode" >> "$LogLocation"
echo "# Creating Log: $(date '+DATE: %m/%d/%y TIME: %r')" >> "$LogLocation"
echo "================================================================================" >> "$LogLocation"
echo "" >> "$LogLocation"
fi
}
# ConectionState ( CurrentState, LastState )
ConectionState()
{
CurrentState="$1"
LastState="$2"
BLK "- - - - - - - - - - - - - - - - - - - - - -"
WHT "$(BLU $ADDRESS) $(YEL CHANGED STATE!!!)"
BLK "LastState: $(GrnOrRed $LastState)"
BLK "CurrentState: $(GrnOrRed $CurrentState)"
BLK "- - - - - - - - - - - - - - - - - - - - - -"
TimeStampOutput "$CurrentState"
}
TimeStampOutput()
{
TimeStamp=$(date +%a\ %B\ %d\ \(%m/%d/%Y\)\ %I:%M:%S\ %p)
if [ $1 == "Down" ]; then
# LED Light Change
BLK "Setting LED: $(WHT "$LED") $(GRN "on")"
led $LED on
BLK "- - - - - - - - - - - - - - -"
# Write State to temp file for LCD Screen Readout
YEL "Writing State $(RED "Down") $(BLK "to Wan Status Log:") $(WHT "/tmp/wan_status.txt")"
echo "Down | $(date +%a\ %I:%M%P)" > /tmp/wan_status.txt
# Creating Time Stamps for Down State
DownStateTimeSec=$(date +%s)
DownStateTime=$TimeStamp
elif [ $1 == "Up" ]; then
# LED Light Change
BLK "Setting LED: $(WHT "$LED") $(YEL "off")"
led $LED off
BLK "- - - - - - - - - - - - - - -"
# Write State to temp file for LCD Screen Readout
YEL "Writing State $(GRN "Up") $(BLK "to Wan Status Log:") $(WHT "/tmp/wan_status.txt")"
echo "Up | $(date +%a\ %I:%M%P)" > /tmp/wan_status.txt
CurrentDateSec=$(date +%s) #This is to try and fix post processing problems
if [ $(($CurrentDateSec - $DownStateTimeSec)) -lt $DoNotLogSeconds ]; then
echo "$(BLU $ADDRESS) was $(RED Down) $(YEL "for < $DoNotLogSeconds sec, not logging.")"
elif [ $(($CurrentDateSec - $DownStateTimeSec)) -gt $ShortLogSeconds ]; then
# Down State Logs
echo "$(CYN $DownStateTime) | $(BLU $ADDRESS) $(WHT [$(RED Down)])"
echo "$DownStateTime | $ADDRESS [Down]" >> "$LogLocation"
logger -t WANStatus "$DownStateTime | $ADDRESS [Down]"
#===========================================================
# Up State Logs
TimePassed=$(timeOutput $(($CurrentDateSec - $DownStateTimeSec)))
echo "$(CYN $TimeStamp) | $(BLU $ADDRESS) $(WHT [$(GRN Up)])"
BLK "Was $(WHT [$(RED Down)]) $(BLK for) $(MAG $TimePassed)"
BLK "-------------------------------------------"
echo "$TimeStamp | $ADDRESS [Up]" >> "$LogLocation"
echo "Was [Down] for $TimePassed" >> "$LogLocation"
echo "-------------------------------------------" >> "$LogLocation"
logger -t WANStatus "$TimeStamp | $ADDRESS [Up]"
logger -t WANStatus "Was [Down] for $TimePassed"
#logger -t WANStatus "~------------------------------------------"
else
TimePassed=$(($CurrentDateSec - $DownStateTimeSec))
# If connection was down for less than X seconds
echo "$(CYN $TimeStamp) | $(BLU $ADDRESS) | $(YEL "Connection Was Down for $TimePassed second(s)")"
BLK "- - - - - - - - - - - - - - - - - - - - - -"
echo "$TimeStamp | $ADDRESS | Connection Was Down for $TimePassed second(s)" >> "$LogLocation"
echo "- - - - - - - - - - - - - - - - - - - - - -" >> "$LogLocation"
logger -t WANStatus "$TimeStamp | $ADDRESS | Connection Was Down for $TimePassed second(s)"
#logger -t WANStatus "~ - - - - - - - - - - - - - - - - - - - - -"
fi
else
WRN "TimeStampOutput: Error"
fi
}
timeOutput()
{
seconds=$1
days=$((seconds / 86400 ))
seconds=$((seconds % 86400))
hours=$((seconds / 3600))
seconds=$((seconds % 3600))
minutes=$((seconds / 60))
seconds=$((seconds % 60))
output=""
if [ $days -gt 0 ]; then
output="$days days(s) "
fi
if [ $hours -gt 0 ]; then
output="$output$hours hour(s) "
fi
if [ $minutes -gt 0 ]; then
output="$output$minutes minute(s) "
fi
if [ $seconds -gt 0 ]; then
output="$output$seconds seconds(s) "
fi
# echo "$hours hour(s) $minutes minute(s) $seconds second(s)"
echo "$output"
}
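# Worked example: timeOutput 90061 prints "1 days(s) 1 hour(s) 1 minute(s) 1 seconds(s) "
# since 90061 = 1*86400 + 1*3600 + 1*60 + 1.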
GrnOrRed()
{
[ $1 != "Up" ] && ( RED $1 ) || ( GRN $1 )
}
ResetWANAddress()
{
ADDRESS="$(nvram get wan_gateway)"
while sleep 3 && [ "$ADDRESS" != "${ADDRESS/0.0.0.0/}" ] || [ "$ADDRESS" != "${ADDRESS/192.168.1./}" ]; do
WRN "$ADDRESS NOT valid, attempting to reset WAN IP address"
logger -t WANStatus "$ADDRESS NOT valid, attempting to reset WAN IP address"
dhcpc-release
sleep 10
dhcpc-renew
sleep 3
ADDRESS="$(nvram get wan_gateway)"
done
}
# ==============================================================
# Variable for checking if script is already running.
BLK "$TITLE | $(WHT IP:) $(BLU $ADDRESS) $(BLK "|") $(WHT LED:) $(MAG $LED)"
logger -t WANStatus "$TITLE | IP: $ADDRESS | LED: $LED"
logPrepare # Setup Log File & Log Header
SCRIPT_STATUS=$(ps -w | grep -v $$ | grep `basename $0` | grep "$ADDRESS" | grep -v "grep" | wc -l)
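# ^ Counts other running copies of this script that watch the same address
# (excluding this PID and the grep itself), so duplicate instances can exit below.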
# Write State to temp file for LCD Screen Readout
# YEL "Writing State $(GRN "Up") $(BLK "to Wan Status Log:") $(WHT "/tmp/wan_status.txt")"
# echo "Up | $(date +%a\ %I:%M%P)" > /tmp/wan_status.txt
# Check Address
BLK "Testing Address for validity"
if [ "$ADDRESS" != "${ADDRESS/0.0.0.0/}" ] || [ "$ADDRESS" != "${ADDRESS/192.168.1./}" ]; then
# Connection Down
ConectionState "Down" "Up"
ResetWANAdress
ConectionState "Up" "Down"
fi
# [# Main #] --------------------------------------------------------------------------------------
sleep 1
if [ "$SCRIPT_STATUS" -le "2" ]; then # Checking if script is already running.
YEL "Pinging $(BLK with a wait time of) $(WHT "1 second(s)") $(BLK at) $(WHT 1 count),\n $(BLK "shorter notes of connection drop of less than") $(WHT "$ShortLogSeconds second(s)"),\n $(BLK "will not log for less than") $(WHT "$DoNotLogSeconds second(s)")."
while sleep 1; do
# ping -w 1 -c 1 $ADDRESS > /dev/null && (led $LED off; echo "$ADDRESS is up"; ConnectionState up $lastConState; lastConState="up") || (led $LED on; echo "$ADDRESS is down"; ConnectionState down $lastConState; lastConState="down")
if ping -w 1 -c 1 $ADDRESS > /dev/null; then
# Connection Up
echo "$(BLU $ADDRESS) is $(GRN Up)"
# logger -t WANStatus "$ADDRESS is Up"
else
if !(ping -w 1 -c 1 $ADDRESS > /dev/null); then
# Connection Down
ConectionState "Down" "Up"
#Setup Log File & Log Header
logPrepare
while !(ping -w 1 -c 1 $ADDRESS > /dev/null); do
if [ "$(nvram get wan_gateway)" != "${ADDRESS/0.0.0.0/}" ] || [ "$(nvram get wan_gateway)" != "${ADDRESS/192.168.1./}" ]; then
ADDRESS="$(nvram get wan_gateway)"
echo "$(BLU $ADDRESS) is $(RED Down), and Address is not valid, atempting reset"
ResetWANAddress
else
echo "$(BLU $ADDRESS) is $(RED Down)"
fi
done
ConectionState "Up" "Down"
# logger -t WANStatus "$ADDRESS is Down"
else
YEL "Possable False Down Time. Conection was down for less than a seciond"
fi
fi
done
else
WRN "($SCRIPT_STATUS) Script Process(s) Already Running for $ADDRESS, Exiting Script"
BLK "-- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- --"
echo "PID: $$"
ps -w | grep -v "grep" | grep -v $$ | grep `basename $0` | grep "$ADDRESS"
BLK "-- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- -- --"
fi
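# Example invocation (hypothetical LED name; assumes a Tomato-style router where
# `led <name> on|off` toggles a front-panel LED, as used above):
# ./internet_outage_logger.sh "$(nvram get wan_gateway)" amber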
|
AustinSaintAubin/linux-bash-scripts-library-core
|
logging/internet_outage_logger.sh
|
Shell
|
mit
| 9,791 |
#!/usr/bin/env bash
cd $(dirname $0)/..
$(yarn bin)/electron-builder --mac --win --x64 --publish never
mv './dist/Redmine Now-0.4.0.dmg' ./dist/mac/RedmineNowSetup-0.4.0.dmg
mv './dist/Redmine Now-0.4.0.dmg.blockmap' ./dist/mac
mv ./dist/latest-mac.yml ./dist/mac
mkdir -p ./dist/win
mv './dist/Redmine Now Setup 0.4.0.exe' ./dist/win/RedmineNowSetup-0.4.0.exe
mv './dist/Redmine Now Setup 0.4.0.exe.blockmap' ./dist/win
mv ./dist/latest.yml ./dist/win
|
emsk/redmine-now
|
bin/pack-all.sh
|
Shell
|
mit
| 455 |
#!/bin/sh
if [ "$1" = macos ]; then
if ! brew list fzf >/dev/null; then
brew_install fzf
"$(brew --prefix)/opt/fzf/install"
fi
elif [ "$1" = alpine ]; then
apk_add fzf
elif [ "$1" = debian ]; then
if ! type fzf >/dev/null; then
# Try installing with apt with fallback to git
apt_install fzf || {
[ -d "$HOME/.fzf" ] \
|| git clone --depth 1 https://github.com/junegunn/fzf.git "$HOME/.fzf"
"$HOME/.fzf/install"
}
fi
else
type fzf >/dev/null || {
log_error "$1 is not supported!"
return 0
}
fi
|
stephencroberts/dotfiles
|
modules/fzf/install.sh
|
Shell
|
mit
| 561 |
#!/bin/sh
set -e
APPDIR="$(dirname "$(readlink -e "$0")")"
. "$APPDIR"/common.conf
exec "$PYTHON" -s "${APPDIR}/usr/bin/electron-cash" "$@"
|
fyookball/electrum
|
contrib/build-linux/appimage/scripts/apprun.sh
|
Shell
|
mit
| 143 |
#!/bin/bash
source environ.sh
BROKEN
SRC=http://ftp.gnu.org/gnu/make/make-4.2.tar.gz
DIR=make-4.2
CONFIGURE_ARGS=("CFLAGS=-DPOSIX" "--host=${HOST}" "--without-guile")
configure_template $*
|
redox-os/libc
|
ports/make.sh
|
Shell
|
mit
| 193 |
#!/usr/bin/env bash
# Install command-line tools using Homebrew.
# Make sure we’re using the latest Homebrew.
brew update
# Upgrade any already-installed formulae.
brew upgrade --all
# Install GNU core utilities (those that come with macOS are outdated).
# Don’t forget to add `$(brew --prefix coreutils)/libexec/gnubin` to `$PATH`.
brew install coreutils
ln -s /usr/local/bin/gsha256sum /usr/local/bin/sha256sum
# Install some other useful utilities like `sponge`.
brew install moreutils
# Install GNU `find`, `locate`, `updatedb`, and `xargs`, `g`-prefixed.
brew install findutils
# Install GNU `sed`, overwriting the built-in `sed`.
brew install gnu-sed --with-default-names
# Install Bash 4.
# Note: don’t forget to add `/usr/local/bin/bash` to `/etc/shells` before
# running `chsh`.
brew install bash
brew tap homebrew/versions
brew install bash-completion
# Switch to using brew-installed bash as default shell
if ! fgrep -q '/usr/local/bin/bash' /etc/shells; then
echo '/usr/local/bin/bash' | sudo tee -a /etc/shells;
chsh -s /usr/local/bin/bash;
fi;
# Install Brew services
brew tap homebrew/services
# Install `wget` with IRI support.
brew install wget --with-iri
# Install RingoJS and Narwhal.
# Note that the order in which these are installed is important;
# see http://git.io/brew-narwhal-ringo.
brew install ringojs
brew install narwhal
# Install more recent versions of some macOS tools.
brew install vim --override-system-vi
brew install homebrew/dupes/grep
brew install homebrew/dupes/openssh
brew install homebrew/dupes/screen
brew install homebrew/php/php70 --with-gmp
# Cask
brew install caskroom/cask/brew-cask
brew cask install xquartz
# Install font tools.
brew tap bramstein/webfonttools
brew install sfnt2woff
brew install sfnt2woff-zopfli
brew install woff2
# Install some CTF tools; see https://github.com/ctfs/write-ups.
brew install bfg
brew install binutils
brew install binwalk
brew install cifer
brew install dex2jar
brew install dns2tcp
brew install fcrackzip
brew install foremost
brew install hashpump
brew install hydra
brew install john
brew install knock
brew install netpbm
brew install nmap
brew install pngcheck
brew install socat
brew install sqlmap
brew install tcpflow
brew install tcpreplay
brew install tcptrace
brew install ucspi-tcp # `tcpserver` etc.
brew install homebrew/x11/xpdf
brew install xz
# Install other useful binaries.
brew install ack
brew install dnsmasq
brew install dark-mode
brew install git
brew install git-lfs
brew install imagemagick --with-webp
brew install lua
brew install lynx
brew install p7zip
brew install pigz
brew install pv
brew install rename
brew install rhino
brew install speedtest_cli
brew install ssh-copy-id
brew install testssl
brew install tree
brew install vbindiff
brew install webkit2png
brew install zopfli
# Install Node.js. Note: this installs `npm` too, using the recommended
# installation method.
brew install node
# Vagrant related stuff
brew tap homebrew/completions
brew install vagrant-completion
brew tap homebrew/binary
brew install packer
# tldr pages - Simplified and community-driven man pages
brew tap tldr-pages/tldr
brew install tldr
## Web development stuff
brew install phantomjs
brew install casperjs
brew install mysql
brew install liquibase
brew install mycli
# Java development stuff
brew install tomcat
brew install maven
brew install scala
# Chef
brew cask install chefdk
# Remove outdated versions from the cellar.
brew cleanup
|
thorsten/dotfiles
|
brew.sh
|
Shell
|
mit
| 3,605 |
#!/bin/bash
################################################################################
#### Dialog function ####
################################################################################
bootstrapper_dialog() {
DIALOG_RESULT=$(whiptail --clear --backtitle "Arch bootstrapper" "$@" 3>&1 1>&2 2>&3)
DIALOG_CODE=$?
}
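# (The 3>&1 1>&2 2>&3 swap captures whiptail's answer, which it writes to stderr,
# while the dialog UI itself still draws on the terminal.)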
################################################################################
#### Welcome ####
################################################################################
clear
bootstrapper_dialog --title "Welcome" --msgbox "\nWelcome to Kenny's Arch Linux bootstrapper." 10 60
################################################################################
#### UEFI / BIOS detection ####
################################################################################
efivar -l >/dev/null 2>&1
if [[ $? -eq 0 ]]; then
UEFI_BIOS_text="UEFI detected."
UEFI_radio="on"
BIOS_radio="off"
else
UEFI_BIOS_text="BIOS detected."
UEFI_radio="off"
BIOS_radio="on"
fi
bootstrapper_dialog --title "UEFI or BIOS" --radiolist "${UEFI_BIOS_text}\nPress <Enter> to accept." 10 40 2 1 UEFI "$UEFI_radio" 2 BIOS "$BIOS_radio"
[[ $DIALOG_RESULT -eq 1 ]] && UEFI=1 || UEFI=0
################################################################################
#### Prompts ####
################################################################################
bootstrapper_dialog --title "Hostname" --inputbox "\nPlease enter a name for this host.\n" 10 60
hostname="$DIALOG_RESULT"
################################################################################
#### Password prompts ####
################################################################################
bootstrapper_dialog --title "Disk encryption" --passwordbox "\nEnternter a strong passphrase for the disk encryption.\nLeave blank if you don't want encryption.\n" 10 60
encryption_passphrase="$DIALOG_RESULT"
bootstrapper_dialog --title "Root password" --passwordbox "\nEnter a strong password for the root user.\n" 10 60
root_password="$DIALOG_RESULT"
################################################################################
#### Warning ####
################################################################################
bootstrapper_dialog --title "WARNING" --yesno "\nThis script will NUKE /dev/sda from orbit.\nPress <Enter> to continue or <Esc> to cancel.\n" 10 60
clear
if [[ $DIALOG_CODE -eq 1 ]]; then
bootstrapper_dialog --title "Cancelled" --msgbox "\nScript was cancelled at your request." 10 60
exit 0
fi
################################################################################
#### reset the screen ####
################################################################################
reset
################################################################################
#### Nuke and set up disk partitions ####
################################################################################
echo "Zapping disk"
sgdisk --zap-all /dev/sda
[[ $UEFI -eq 0 ]] && printf "r\ng\nw\ny\n" | gdisk /dev/sda
# Hope the kernel can read the new partition table. Partprobe usually fails...
blockdev --rereadpt /dev/sda
echo "Creating /dev/sda1"
if [[ $UEFI -eq 1 ]]; then
printf "n\n1\n\n+1G\nef00\nw\ny\n" | gdisk /dev/sda
yes | mkfs.fat -F32 /dev/sda1
else
printf "n\np\n1\n\n+200M\nw\n" | fdisk /dev/sda
yes | mkfs.xfs -f /dev/sda1
fi
echo "Creating /dev/sda2"
if [[ $UEFI -eq 1 ]]; then
printf "n\n2\n\n\n8e00\nw\ny\n"| gdisk /dev/sda
else
printf "n\np\n2\n\n\nt\n2\n8e\nw\n" | fdisk /dev/sda
fi
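# The printf strings above feed gdisk/fdisk their interactive keystrokes:
# n = new partition, then partition number, first sector (default), size (+1G/+200M),
# type code (gdisk: ef00 = EFI System, 8e00 = Linux LVM; fdisk: t then 8e = Linux LVM),
# and w (plus y for gdisk) = write the table and confirm.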
if [[ ! -z $encryption_passphrase ]]; then
echo "Setting up encryption"
printf "%s" "$encryption_passphrase" | cryptsetup luksFormat /dev/sda2 -
printf "%s" "$encryption_passphrase" | cryptsetup open --type luks /dev/sda2 lvm -
cryptdevice_boot_param="cryptdevice=/dev/sda2:vg00 "
encrypt_mkinitcpio_hook="encrypt "
physical_volume="/dev/mapper/lvm"
else
physical_volume="/dev/sda2"
fi
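# Note: the trailing "-" in the cryptsetup commands above makes the passphrase
# be read from stdin, so the whiptail answer is piped in rather than exposed on
# the command line.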
echo "Setting up LVM"
pvcreate --force $physical_volume
vgcreate vg00 $physical_volume
lvcreate -L 20G vg00 -n lvroot
lvcreate -l +100%FREE vg00 -n lvhome
echo "Creating XFS file systems on top of logical volumes"
yes | mkfs.xfs -f /dev/mapper/vg00-lvroot
yes | mkfs.xfs -f /dev/mapper/vg00-lvhome
################################################################################
#### Install Arch ####
################################################################################
mount /dev/vg00/lvroot /mnt
mkdir /mnt/{boot,home}
mount /dev/sda1 /mnt/boot
mount /dev/vg00/lvhome /mnt/home
yes '' | pacstrap -i /mnt base base-devel
genfstab -U -p /mnt >> /mnt/etc/fstab
################################################################################
#### Configure base system ####
################################################################################
arch-chroot /mnt /bin/bash <<EOF
echo "Setting and generating locale"
echo "en_US.UTF-8 UTF-8" > /etc/locale.gen
locale-gen
export LANG=en_US.UTF-8
echo "LANG=en_US.UTF-8" >> /etc/locale.conf
echo "Setting time zone"
ln -s /usr/share/zoneinfo/Europe/Brussels /etc/localtime
echo "Setting hostname"
echo $hostname > /etc/hostname
sed -i '/localhost/s/$'"/ $hostname/" /etc/hosts
echo "Installing wifi packages"
pacman --noconfirm -S iw wpa_supplicant dialog wpa_actiond
echo "Generating initramfs"
sed -i "s/^HOOKS.*/HOOKS=\"base udev autodetect modconf block keyboard ${encrypt_mkinitcpio_hook}lvm2 filesystems fsck\"/" /etc/mkinitcpio.conf
mkinitcpio -p linux
echo "Setting root password"
echo "root:${root_password}" | chpasswd
EOF
################################################################################
#### Install boot loader ####
################################################################################
if [[ $UEFI -eq 1 ]]; then
arch-chroot /mnt /bin/bash <<EOF
echo "Installing Gummiboot boot loader"
pacman --noconfirm -S gummiboot
gummiboot install
cat << GRUB > /boot/loader/entries/arch.conf
title Arch Linux
linux /vmlinuz-linux
initrd /initramfs-linux.img
options ${cryptdevice_boot_param}root=/dev/mapper/vg00-lvroot rw
GRUB
EOF
else
arch-chroot /mnt /bin/bash <<EOF
echo "Installing Grub boot loader"
pacman --noconfirm -S grub
grub-install --target=i386-pc --recheck /dev/sda
sed -i "s|^GRUB_CMDLINE_LINUX_DEFAULT.*|GRUB_CMDLINE_LINUX_DEFAULT=\"quiet ${cryptdevice_boot_param}root=/dev/mapper/vg00-lvroot\"|" /etc/default/grub
grub-mkconfig -o /boot/grub/grub.cfg
EOF
fi
################################################################################
#### The end ####
################################################################################
printf "The script has completed bootstrapping Arch Linux.\n\nTake a minute to scroll up and check for errors (using shift+pgup).\nIf it looks good you can reboot.\n"
|
rasschaert/arch-bootstrap
|
bootstrap.bash
|
Shell
|
mit
| 7,542 |
#!/bin/bash
# Startup registrator, logs will be written to console.
# Needs to run inside the container
CMD="/bin/registrator $@"
$CMD
|
hudl/registrator
|
run-registrator.sh
|
Shell
|
mit
| 139 |
#!/bin/bash
: <<COMMITBLOCK
# Script name: swip.sh
# Purpose: convert a number into an IP address
# Author: hooboor
# Version: VER. 1.0
# Copyright (c) 2015-2018
# Last modified: 2015/10/19
COMMITBLOCK
#Default value
DEFAULT=0
#Initial value
initData=$1
#Split the input number into two parts
#Integer part
_src_int=`echo $initData|awk -F. '{printf("%d\n",$1)}'|sed -e 's/\ //g'`
#Exit if the number is negative
if [ "$_src_int" -lt "0" ]
then
echo -e "\e[1;31m The input value must not be negative!\e[0m"
exit 0
fi
#Fractional part; %s ensures the value is empty when there is no fraction
#if [ ! -z `echo $1|awk -F. '{printf("%s",$2)}'` ]
#Flag used by the conditionals below, based on the fractional digits:
#$_arg=0 the input is a positive integer
#$_arg=1 the input has a fractional part
_arg=`echo $initData|awk -F. '{printf("%d",$2)}'`
#echo "_arg IS $_arg"
#%d prints 0 when there is no value
#if [ `echo $1|awk -F. '{printf("%d",$2)}'` -ne 0 ]
if [ "$_arg" -ne "0" ]
then
#When the original number has a fraction
_src_dec=`echo $initData|awk -F. '{printf("0.%s\n",$NF)}'|sed -e 's/\ //g'`
#echo "Has fraction: $_src_dec"
else
#When the original number has no fraction
#%s would print an empty value
#_src_dec=`echo $1|awk -F. '{printf("%s\n",$2)}'|sed -e 's/\ //g'`
#%d prints 0
_src_dec=`echo $initData|awk -F. '{printf("%d\n",$2)}'|sed -e 's/\ //g'`
#echo "The original number has no fraction"
fi
#Multiply the fraction by 10
_src_dec_enlg10=`echo $_src_dec\*10|bc`
#echo "_src_dec_enlg10 = $_src_dec_enlg10"
#Take the integer part after scaling
#%s prints a string, %d prints a number
#_src_dec_enlg10_intval=`echo $_src_dec_enlg10|awk -F. '{printf("%s\n",$1)}'`
_src_dec_enlg10_intval=`echo $_src_dec_enlg10|awk -F. '{printf("%d\n",$1)}'`
#echo "_src_dec_enlg10_intval = $_src_dec_enlg10_intval"
#Significant digits of the fractional part
#_src_dec_valid_decval=`echo $1|awk -F. '{printf("%s\n",$2)}'|sed -e 's/\ //g'`
_src_dec_valid_decval=`echo $initData|awk -F. '{printf("%d\n",$NF)}'|sed -e 's/\ //g'`
#Treat the decimal literal as an octal literal; e.g. for 1.0123 (base 10), take 0123 after the point so that 0123 (base 10) = 0123 (base 8)
let "_oct2dec = 0$_src_dec_valid_decval"
#Treat the decimal literal as a hexadecimal literal
#let "_oct2dec = 0x$_src_dec_valid_decval"
#If the significant fractional value is 0
if [ "$_src_dec_valid_decval" -eq "0" ]
then
_src_dec_valid_decval="none"
fi
#echo "Input number: $1"
#echo "Integer part: $_src_int"
#echo "Fractional part: $_src_dec"
#echo "Significant fractional digits: $_src_dec_valid_decval"
#echo
#Take the argument and convert it to an integer
#_midsrc=`echo $1|awk -F. '{printf("%d\n",$NF)}'|sed -e 's/\ //g'`
#echo -e "\e[1;31m Original data: $1 \e[0m"
#echo -e "\e[1;34m Integer value of the fraction: $_src_dec_valid_decval \e[0m"
#echo
#Octal to decimal
#_oct2dec=`echo $((8#$_src_dec_valid_decval))`
#_oct2dec=`echo "obase=8;$_src_dec_valid_decval"|bc`
#Convert the decimal-converted fraction to hexadecimal; cannot use _oct2dec=`echo $((16#$_oct2dec))`
#echo "obase=16;$_oct2dec"|bc
#Octal to hexadecimal
if [ "$_src_dec_enlg10_intval" -lt "1" ]
then
#Convert when the fraction has the form 0.0x
_oct2hex=`echo "obase=16;$_oct2dec"|bc`
#echo -e "\e[1;32m The original fraction (0.0x) converted to decimal is:\e[0m"
#echo -e "\e[1;32m $_src_dec_valid_decval (8) = $_oct2dec (16) \e[0m"
else
#Do not convert when the fraction has the form 0.x
_oct2hex=`echo "obase=16;$_src_dec_valid_decval"|bc`
#echo "No octal conversion needed"
#echo -e "\e[1;31m The original fraction (0.x) converted to hexadecimal is:\e[0m"
#echo -e "\e[1;31m $_src_dec_valid_decval (10) = $_oct2hex (16) \e[0m"
fi
#_midtemp0=`echo $_oct2dec`
#echo "Converted to decimal: $_midtemp0"
#Debug
#echo int is $1
#echo hex is $_int2hex
#echo hex length is $_len
usage()
{
echo
echo "**************************************************************************"
echo "* *"
echo "* "Usage: ./`basename $0` positive-integer-or-positive-decimal" *"
echo "* *"
echo "* "Note: if the fraction is less than 0.1 it must not contain the digits 8 or 9, otherwise the conversion fails" *"
echo "* *"
echo "* "Example: ./`basename $0` 123.345" *"
echo "* *"
echo "**************************************************************************"
echo
}
info()
{
#Summarize the arguments
echo "_arg IS $_arg"
echo "Input number: $initData"
#If _src_int is unset or empty, fall back to DEFAULT
echo "Integer part: ${_src_int:-DEFAULT}"
echo "Fractional part: $_src_dec"
echo "Significant fractional digits: $_src_dec_valid_decval"
echo
echo -e "\e[1;31m Original data: $initData \e[0m"
echo -e "\e[1;34m Integer value of the fraction: $_src_dec_valid_decval \e[0m"
echo
#Integer part of the fraction after multiplying by 10
if [ "$_src_dec_enlg10_intval" -lt "1" ]
then
#Convert when the fraction has the form 0.0x
echo -e "\e[1;32m The original fraction (0.0x) converted to decimal is:\e[0m"
echo -e "\e[1;32m $_src_dec_valid_decval (8) = $_oct2dec (16) \e[0m"
else
#Do not convert when the fraction has the form 0.x
echo "No octal conversion needed"
echo -e "\e[1;31m The original fraction (0.x) converted to hexadecimal is:\e[0m"
echo -e "\e[1;31m $_src_dec_valid_decval (10) = $_oct2hex (16) \e[0m"
fi
#Whether there is a fraction
echo "Has fraction: $_src_dec"
#Multiplied by 10
echo "Multiplied by 10: _src_dec_enlg10 = $_src_dec_enlg10"
#Integer part after scaling
echo "Integer part after scaling: _src_dec_enlg10_intval = $_src_dec_enlg10_intval"
}
#Initialize parameters
init()
{
#Set the initial values for the conversion
#if [ `echo $1|awk -F. '{printf("%d",$2)}'` -ne 0 ]
if [ "$_arg" -ne "0" ]
then
###When the input has a fraction
#awk format string
_awkscript='{printf "%06s\n",$0}'
#Length of the hexadecimal value
_len=6
#Number of loop iterations
_loo=` expr $_len / 2`
#Value to convert
_switch=$_oct2hex
#Length after conversion
_swed_len=`expr length $_switch`
echo "_swed_len = $_swed_len"
else
###When the input is an integer
#Decimal to hexadecimal
_int2hex=`echo "obase=16;$initData"|bc`
#awk format string
_awkscript='{printf "%08s\n",$0}'
#Length of the hexadecimal value
_len=8
#Number of loop iterations
_loo=` expr $_len / 2`
#Value to convert
_switch=$_int2hex
#Length after conversion
_swed_len=`expr length $_switch`
echo "_swed_len = $_swed_len"
fi
}
#Initialize
init
#Conversion function
switchip()
{
#Check the hex length; if it is 8 digits or fewer, pad with zeros on the left
if [ "$_swed_len" -le "$_len" ]
then
#Get the hex value, zero-padded on the left to 8 (or 6) digits
_temp=`echo $_switch|awk "$_awkscript"`
#Set the length to 8 digits
_len_eight=`expr length $_temp`
#echo -e "_len_eight = $_len_eight \n"
#echo _temp$_len $_temp
else
#Longer than 8 (6) digits: take the rightmost 8 (6) digits
_temp=`echo ${_switch:(-$_len)}`
_len_eight=`echo $_swed_len`
#echo "_len_eight = $_len_eight"
#echo _temp$_len $_temp
fi
#Split the hex value into pairs, e.g. 1F2B3380 becomes 1F 2B 33 80, and convert each pair to decimal
for ((i=0,n=0;i<$_loo;i++))
do
#echo "Initial value of n: $n"
#Store each pair in the _array array
_array[i]=`echo ${_temp:$n:2}`
#Copy the array element into _varray
_varray=`echo ${_array[$i]}`
#Convert to decimal and store in the _ips array
_ips[i]=`echo $((16#${_varray}))`
#Copy the array element into _vips
_vips=`echo ${_ips[$i]}`
#Show the values
#echo _array[$i] = $_varray
#echo _ips[$i] = $_vips
#n is the substring offset used in _array[i]=`echo ${_temp:$n:2}` above
((n+=2))
#Print the final result
if [ "$n" -ge "$_len_eight" -a "$_len" -gt "6" ]
then
echo -e "\e[1;31m IP address: `echo ${_ips[*]}|sed -e 's/\ /\./g'` \e[0m"
else
echo -e "\e[1;31m IP address: $_src_int.`echo ${_ips[*]}|sed -e 's/\ /\./g'` \e[0m"
fi
#((n+=2))
#echo "Loop value of n: $n"
done
}
#Fraction to IP
dectoip()
{
echo "Converting the fractional part to an IP"
#Is the integer part greater than 0?
#if [ `echo $1|awk -F. '{printf("%d",$2)}'` -ne 0 ]
if [ "$_arg" -ne "0" ]
then
#Integer part is greater than 0
echo "The integer part is greater than 0"
#Is the integer part greater than 255?
if [ "$_src_int" -gt "255" ]
then
#Greater than 255
echo "The integer before the decimal point must not be greater than 255"
exit 0
else
#This integer is the first octet of the IP; since it is at most 255 no conversion is needed
#echo "First octet: $_src_int"
switchip
fi
else
#Integer part is 0; check the number of zeros after the decimal point
echo "The integer part is 0; checking the zeros after the decimal point"
#Length of the fraction (excluding the integer part)
#b=`echo $a|awk -F. '{printf("%s\n",$2)}'|wc -l`
#If the fraction times 10 is >= 1, a significant digit directly follows the point.
if [ "$_src_dec_enlg10_intval" -ge "1" ]
then
echo "0.x"
#Case with a single zero after the decimal point
echo "A single zero after the decimal point"
switchip
else
echo "0.0x"
#Multiply the fraction by 10
xx=`echo $c\*10|bc`
#Case with several zeros after the decimal point
echo "Several zeros after the decimal point"
fi
fi
}
#Argument checks
if [ "$#" -lt "1" -o "$initData" = "-h" -o "$initData" = "--help" -o "$initData" = "?" ]
then
usage
else
info
fi
#Is the input an integer or a decimal?
if [ ! -z `echo $initData|awk -F. '{printf("%s",$2)}'` ]
then
#It is a decimal
echo $initData is a decimal
dectoip
else
#It is an integer
echo $initData is an integer
switchip
fi
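# - - - - - - - - - - - - - - - - -
# Quick reference for the base-conversion idioms used above (try them in an
# interactive bash; they are comments here and are not executed):
#   echo $((16#FF))          # hex literal -> decimal, prints 255
#   echo $((8#17))           # octal literal -> decimal, prints 15
#   echo "obase=16;255" | bc # decimal -> hex, prints FF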
|
hooboor/ShellDemo
|
general/swip-beta.sh
|
Shell
|
mit
| 9,336 |
#!/bin/sh
set -eo pipefail -o nounset
#download SVs from gnomAD (grch37) in bgzipped VCF format
wget --quiet https://storage.googleapis.com/gnomad-public/papers/2019-sv/gnomad_v2.1_sv.sites.vcf.gz
#sort and tabix
gsort gnomad_v2.1_sv.sites.vcf.gz https://raw.githubusercontent.com/gogetdata/ggd-recipes/master/genomes/Homo_sapiens/GRCh37/GRCh37.genome | bgzip -c > gnomad_v2_sv_sorted.sites.vcf.gz
tabix gnomad_v2_sv_sorted.sites.vcf.gz
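# gsort reorders the VCF to match the chromosome order in the GRCh37 .genome
# file so that tabix can build a coordinate-sorted index; bgzip recompresses
# the sorted stream.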
#clean up
rm gnomad_v2.1_sv.sites.vcf.gz
|
gogetdata/ggd-recipes
|
recipes/genomics/Homo_sapiens/GRCh37/grch37-structural-variants-gnomad-v1/recipe.sh
|
Shell
|
mit
| 481 |
#!/bin/bash
# runGameOfLife1000.sh
# Muriel Brunet, Isaiah Mann
# CSC 352, Spring 17
# Run for 1000 generations
# chmod a+x runGameOfLife1000.sh
# ./runGameOfLife1000.sh
# ./runGameOfLife1000.sh 2>&1 | grep "with\|real" > timing1000gen.data
javac *.java # compile all the files
for i in 2 4 8 12 16 20 32 64 ; do # number of threads
printf "\n"
printf "\n"
echo "..................................... with" $i "threads"
for j in 1 2 3 4 5 6 7 8 9 10 ; do # number of times to run the program
echo "RUN" $j
time java GameOfLifeApplication "dish.txt" $i 1000 false
done
done
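# The grep filter shown in the header keeps only the "with N threads" banners
# and the `real` wall-clock lines emitted by `time` (hence the 2>&1 redirect),
# yielding one timing row per run in timing1000gen.data.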
|
imann24/game-of-life-parallel
|
src/runGameOfLife1000.sh
|
Shell
|
mit
| 596 |
#!/bin/bash
set -eu
# SUBMIT THETA
# Runs main.sh on Theta
# Use 'submit-theta.sh -h' for help
usage()
{
echo "usage: submit-theta [-q QUEUE] [-w WALLTIME] NODES PARALLELISM"
}
# Defaults:
QUEUE="default"
WALLTIME=00:02:00
while getopts "hq:w:" OPTION
do
case $OPTION in
h) usage ; exit 0 ;;
q) QUEUE=$OPTARG ;;
w) WALLTIME=$OPTARG ;;
*) exit 1 ;;
esac
done
shift $(( OPTIND - 1 ))
if (( ${#} != 2 ))
then
usage
exit 1
fi
NODES=$1
PARALLELISM=$2
CONCURRENCY=$(( NODES / PARALLELISM ))
TASKS=$(( CONCURRENCY * 10 ))
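# Worked example (hypothetical values): NODES=8, PARALLELISM=2
# -> CONCURRENCY = 8/2 = 4 concurrent jobs and TASKS = 4*10 = 40 tasks.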
TIMESTAMP=$( date "+%Y-%m-%d_%H:%M:%S" )
OUTPUT=output/$TIMESTAMP
LOG=output/$TIMESTAMP.txt
mkdir -p $OUTPUT
echo OUTPUT=$OUTPUT
echo TIMESTAMP=$TIMESTAMP > $LOG
{
echo NODES=$NODES
echo PARALLELISM=$PARALLELISM
echo CONCURRENCY=$CONCURRENCY
echo TASKS=$TASKS
echo
} | tee -a $LOG
JOB=$( qsub --project CSC249ADOA01 \
--queue $QUEUE \
--nodecount $NODES \
--time $WALLTIME \
--output $LOG \
--error $LOG \
--env OUTPUT=$OUTPUT \
./main.sh $PARALLELISM $CONCURRENCY $TASKS )
echo JOB=$JOB
cqwait $JOB
|
ECP-CANDLE/Supervisor
|
workflows/async-local/submit-theta.sh
|
Shell
|
mit
| 1,145 |
#!/bin/bash
# Script to migrate to Neovim from Vim
# Install neovim
sudo add-apt-repository ppa:neovim-ppa/unstable
sudo apt-get update
sudo apt-get install neovim
# Migrate dotfiles from Vim
mkdir -p ~/.config/nvim
ln -s "$PWD/vim/.vimrc" ~/.config/nvim/init.vim # absolute target so the link resolves from any directory
# Support for Python extensions
sudo apt-get install python-dev python-pip python3-dev python3-pip
sudo pip2 install --upgrade neovim
sudo pip3 install --upgrade neovim
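# Optional sanity check (run inside Neovim, assuming a recent build): :checkhealth
# should report the python2/python3 providers as OK after the pip installs above.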
|
mikeliturbe/dotfiles
|
neovim_migration.sh
|
Shell
|
mit
| 430 |
for i in *.JPG; do exiv2 -r '%Y%m%d.%H%M%S.:basename:' rename "$i"; done
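# exiv2's -r format string takes strftime-style date codes plus the :basename:
# token (the original filename without extension), so IMG_0001.JPG shot on
# 2015-10-19 12:34:56 would become 20151019.123456.IMG_0001.JPG.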
|
tacy/shell
|
linux/rename_jpg_by_create_date.sh
|
Shell
|
mit
| 77 |
#!/bin/sh
# Install prerequisites.
apt-get update
apt-get install -y -q --no-install-recommends \
curl ca-certificates make g++ sudo bash
# Install Fluentd.
/usr/bin/curl -sSL https://toolbelt.treasuredata.com/sh/install-ubuntu-xenial-td-agent2.sh | sh
# Change the default user and group to root.
# Needed to allow access to /var/log/docker/... files.
sed -i -e "s/USER=td-agent/USER=root/" -e "s/GROUP=td-agent/GROUP=root/" /etc/init.d/td-agent
# Install the Elasticsearch Fluentd plug-in.
# http://docs.fluentd.org/articles/plugin-management
td-agent-gem install --no-document fluent-plugin-kubernetes_metadata_filter -v 0.24.0
td-agent-gem install --no-document fluent-plugin-elasticsearch -v 1.5.0
# Remove docs and postgres references
rm -rf /opt/td-agent/embedded/share/doc \
/opt/td-agent/embedded/share/gtk-doc \
/opt/td-agent/embedded/lib/postgresql \
/opt/td-agent/embedded/bin/postgres \
/opt/td-agent/embedded/share/postgresql
apt-get remove -y make g++
apt-get autoremove -y
apt-get clean -y
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
|
m-wrona/kubernetes-elasticsearch
|
docker/fluentd/build.sh
|
Shell
|
mit
| 1,070 |
GENOME=Rspe02.final.assembly.fasta
~/bio/Applications/EVM_r2012-06-25/EvmUtils/recombine_EVM_partial_outputs.pl \
--partitions partitions_list.out \
--output_file_name evm.out
~/bio/Applications/EVM_r2012-06-25/EvmUtils/convert_EVM_outputs_to_GFF3.pl \
--partitions partitions_list.out \
--output evm.out \
--genome $GENOME
|
shujishigenobu/genome_annot
|
app/EVM/pipeline_1/run_evm_step3_recombine_outputs.sh
|
Shell
|
mit
| 332 |