code
stringlengths 2
1.05M
| repo_name
stringlengths 5
110
| path
stringlengths 3
922
| language
stringclasses 1
value | license
stringclasses 15
values | size
int64 2
1.05M
|
---|---|---|---|---|---|
#!/bin/bash
# Compare FakeVim behaviour against real Vim: replay the same key sequence in
# both editors on a copy of the input file and diff the resulting buffers.
VIM=vim
FAKEVIM=${FAKEVIM:-test/test}   # FakeVim test binary (overridable via environment)
diff=meld                        # tool used to inspect mismatching results
cmdfile=fakevim_test_cmd.log     # generated Qt Creator test snippet is appended here
INDENT=${INDENT:-' '}            # indentation used inside the generated snippet
options="set smartindent|set autoindent|set nocindent"
print_help() {
  # Describe invocation and output locations on stdout.
  cat <<EOF
USAGE: $0 FILE CMD...
 Run input in both Vim and FakeVim and compare result.
 Results are stored in FILE.vim and FILE.fakevim.
 Tests for FakeVim in Qt Creator are stored in "$cmdfile" file.
EOF
}
print() {
  # Append each argument verbatim to $cmdfile; the sentinel argument "N"
  # stands for a newline.
  local token
  for token in "$@"; do
    case "$token" in
      N) printf '\n' ;;
      *) printf '%s' "$token" ;;
    esac
  done >> "$cmdfile"
}
print_content() {
  # Append $file to $cmdfile as C-string test lines: escape double quotes,
  # then wrap every line in <INDENT>"..." followed by the " N" newline marker
  # understood by the generated KEYS()/data.setText() stanzas.
  # NOTE(review): $INDENT is spliced into the sed program unescaped -- a value
  # containing '/', '&' or '\' would corrupt the expression; confirm callers
  # only ever use plain whitespace.
  local file=$1
  sed \
    -e 's/"/\\"/g' \
    -e 's/^/'"$INDENT"'"/' \
    -e 's/$/" N/' "$file" \
    >> "$cmdfile"
}
vim_exec() {
  # Build one Vim command fragment per key sequence: bind the keys to \X and
  # replay them via :normal. Fragments are concatenated with no separator.
  local keys result=""
  for keys in "$@"; do
    result+="|map \\X ${keys}|normal \\X"
  done
  printf '%s' "$result"
}
run_vim() {
  # Replay the given key sequences in real Vim on $file: apply the shared
  # indent options, run the mapped commands, append the "|" end marker in
  # insert mode, then write and quit.
  local file=$1
  shift
  "$VIM" \
    -c "$options" \
    -c "$(vim_exec "$@")" \
    -c "normal i|" \
    -c "wq" "$file"
}
run_fakevim() {
  # Replay the same key sequences in the FakeVim test binary: same options as
  # Vim plus passkeys disabled, then append the "|" end marker and :wq.
  # Key arguments are passed through unchanged; the binary interprets <CR>,
  # <ESC> etc. itself.
  local file=$1
  shift
  find_fakevim
  "$FAKEVIM" "$file" \
    ":$options|set nopasskeys|set nopasscontrolkey<CR>" \
    "$@" "<ESC><ESC>i|<ESC>" \
    ":wq<CR>"
}
find_fakevim() {
  # Locate the FakeVim test binary when $FAKEVIM does not already point at an
  # executable: search next to this script (following symlinks) for an
  # executable named "test" and take the first hit.
  # Fix: `dir` was leaking into the global scope; keep it function-local.
  local dir
  if [ ! -x "$FAKEVIM" ]; then
    dir=$(dirname "$(readlink -f "$0")")
    FAKEVIM=$(find "$dir" -type f -executable -name test | head -1)
  fi
}
print_test() {
  # Emit one stanza of the generated test: a header line, the quoted file
  # contents, then a footer line (each followed by a newline marker).
  local header=$1 file=$2 footer=$3
  print "$header" N
  print_content "$file"
  print "$footer" N
}
same() {
  # Byte-for-byte file comparison; only the exit status matters, so run cmp
  # in silent mode instead of discarding its output manually.
  cmp -s "$@"
}
main() {
  # Drive the whole comparison: generate the Qt Creator test snippet, run the
  # keys through Vim and FakeVim, diff the results, and copy the snippet to
  # the X clipboard.
  set -e
  if [ "$#" -lt 2 ]; then
    print_help
    exit 1
  fi
  local file=$1
  shift # rest are commands
  rm -f "$cmdfile"
  #cp ~/.vimrc fakevimrc
  # Input file becomes the data.setText(...) stanza of the generated test.
  print_test 'data.setText(' "$file" ');'
  # run command through Vim
  local vimoutfile=${file}.vim
  cp "$file" "$vimoutfile"
  run_vim "$vimoutfile" "$@"
  # Vim's result becomes the expected output of the KEYS(...) stanza.
  print_test "KEYS(\"$*\"," "$vimoutfile" ');'
  # run the same command through FakeVim
  local fakevimoutfile=${file}.fakevim
  cp "$file" "$fakevimoutfile"
  run_fakevim "$fakevimoutfile" "$@"
  if same "$fakevimoutfile" "$vimoutfile"; then
    echo OK, same result from Vim and FakeVim.
  else
    echo FAILED, different result from Vim and FakeVim.
    $diff "$fakevimoutfile" "$vimoutfile"
  fi
  # The editors may leave the terminal in a raw state; reset it, then show
  # the generated snippet and copy an indented copy to the X clipboard.
  reset
  cat "$cmdfile"
  sed 's/^/ /' "$cmdfile" | xclip -i
}
main "$@"
|
vovkasm/liteide
|
liteidex/src/3rdparty/fakevim/tests/vim-test.sh
|
Shell
|
lgpl-2.1
| 2,543 |
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Run the SparkStreaming benchmark: resolve this script's directory, load the
# shared HiBench helpers, execute the streaming job and report timings.
workload_folder=`dirname "$0"`
workload_folder=`cd "$workload_folder"; pwd`
workload_root=${workload_folder}/../..
# Provides enter_bench, show_bannar, timestamp, gen_report, run-streaming-job, ...
. "${workload_root}/../../bin/functions/load-bench-config.sh"
enter_bench SparkStreamingBench ${workload_root} ${workload_folder}
# NOTE(review): "show_bannar" looks misspelled, but it must match the helper
# defined in the sourced functions file -- do not rename it here alone.
show_bannar start
START_TIME=`timestamp`
printFullLog
run-streaming-job com.intel.hibench.streambench.spark.RunBench $SPARKBENCH_PROPERTIES_FILES
END_TIME=`timestamp`
gen_report ${START_TIME} ${END_TIME} 0 # FIXME, size should be throughput
show_bannar finish
|
cemsbr/HiBench
|
workloads/streamingbench/spark/bin/run.sh
|
Shell
|
apache-2.0
| 1,303 |
#!/bin/bash
set -e -u
# Generate Vulkan shaders from the GL shader sources via the preprocessor
# script, compiling with glslc from the Android NDK.
# Prevent python from generating compiled *.pyc files
export PYTHONDONTWRITEBYTECODE=1
DEBUG="${1:-empty}"
MY_PATH="$(dirname "$0")"            # relative
MY_PATH="$(cd "$MY_PATH" && pwd)"    # absolutized and normalized
source "$MY_PATH/../autobuild/ndk_helper.sh"
# Fix: the original `export NDK_ROOT=$(GetNdkRoot) || ( ...; exit 1 )` never
# triggered -- export's own (always zero) status masks the substitution, and
# `exit` inside ( ... ) only leaves the subshell. Split the assignment so a
# GetNdkRoot failure is actually observed and aborts the script.
NDK_ROOT=$(GetNdkRoot) || { echo "Can't read NDK root path from android/local.properties" >&2; exit 1; }
export NDK_ROOT
# NOTE(review): glslc path is hard-coded for macOS hosts (darwin-x86_64) --
# confirm whether Linux hosts are expected to run this script.
GLSLC_PATH="$NDK_ROOT/shader-tools/darwin-x86_64/glslc"
OMIM_PATH="${OMIM_PATH:-$(cd "$(dirname "$0")/../.."; pwd)}"
SHADERS_GENERATOR="$OMIM_PATH/shaders/vulkan_shaders_preprocessor.py"
python "$SHADERS_GENERATOR" "$OMIM_PATH/shaders/GL" shader_index.txt programs.hpp program_params.hpp shaders_lib.glsl "$OMIM_PATH/data/vulkan_shaders" "$GLSLC_PATH" "$DEBUG"
|
rokuz/omim
|
tools/unix/generate_vulkan_shaders.sh
|
Shell
|
apache-2.0
| 768 |
#!/bin/sh
set -e
# Report a failed check on stdout and remember that something failed; the
# summary at the end of the script exits non-zero when $FAILED is set.
# Fixes: POSIX function syntax (the `function` keyword is a bashism under
# /bin/sh) and a constant printf format so a message containing '%' or '\'
# cannot be misinterpreted as format directives.
fail() {
  printf 'FAILURE: %s\n' "$1"
  FAILED=1
}
# Config source: an explicit file (arg 1) or the running kernel's
# /proc/config.gz; kernel version: arg 2 or `uname -r`.
echo "starting kernel config sanity test with ${1:-/proc/config.gz}"
if [ -n "$1" ]; then
  UNZIPPED_CONFIG=$(cat "$1")
else
  # decompress /proc/config.gz from the host
  UNZIPPED_CONFIG=$(zcat /proc/config.gz)
fi
if [ -n "$2" ]; then
  kernelVersion="$2"
else
  kernelVersion="$(uname -r)"
fi
# Split "major.minor[.patch...]" into its numeric major/minor components.
kernelMajor="${kernelVersion%%.*}"
kernelMinor="${kernelVersion#$kernelMajor.}"
kernelMinor="${kernelMinor%%.*}"
arch="$(uname -m)"
# Most tests against https://kernsec.org/wiki/index.php/Kernel_Self_Protection_Project
# NOTE: $UNZIPPED_CONFIG is expanded unquoted on purpose below: the whole
# config collapses onto a single line and grep -q still finds each option
# substring anywhere in it.
# Positive cases
echo $UNZIPPED_CONFIG | grep -q CONFIG_BUG=y || fail "CONFIG_BUG=y"
echo $UNZIPPED_CONFIG | grep -q CONFIG_DEBUG_KERNEL=y || fail "CONFIG_DEBUG_KERNEL=y"
echo $UNZIPPED_CONFIG | grep -q CONFIG_STRICT_DEVMEM=y || fail "CONFIG_STRICT_DEVMEM=y"
echo $UNZIPPED_CONFIG | grep -q CONFIG_SYN_COOKIES=y || fail "CONFIG_SYN_COOKIES=y"
echo $UNZIPPED_CONFIG | grep -q CONFIG_DEBUG_CREDENTIALS=y || fail "CONFIG_DEBUG_CREDENTIALS=y"
echo $UNZIPPED_CONFIG | grep -q CONFIG_DEBUG_NOTIFIERS=y || fail "CONFIG_DEBUG_NOTIFIERS=y"
echo $UNZIPPED_CONFIG | grep -q CONFIG_DEBUG_LIST=y || fail "CONFIG_DEBUG_LIST=y"
echo $UNZIPPED_CONFIG | grep -q CONFIG_SECCOMP=y || fail "CONFIG_SECCOMP=y"
echo $UNZIPPED_CONFIG | grep -q CONFIG_SECCOMP_FILTER=y || fail "CONFIG_SECCOMP_FILTER=y"
echo $UNZIPPED_CONFIG | grep -q CONFIG_SECURITY=y || fail "CONFIG_SECURITY=y"
echo $UNZIPPED_CONFIG | grep -q CONFIG_SECURITY_YAMA=y || fail "CONFIG_SECURITY_YAMA=y"
echo $UNZIPPED_CONFIG | grep -q CONFIG_PANIC_ON_OOPS=y || fail "CONFIG_PANIC_ON_OOPS=y"
# (duplicate of the CONFIG_SYN_COOKIES check above; harmless)
echo $UNZIPPED_CONFIG | grep -q CONFIG_SYN_COOKIES=y || fail "CONFIG_SYN_COOKIES=y"
echo $UNZIPPED_CONFIG | grep -q CONFIG_BPF_JIT_ALWAYS_ON=y || fail "CONFIG_BPF_JIT_ALWAYS_ON=y"
# Conditional on kernel version
if [ "$kernelMajor" -eq 4 -a "$kernelMinor" -le 10 ]; then
  echo $UNZIPPED_CONFIG | grep -q CONFIG_DEBUG_RODATA=y || fail "CONFIG_DEBUG_RODATA=y"
  echo $UNZIPPED_CONFIG | grep -q CONFIG_DEBUG_SET_MODULE_RONX=y || fail "CONFIG_DEBUG_SET_MODULE_RONX=y"
fi
# Options added in newer kernels
# NOTE(review): these gates assume major is 4 or 5; a 6.x kernel would skip
# most of them -- confirm whether that is intended.
if [ "$kernelMajor" -eq 5 ] || [ "$kernelMajor" -eq 4 -a "$kernelMinor" -ge 5 ]; then
  echo $UNZIPPED_CONFIG | grep -q CONFIG_UBSAN=y || fail "CONFIG_UBSAN=y"
fi
if [ "$kernelMajor" -eq 5 ] || [ "$kernelMajor" -eq 4 -a "$kernelMinor" -ge 7 ]; then
  echo $UNZIPPED_CONFIG | grep -q CONFIG_SLAB_FREELIST_RANDOM=y || fail "CONFIG_SLAB_FREELIST_RANDOM=y"
fi
if [ "$kernelMajor" -eq 5 ] || [ "$kernelMajor" -eq 4 -a "$kernelMinor" -ge 8 ]; then
  echo $UNZIPPED_CONFIG | grep -q CONFIG_HARDENED_USERCOPY=y || fail "CONFIG_HARDENED_USERCOPY=y"
fi
# 4.18.x renamed this option (and re-introduced CC_STACKPROTECTOR as STACKPROTECTOR)
if [ "$kernelMajor" -eq 5 ] || [ "$kernelMajor" -le 4 -a "$kernelMinor" -ge 18 ]; then
  echo $UNZIPPED_CONFIG | grep -q CONFIG_STACKPROTECTOR=y || fail "CONFIG_STACKPROTECTOR=y"
  echo $UNZIPPED_CONFIG | grep -q CONFIG_STACKPROTECTOR_STRONG=y || fail "CONFIG_STACKPROTECTOR_STRONG=y"
else
  echo $UNZIPPED_CONFIG | grep -q CONFIG_CC_STACKPROTECTOR_STRONG=y || fail "CONFIG_CC_STACKPROTECTOR_STRONG=y"
fi
# poisoning cannot be enabled in 4.4
if [ "$kernelMajor" -eq 5 ] || [ "$kernelMajor" -eq 4 -a "$kernelMinor" -ge 9 ]; then
  echo $UNZIPPED_CONFIG | grep -q CONFIG_PAGE_POISONING=y || fail "CONFIG_PAGE_POISONING=y"
  # These sub options were removed from 5.11.x
  if [ "$kernelMajor" -le 5 -a "$kernelMinor" -lt 11 ]; then
    echo $UNZIPPED_CONFIG | grep -q CONFIG_PAGE_POISONING_NO_SANITY=y || fail "CONFIG_PAGE_POISONING_NO_SANITY=y"
    echo $UNZIPPED_CONFIG | grep -q CONFIG_PAGE_POISONING_ZERO=y || fail "CONFIG_PAGE_POISONING_ZERO=y"
  fi
fi
if [ "$kernelMajor" -eq 5 ] || [ "$kernelMajor" -eq 4 -a "$kernelMinor" -ge 10 ]; then
  echo $UNZIPPED_CONFIG | grep -q CONFIG_BUG_ON_DATA_CORRUPTION=y || fail "CONFIG_BUG_ON_DATA_CORRUPTION=y"
fi
if [ "$kernelMajor" -eq 5 ] || [ "$kernelMajor" -eq 4 -a "$kernelMinor" -ge 11 ]; then
  echo $UNZIPPED_CONFIG | grep -q CONFIG_STRICT_KERNEL_RWX=y || fail "CONFIG_STRICT_KERNEL_RWX=y"
  echo $UNZIPPED_CONFIG | grep -q CONFIG_STRICT_MODULE_RWX=y || fail "CONFIG_STRICT_MODULE_RWX=y"
fi
if [ "$kernelMajor" -eq 5 ] || [ "$kernelMajor" -eq 4 -a "$kernelMinor" -ge 5 ]; then
  echo $UNZIPPED_CONFIG | grep -q CONFIG_RANDOMIZE_BASE=y || fail "CONFIG_RANDOMIZE_BASE=y"
fi
# Positive cases conditional on architecture and/or kernel version
if [ "$arch" = "x86_64" ]; then
  echo $UNZIPPED_CONFIG | grep -q CONFIG_LEGACY_VSYSCALL_NONE=y || fail "CONFIG_LEGACY_VSYSCALL_NONE=y"
  echo $UNZIPPED_CONFIG | grep -q CONFIG_PAGE_TABLE_ISOLATION=y || fail "CONFIG_PAGE_TABLE_ISOLATION=y"
  echo $UNZIPPED_CONFIG | grep -q CONFIG_RETPOLINE=y || fail "CONFIG_RETPOLINE=y"
  echo $UNZIPPED_CONFIG | grep -q CONFIG_GENERIC_CPU_VULNERABILITIES=y || fail "CONFIG_GENERIC_CPU_VULNERABILITIES=y"
  if [ "$kernelMajor" -eq 5 ] || [ "$kernelMajor" -eq 4 -a "$kernelMinor" -ge 5 ]; then
    echo $UNZIPPED_CONFIG | grep -q CONFIG_IO_STRICT_DEVMEM=y || fail "CONFIG_IO_STRICT_DEVMEM=y"
  fi
  if [ "$kernelMajor" -eq 5 ] || [ "$kernelMajor" -eq 4 -a "$kernelMinor" -ge 8 ]; then
    echo $UNZIPPED_CONFIG | grep -q CONFIG_RANDOMIZE_MEMORY=y || fail "CONFIG_RANDOMIZE_MEMORY=y"
  fi
fi
# Negative cases (the option must be explicitly disabled in the config)
echo $UNZIPPED_CONFIG | grep -q 'CONFIG_COMPAT_BRK is not set' || fail "CONFIG_COMPAT_BRK is not set"
echo $UNZIPPED_CONFIG | grep -q 'CONFIG_SCSI_PROC_FS is not set' || fail "CONFIG_SCSI_PROC_FS is not set"
# Negative cases conditional on architecture and/or kernel version
if [ "$arch" = "x86_64" ]; then
  echo $UNZIPPED_CONFIG | grep -q 'CONFIG_ACPI_CUSTOM_METHOD is not set' || fail "CONFIG_ACPI_CUSTOM_METHOD is not set"
  echo $UNZIPPED_CONFIG | grep -q 'CONFIG_COMPAT_VDSO is not set' || fail "CONFIG_COMPAT_VDSO is not set"
  echo $UNZIPPED_CONFIG | grep -q 'CONFIG_KEXEC is not set' || fail "CONFIG_KEXEC is not set"
  echo $UNZIPPED_CONFIG | grep -q 'CONFIG_X86_X32 is not set' || fail "CONFIG_X86_X32 is not set"
  echo $UNZIPPED_CONFIG | grep -q 'CONFIG_MODIFY_LDT_SYSCALL is not set' || fail "CONFIG_MODIFY_LDT_SYSCALL is not set"
  if [ "$kernelMajor" -eq 5 ] || [ "$kernelMajor" -eq 4 -a "$kernelMinor" -ge 5 ]; then
    echo $UNZIPPED_CONFIG | grep -q 'CONFIG_LEGACY_PTYS is not set' || fail "CONFIG_LEGACY_PTYS is not set"
    echo $UNZIPPED_CONFIG | grep -q 'CONFIG_HIBERNATION is not set' || fail "CONFIG_HIBERNATION is not set"
  fi
  # DEVKMEM was removed with 5.13.x (Note this check is not quote accurate but we are not adding
  # older kernels like e.g. 4.11 anymore.
  if [ "$kernelMajor" -le 5 ] && [ "$kernelMinor" -lt 13 ]; then
    echo $UNZIPPED_CONFIG | grep -q 'CONFIG_DEVKMEM is not set' || fail "CONFIG_DEVKMEM is not set"
  fi
fi
# modprobe
# Best-effort load of modular filesystems so the built-in check below can see
# them in /proc/filesystems; failures are deliberately ignored.
for mod in \
  nfs \
  nfsd \
  ntfs
do
  modprobe $mod 2>/dev/null || true
done
# check filesystems that are built in
for fs in \
  sysfs \
  tmpfs \
  bdev \
  proc \
  cpuset \
  cgroup \
  devtmpfs \
  binfmt_misc \
  debugfs \
  tracefs \
  securityfs \
  sockfs \
  bpf \
  pipefs \
  ramfs \
  hugetlbfs \
  rpc_pipefs \
  devpts \
  ext4 \
  vfat \
  msdos \
  iso9660 \
  nfs \
  nfs4 \
  nfsd \
  cifs \
  ntfs \
  fuseblk \
  fuse \
  fusectl \
  overlay \
  udf \
  xfs \
  9p \
  pstore \
  mqueue
do
  grep -q "[[:space:]]${fs}\$" /proc/filesystems || fail "${fs} filesystem missing"
done
# $FAILED is only ever set (to 1) by fail(); empty/unset means every check passed.
if [ -z "$FAILED" ]
then
  echo "kernel config test succeeded!"
else
  echo "kernel config test failed!"
  exit 1
fi
|
deitch/linuxkit
|
test/pkg/kernel-config/check-kernel-config.sh
|
Shell
|
apache-2.0
| 7,411 |
# Deploy cluster monitoring into the "default" namespace; NAMESPACE is read by
# the deploy script. The path is relative, so run this from the repo root.
NAMESPACE=default hack/cluster-monitoring/deploy
|
BrentDorsey/pipeline
|
metrics.ml/kube-prometheus/deploy.sh
|
Shell
|
apache-2.0
| 49 |
#!/bin/bash
usage() {
  # Print the command-line help on stdout, then terminate the script.
  cat <<EOF

Usage: $0 -u <psql_user> -d <database_name> -h <host> -p <port> -s <chado prebuilt schema> [ -r ]

Options:
 -u : PostgreSQL username
 -d : Name of the database to which the chado schema and ontologies are to be loaded
 -h : Database host (default: localhost)
 -p : Port (default: 5432)
 -s : Chado schema to load (*sql.gz)
 -r : Flag that triggers pg_dump if database already exists (optional)

EOF
  exit
}
check_config() {
  # Resolve the required PostgreSQL client tools, aborting when any is
  # missing. Results are kept in globals for potential later use.
  # Fixes: `command -v` replaces the non-portable `which`, and the lookups
  # are quoted -- the original `[ -x $VAR ]` collapsed to the one-argument
  # (always-true) test `[ -x ]` when a tool was absent, so the check could
  # never fire.
  PSQL_CREATEDB=$(command -v createdb)
  PSQL_DROPDB=$(command -v dropdb)
  PSQL_EXEC=$(command -v psql)
  if ! [ -x "$PSQL_EXEC" ] ; then
    echo "You must install PostgreSQL and 'psql' must be accessible in current PATH"
    exit
  fi
  if ! [ -x "$PSQL_CREATEDB" ] ; then
    echo "'createdb' must be accessible in current PATH"
    exit
  fi
  if ! [ -x "$PSQL_DROPDB" ] ; then
    echo "'dropdb' must be accessible in current PATH"
    exit
  fi
}
load_chado_schema() {
  # Load $CHADO_SCHEMA into database $2 as user $1 via host $3 / port $4,
  # transparently gunzipping a *.gz dump. Output goes to $STDLOG.
  # Fixes: parameter expansion replaces the `echo`+backticks extension probe,
  # all expansions are quoted, and file_type no longer leaks globally.
  local file_type=${CHADO_SCHEMA##*.}
  echo "Loading chado schema ${CHADO_SCHEMA} to database '$2'."
  if [ "$file_type" = "gz" ]; then
    gunzip -c "$CHADO_SCHEMA" | psql -U "$1" -d "$2" -h "$3" -p "$4" &> "$STDLOG"
  else
    psql -U "$1" -d "$2" -h "$3" -p "$4" < "$CHADO_SCHEMA" &> "$STDLOG"
  fi
}
dump_database() {
  # Back up database $2 (user $1, host $3, port $4) to a timestamped .sql
  # file, including large objects (-b). OUTPUT intentionally stays global.
  # Fix: all expansions quoted so names with spaces survive.
  OUTPUT="${2}_database_dump_${TIMESTAMP}.sql"
  # Echo the exact command first so it shows up in the console log.
  echo pg_dump -U "$1" -d "$2" -h "$3" -p "$4" -f "$OUTPUT" -b
  pg_dump -U "$1" -d "$2" -h "$3" -p "$4" -f "$OUTPUT" -b
}
# Default
PORT="5432"
HOST="localhost"
DEFAULTDB="template1"   # maintenance DB used to issue CREATE/DROP DATABASE
PGDUMP=0                # set to 1 by -r: back up a pre-existing DB first
TIMESTAMP=`date +%F_%T`
STDLOG="load_chado_schema_${TIMESTAMP}.log"
if [ $# -eq 0 ]; then
  usage
  exit
fi
# Parse CLI flags (see usage()).
while getopts "h:p:u:d:s:r" opt; do
  case "$opt" in
    h) HOST=$OPTARG
      ;;
    p) PORT=$OPTARG
      ;;
    u) PG_USER=$OPTARG
      ;;
    d) DB=$OPTARG
      ;;
    s) CHADO_SCHEMA=$OPTARG
      ;;
    r) PGDUMP=1
      ;;
    *) usage
      ;;
    # NOTE(review): this `?` arm is unreachable -- the `*` arm above already
    # matches everything, including the `?` getopts emits for unknown flags.
    ?) usage
      ;;
  esac
done
# User, database and schema file are all mandatory.
if [ -z "${PG_USER}" ] || [ -z "${DB}" ] || [ -z "${CHADO_SCHEMA}" ] ; then
  usage
fi
if [ ! -f ${CHADO_SCHEMA} ]; then
  echo "File ${CHADO_SCHEMA} not found."
  exit
fi
check_config
# Attempt to create the target database; the exit status distinguishes a
# fresh database (0) from one that already exists (1).
psql -U $PG_USER -h $HOST -p $PORT -d $DEFAULTDB -c "CREATE DATABASE \"$DB\""
EXIT_STATUS=$?
if [ $EXIT_STATUS -eq 0 ]; then
  # CREATE DATABASE command was successful (i.e. there was no pre-existing database with the same name)
  load_chado_schema $PG_USER $DB $HOST $PORT
elif [ $EXIT_STATUS -eq 1 ]; then
  # CREATE DATABASE command was unsuccessful because there was a pre-existing database with the same name
  # NOTE(review): psql also exits 1 for other SQL-level errors; "already
  # exists" is assumed here -- confirm that assumption holds.
  if [ $PGDUMP -eq 1 ]; then
    # -r option was provided at run-time; will attempt to backup the existing database
    echo "Database '$DB' already exists. Backing up data via pg_dump."
    dump_database $PG_USER $DB $HOST $PORT
    if [ $? -eq 0 ]; then
      # PG_DUMP was successful
      echo "pg_dump was successful."
      echo "Dropping and creating database '$DB'."
      # DROP DATABASE after a PG_DUMP
      psql -U $PG_USER -h $HOST -p $PORT -d $DEFAULTDB -c "DROP DATABASE \"$DB\""
      if [ $? -ne 0 ]; then
        echo "Cannot drop database '$DB' due to lack of privileges or existing open connections."
        exit
      fi
      # CREATE DATABASE
      psql -U $PG_USER -h $HOST -p $PORT -d $DEFAULTDB -c "CREATE DATABASE \"$DB\""
      if [ $? -ne 0 ]; then
        echo "Cannot create database '$DB' due to lack of privileges."
        exit
      fi
      # finally, load chado schema
      load_chado_schema $PG_USER $DB $HOST $PORT
    fi
  else
    # -r option was not provided at run-time; will not try to do anything
    echo "Database '$DB' already exists. If you would like to do a pg_dump, to backup its contents, run the script again with '-r' flag."
  fi
else
  # CREATE DATABASE command was unsuccessful for other reasons
  echo "Cannot create database '$DB' due to improper connection parameters, lack of privileges or non-existent user '$PG_USER'."
  exit
fi
echo "Chado schema loaded successfully to ${DB}. Check $STDLOG for more information."
|
erasche/Apollo
|
scripts/load_chado_schema.sh
|
Shell
|
bsd-3-clause
| 4,356 |
gem install bundler
bundle exec rake spec
# SURF_BUILD_NAME is set by the Surf CI runner; when absent we are on CI
# proper and danger posts to GitHub, otherwise we only print locally.
# Fix: "[-z" was missing the space after "[", so the test always errored
# and the else-branch ran unconditionally.
if [ -z "$SURF_BUILD_NAME" ]; then
  ## Posting to GitHub
  bundle exec danger
else
  ## Local clean build, just print to console
  bundle exec danger local
fi
|
danger/danger
|
build.sh
|
Shell
|
mit
| 198 |
#!/bin/bash
# conda post-link hook: download the JASPAR2014 Bioconductor tarball from the
# first mirror whose md5 verifies, install it into the conda R library, and
# clean up the staging area.
FN="JASPAR2014_1.26.0.tar.gz"
URLS=(
  "https://bioconductor.org/packages/3.12/data/experiment/src/contrib/JASPAR2014_1.26.0.tar.gz"
  "https://bioarchive.galaxyproject.org/JASPAR2014_1.26.0.tar.gz"
  "https://depot.galaxyproject.org/software/bioconductor-jaspar2014/bioconductor-jaspar2014_1.26.0_src_all.tar.gz"
)
MD5="04bec9056564f8ace769c30b929d475c"
# Use a staging area in the conda dir rather than temp dirs, both to avoid
# permission issues as well as to have things downloaded in a predictable
# manner.
STAGING=$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM
mkdir -p "$STAGING"
TARBALL=$STAGING/$FN
SUCCESS=0
for URL in "${URLS[@]}"; do
  # On download failure just try the next mirror.
  curl "$URL" > "$TARBALL" || continue
  # Platform-specific md5sum checks.
  if [[ $(uname -s) == "Linux" ]]; then
    # NOTE(review): md5sum -c conventionally expects two spaces between
    # checksum and filename -- confirm this single-space line verifies on
    # the target coreutils.
    if md5sum -c <<<"$MD5 $TARBALL"; then
      SUCCESS=1
      break
    fi
  elif [[ $(uname -s) == "Darwin" ]]; then
    if [[ $(md5 "$TARBALL" | cut -f4 -d " ") == "$MD5" ]]; then
      SUCCESS=1
      break
    fi
  fi
done
if [[ $SUCCESS != 1 ]]; then
  echo "ERROR: post-link.sh was unable to download any of the following URLs with the md5sum $MD5:"
  printf '%s\n' "${URLS[@]}"
  exit 1
fi
# Install and clean up
R CMD INSTALL --library="$PREFIX/lib/R/library" "$TARBALL"
rm "$TARBALL"
rmdir "$STAGING"
|
bebatut/bioconda-recipes
|
recipes/bioconductor-jaspar2014/post-link.sh
|
Shell
|
mit
| 1,302 |
# This script is an improvement over the default grub-install '(hd0)'
#
# However the following issues still exist:
#
# * We don't know what the first disk will be, so we cannot be sure the MBR
# is written to the correct disk(s). That's why we make all disks bootable.
#
# * There is no guarantee that GRUB was the boot loader used originally. One
# solution is to save and restore the MBR for each disk, but this does not
# guarantee a correct boot-order, or even a working boot-lader config (eg.
# GRUB stage2 might not be at the exact same location)
# Sourced by the rear finalize stage: (re)install GRUB Legacy into the MBR of
# every restored disk. Clears $NOBOOTLOADER on success.
# skip if another bootloader was installed
if [[ -z "$NOBOOTLOADER" ]] ; then
  return
fi
# for UEFI systems with grub legacy with should use efibootmgr instead
(( USING_UEFI_BOOTLOADER )) && return # set to 1 means UEFI booting
# check the BOOTLOADER variable (read by 01_prepare_checks.sh script)
if [[ "$BOOTLOADER" = "GRUB" ]]; then
  if [[ $(type -p grub-probe) || $(type -p grub2-probe) ]]; then
    # grub2 script should handle this instead
    return
  fi
fi
# Only for GRUB Legacy - GRUB2 will be handled by its own script
if [[ -z "$(type -p grub)" ]]; then
  return
fi
LogPrint "Installing GRUB boot loader"
# grub inside the chroot needs /proc for device detection.
mount -t proc none /mnt/local/proc
if [[ -r "$LAYOUT_FILE" && -r "$LAYOUT_DEPS" ]]; then
  # Check if we find GRUB stage 2 where we expect it
  [[ -d "/mnt/local/boot" ]]
  StopIfError "Could not find directory /boot"
  [[ -d "/mnt/local/boot/grub" ]]
  StopIfError "Could not find directory /boot/grub"
  [[ -r "/mnt/local/boot/grub/stage2" ]]
  StopIfError "Unable to find /boot/grub/stage2."
  # Find exclusive partition(s) belonging to /boot
  # or / (if /boot is inside root filesystem)
  if [[ "$(filesystem_name /mnt/local/boot)" == "/mnt/local" ]]; then
    bootparts=$(find_partition fs:/)
    grub_prefix=/boot/grub
  else
    bootparts=$(find_partition fs:/boot)
    grub_prefix=/grub
  fi
  # Should never happen
  [[ "$bootparts" ]]
  BugIfError "Unable to find any /boot partitions"
  # Find the disks that need a new GRUB MBR
  disks=$(grep '^disk \|^multipath ' $LAYOUT_FILE | cut -d' ' -f2)
  [[ "$disks" ]]
  StopIfError "Unable to find any disks"
  for disk in $disks; do
    # Use first boot partition by default
    part=$(echo $bootparts | cut -d' ' -f1)
    # Use boot partition that matches with this disk, if any
    for bootpart in $bootparts; do
      bootdisk=$(find_disk_and_multipath "$bootpart")
      if [[ "$disk" == "$bootdisk" ]]; then
        part=$bootpart
        break
      fi
    done
    # Find boot-disk and partition number
    bootdisk=$(find_disk_and_multipath "$part")
    # Strip the disk prefix (and an optional "p" separator) to get the
    # partition number; GRUB Legacy counts partitions from 0, hence the -1.
    partnr=${part#$bootdisk}
    partnr=${partnr#p}
    partnr=$((partnr - 1))
    if [[ "$bootdisk" == "$disk" ]]; then
      # Best case scenario is to have /boot on disk with MBR booted
      chroot /mnt/local grub --batch --no-floppy >&2 <<EOF
device (hd0) $disk
root (hd0,$partnr)
setup --stage2=/boot/grub/stage2 --prefix=$grub_prefix (hd0)
quit
EOF
    else
      # hd1 is a best effort guess, we cannot predict how numbering
      # changes when a disk fails.
      chroot /mnt/local grub --batch --no-floppy >&2 <<EOF
device (hd0) $disk
device (hd1) $bootdisk
root (hd1,$partnr)
setup --stage2=/boot/grub/stage2 --prefix=$grub_prefix (hd0)
quit
EOF
    fi
    if (( $? == 0 )); then
      NOBOOTLOADER=
    fi
  done
fi
# Last resort: plain grub-install into (hd0) when the layout-driven path
# above did not succeed.
if [[ "$NOBOOTLOADER" ]]; then
  if chroot /mnt/local grub-install '(hd0)' >&2 ; then
    NOBOOTLOADER=
  fi
fi
umount /mnt/local/proc
|
krissi/rear
|
usr/share/rear/finalize/Linux-i386/21_install_grub.sh
|
Shell
|
gpl-2.0
| 3,695 |
#!/bin/sh
# Time the Hadoop wordcount example against a jobtracker on localhost:$1,
# writing to a randomized output directory so repeated runs don't collide.
PORT=$1
time bin/hadoop jar build/hadoop-0.20.1-dev-examples.jar wordcount -jt localhost:$PORT randtext2 out-$RANDOM
|
pwendell/mesos
|
frameworks/hadoop-0.20.2/experiment-scripts/wc.sh
|
Shell
|
apache-2.0
| 127 |
#!/bin/bash
# Fetch a pinned SBT distribution and symlink ./sbt to the unpacked launcher.
VERSION=0.13.1
URL=http://repo.scala-sbt.org/scalasbt/sbt-native-packages/org/scala-sbt/sbt/$VERSION/sbt.zip
usage() {
  # Explain the (optional) -f flag on stdout and abort with status 1.
  cat <<EOF

 Usage: $0 [-f]

 Attempts to retrieve SBT $VERSION from $URL.
 If sbt.dir already exists, this will be skipped, unless the -f flag is specified.

EOF
  exit 1
}
# Work relative to this script's directory.
cd "$(dirname "$0")"
# Skip the download when sbt.dir already exists, unless -f forces it.
# (Two chained tests replace the deprecated/ambiguous `[ ... -a ... ]`.)
[ "$1" != "-f" ] && [ -d sbt.dir ] && usage
if hash wget 2>/dev/null; then
  wget -O /tmp/sbt.zip "$URL"
elif hash curl 2>/dev/null; then
  curl -o /tmp/sbt.zip "$URL"
else
  echo "You need curl or wget installed to download sbt."
  usage
fi
# Unpack, rename to sbt.dir, and expose the launcher as ./sbt.
rm -fr sbt ; unzip /tmp/sbt.zip ; mv sbt sbt.dir
ln -s sbt.dir/bin/sbt sbt
|
ddf-project/ddf-jdbc
|
bin/get-sbt.sh
|
Shell
|
apache-2.0
| 680 |
#!/usr/bin/env bash
# Build the OpenShift-patched Kubernetes control-plane binaries and, when the
# git tree is clean, record the release commit hash.
STARTTIME=$(date +%s)
# shellcheck source=openshift-hack/lib/init.sh
source "$(dirname "${BASH_SOURCE[0]}")/lib/init.sh"
pushd "${OS_ROOT}" > /dev/null || exit 1
make all WHAT='cmd/kube-apiserver cmd/kube-controller-manager cmd/kube-scheduler cmd/kubelet'
popd > /dev/null || exit 1
os::build::version::git_vars
if [[ "${OS_GIT_TREE_STATE:-dirty}" == "clean" ]]; then
  # only when we are building from a clean state can we claim to
  # have created a valid set of binaries that can resemble a release
  mkdir -p "${OS_OUTPUT_RELEASEPATH}"
  echo "${OS_GIT_COMMIT}" > "${OS_OUTPUT_RELEASEPATH}/.commit"
fi
# NOTE(review): $? here reflects the preceding if-block, not the make above,
# so ret is effectively always 0 unless init.sh enables errexit -- confirm
# that is the intent.
ret=$?; ENDTIME=$(date +%s); echo "$0 took $((ENDTIME - STARTTIME)) seconds"; exit "$ret"
|
openshift/kubernetes
|
openshift-hack/build-go.sh
|
Shell
|
apache-2.0
| 725 |
#!/bin/bash
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Starts a Kubernetes cluster, runs the e2e test suite, and shuts it
# down.
# For debugging of this test's components, it's helpful to leave the test
# cluster running.
ALREADY_UP=${1:-0}   # 1 = reuse a running cluster (push instead of bring-up)
LEAVE_UP=${2:-0}     # 1 = keep the cluster running after the suite finishes
# Exit on error
set -e
# Fix: `which` exits non-zero when jq is absent; with errexit active that
# killed the script before the friendly install hint below could print.
# Neutralize the status so the empty-string check drives the message.
HAVE_JQ=$(which jq || true)
if [[ -z ${HAVE_JQ} ]]; then
  echo "Please install jq, e.g.: 'sudo apt-get install jq' or, "
  echo "if you're on a mac with homebrew, 'brew install jq'."
  exit 1
fi
# Use testing config
export KUBE_CONFIG_FILE="config-test.sh"
export KUBE_REPO_ROOT="$(dirname $0)/.."
export CLOUDCFG="${KUBE_REPO_ROOT}/cluster/cloudcfg.sh"
source "${KUBE_REPO_ROOT}/cluster/util.sh"
source "${KUBE_REPO_ROOT}/hack/build-go.sh"
# Build a release
$(dirname $0)/../release/release.sh
if [[ ${ALREADY_UP} -ne 1 ]]; then
  # Now bring a test cluster up with that release.
  $(dirname $0)/../cluster/kube-up.sh
else
  # Just push instead
  $(dirname $0)/../cluster/kube-push.sh
fi
# Detect the project into $PROJECT if it isn't set
detect-project
# NOTE(review): errexit is dropped from here on, presumably so an
# already-existing firewall rule (or failing e2e script) doesn't abort the
# whole run -- confirm.
set +e
if [[ ${ALREADY_UP} -ne 1 ]]; then
  # Open up port 80 & 8080 so common containers on minions can be reached
  gcutil addfirewall \
    --norespect_terminal_width \
    --project ${PROJECT} \
    --target_tags ${MINION_TAG} \
    --allowed tcp:80,tcp:8080 \
    --network ${NETWORK} \
    ${MINION_TAG}-http-alt
fi
# Auto shutdown cluster when we exit
function shutdown-test-cluster () {
  # Delete the http-alt firewall rule and tear the cluster down, both in the
  # background so the test process itself can exit promptly.
  echo "Shutting down test cluster in background."
  gcutil deletefirewall \
    --project ${PROJECT} \
    --norespect_terminal_width \
    --force \
    ${MINION_TAG}-http-alt &
  $(dirname $0)/../cluster/kube-down.sh > /dev/null &
}
if [[ ${LEAVE_UP} -ne 1 ]]; then
  trap shutdown-test-cluster EXIT
fi
# Run every script in e2e-suite/; record failures but keep going so one bad
# test doesn't mask the rest.
any_failed=0
for test_file in $(ls $(dirname $0)/e2e-suite/); do
  "$(dirname $0)/e2e-suite/${test_file}"
  result="$?"
  if [[ "${result}" -eq "0" ]]; then
    echo "${test_file} returned ${result}; passed!"
  else
    echo "${test_file} returned ${result}; FAIL!"
    any_failed=1
  fi
done
if [[ ${any_failed} -ne 0 ]]; then
  echo "At least one test failed."
fi
exit ${any_failed}
|
discordianfish/kubernetes
|
hack/e2e-test.sh
|
Shell
|
apache-2.0
| 2,689 |
# publishUserGadget.sh {gadgetName}
# Republish a user gadget: fetch its entry with any line containing an <id>
# element blanked out, delete the old entry, re-insert the cleaned entity,
# then publish it. Each step only runs if the previous one succeeded.
TEMP_ENTITY_FILE=$0___temp-entity.xml
# Prompt once (silently) for the password and reuse it for every sub-command.
read -s -p password: password
./getUserEntry.sh PrivateGadgetSpec $1 -password $password | sed 's/.*<id>.*<\/id>.*//' > $TEMP_ENTITY_FILE && ./deleteEntry.sh PrivateGadgetSpec $1 -password $password && ./insertEntry.sh PrivateGadgetSpec $TEMP_ENTITY_FILE -password $password && ./publishGadget.sh $1 -password $password
rm -f $TEMP_ENTITY_FILE
|
fbpatel/google-feedserver
|
resources/clientTool/publishUserGadget.sh
|
Shell
|
apache-2.0
| 421 |
#!/bin/bash -e
# Map image tags to docker build context paths (relative to this script).
# Fix: string subscripts require an *associative* array. With the original
# `declare -a`, the quoted keys are evaluated arithmetically (slashes/colons
# make that an error or index 0), so the map was broken.
declare -A dockerfiles
dockerfiles=(
  ["heywill/will:python2.7$CTAG"]="/will/will-py2/"
  ["heywill/will:python3.7$CTAG"]="/will/will-py3/")
build_containers() {
  # Build one image per entry in the dockerfiles map, using the context path
  # resolved against this script's real location.
  # Fixes: tag/path expansions quoted; the loop-invariant dirname resolution
  # hoisted out of the loop.
  local tag script_dir
  script_dir=$(dirname "$(readlink -f "${BASH_SOURCE[0]}")")
  for tag in "${!dockerfiles[@]}"; do
    echo "building $tag with context ${dockerfiles[$tag]}"
    docker build -t "$tag" "${script_dir}${dockerfiles[$tag]}"
    echo ""
  done
}
tag_production(){
  # Promote the commit-tagged builds to the stable tags; python3.7 is also
  # promoted to :latest.
  docker tag heywill/will:python2.7$CTAG heywill/will:python2.7
  # Fix: the message previously claimed the py2 image was tagged :latest,
  # but the command above tags it heywill/will:python2.7.
  echo "tagged heywill/will:python2.7$CTAG as heywill/will:python2.7"
  docker tag heywill/will:python3.7$CTAG heywill/will:python3.7
  docker tag heywill/will:python3.7$CTAG heywill/will:latest
  echo "tagged heywill/will:python3.7$CTAG as heywill/will:latest & heywill/will:python3.7"
}
push_containers(){
  # Publish the stable tags; tag_production runs first so :latest points at
  # the python3.7 build.
  tag_production
  # NOTE(review): heywill/will-base is pushed but never built or tagged in
  # this script -- confirm it is produced elsewhere before relying on this.
  docker push heywill/will-base:latest
  docker push heywill/will:latest
}
echo "Building with COMMIT TAG: $CTAG"
# Dispatch on the single CLI flag: --all builds then pushes, --build and
# --push do one of the two; anything else is an error.
case $1 in
  "--all")
    build_containers
    push_containers
    ;;
  "--build")
    build_containers
    ;;
  "--push")
    push_containers
    ;;
  *)
    echo "You did something wrong"
    exit 1
    ;;
esac
|
skoczen/will
|
docker/buildwillcontainers.sh
|
Shell
|
mit
| 1,254 |
#!/bin/bash
# Cross-compile the CrossEPG enigma2 plugin for the DM800se (mipsel) with the
# OpenEmbedded toolchain and package the result as an .ipk in ./out.
# Fix: the original `#!/bin/sh` shebang is wrong -- `export CFLAGS+=...` is
# a bashism that fails under dash-style /bin/sh, so run bash explicitly.
[ -d ./tmp ] && rm -rf ./tmp
DEVKIT_ROOT=/mnt/devel/workspace/openee_build/dm800se/build/tmp
CROSS=${DEVKIT_ROOT}/cross/mipsel/bin/mipsel-oe-linux-
export CFLAGS+="-I${DEVKIT_ROOT}/staging/mipsel-oe-linux/usr/include \
 -I${DEVKIT_ROOT}/staging/mipsel-oe-linux/usr/include/libxml2 \
 -I${DEVKIT_ROOT}/staging/mipsel-oe-linux/usr/include/python2.6"
export CC=${CROSS}gcc
export STRIP=${CROSS}strip
export SWIG=${DEVKIT_ROOT}/staging/x86_64-linux/usr/bin/swig
export D=./tmp          # install destination consumed by the Makefile
make && make install
if [ $? != 0 ]; then
  echo compile error
  exit 1
fi
# Assemble the ipkg CONTROL metadata.
mkdir -p tmp/CONTROL
cp contrib/control tmp/CONTROL/
# Extract the RELEASE string from version.h; the sed chain presumably turns
# something like `#define RELEASE "1.0 (beta)"` into `1.0-beta` -- confirm
# against the actual header format.
VERSION=`cat src/version.h | grep RELEASE | sed "s/.*RELEASE \"//" | sed "s/\"//" | sed "s/\ /-/" | sed "s/\ /-/" | sed "s/(//" | sed "s/)//"`
echo "Package: enigma2-plugin-systemplugins-crossepg" >> tmp/CONTROL/control
echo "Version: $VERSION-r0" >> tmp/CONTROL/control
echo "Architecture: mipsel" >> tmp/CONTROL/control
sh ipkg-build -o root -g root tmp/
[ ! -d out ] && mkdir out
mv *.ipk out
echo "Package moved in `pwd`/out folder"
|
tension9000/e2openplugin-CrossEPG
|
make_e2_oe1.6.sh
|
Shell
|
lgpl-2.1
| 1,054 |
#!/bin/bash
# Release helper tasks for this package: linting, changelog generation and
# npm publishing. The functions below are invoked individually.
# strict mode http://redsymbol.net/articles/unofficial-bash-strict-mode/
set -euo pipefail
IFS=$'\n\t'
lint() {
  # Run eslint with only the project's own config, defaulting to the current
  # directory when no paths are given.
  # Fix: the expansion is quoted -- unquoted ${@-.} word-split any path
  # containing spaces.
  eslint --no-eslintrc --config .eslintrc "${@-.}"
}
function git_require_clean_work_tree(){
  # Exit non-zero (showing the diff) when there are unstaged changes.
  # Note: without --cached this does not detect staged-but-uncommitted
  # changes.
  git diff --exit-code
}
find_changelog_file() {
  # Print the changelog filename: the first file (in sorted glob order)
  # whose name starts with "change" or "history" case-insensitively,
  # falling back to CHANGELOG.md when none exists.
  # Fixes: replaces the `ls | egrep` anti-pattern; under the script's
  # `set -eo pipefail` the old failing pipeline-in-assignment could abort
  # the whole script when no changelog matched.
  local f
  for f in *; do
    case "$f" in
      [Cc][Hh][Aa][Nn][Gg][Ee]*|[Hh][Ii][Ss][Tt][Oo][Rr][Yy]*)
        printf '%s\n' "$f"
        return 0
        ;;
    esac
  done
  printf '%s\n' "CHANGELOG.md"
}
function find_last_git_tag(){
  # Print the highest tag by version-number order (sort -V), not by date.
  git tag -l | sort -V | tail -n 1
}
# based on https://github.com/tj/git-extras/blob/master/bin/git-changelog
function generate_git_changelog(){
  # Prepend a markdown section listing the latest tag's commits to the
  # changelog, then open it in $EDITOR for manual touch-up.
  # NOTE(review): the `local VAR=$(cmd)` pattern masks command failures
  # under the script's set -e -- confirm that is acceptable here.
  GIT_LOG_OPTS="--no-merges"
  local DATE=$(date +'%Y-%m-%d')
  local HEAD='## '
  # get the commits between the most recent tag and the second most recent
  local lasttag=$(find_last_git_tag)
  local version=$(git describe --tags --abbrev=0 "$lasttag" 2>/dev/null)
  local previous_version=$(git describe --tags --abbrev=0 "$lasttag^" 2>/dev/null)
  # if we don't have a previous version to look at
  if test -z "$version"; then
    local head="$HEAD$DATE"
    local changes=$(git log $GIT_LOG_OPTS --pretty="format:* %s%n" 2>/dev/null)
  # the more common case, there's a version to git the changes betwen
  else
    local head="$HEAD$version | $DATE"
    # tail to get remove the first line, which will always just be the version commit
    # awk to remove empty lines
    local changes=$(tail -n +2 <<< "$(git log $GIT_LOG_OPTS --pretty="format:* %s%n" "$previous_version..$version" 2>/dev/null)" | awk NF)
  fi
  local CHANGELOG=$(find_changelog_file)
  echo "Editing $CHANGELOG"
  # insert the changes after the header (assumes markdown)
  # this shells out to node b/c I couldn't figure out how to do it with awk
  local tmp_changelog=/tmp/changelog
  node -e "console.log(require('fs').readFileSync(process.argv[1]).toString().replace(/(#.*?\n\n)/, '\$1' + process.argv.slice(2).join('\n') + '\n\n'))" "$CHANGELOG" "$head" "$changes" > $tmp_changelog
  # open the changelog in the editor for editing
  test -n "$EDITOR" && $EDITOR $tmp_changelog
  mv $tmp_changelog "$CHANGELOG"
}
function git_ammend_tag(){
  # Fold the regenerated changelog into the previous commit and move the
  # last tag onto the amended commit. (The "ammend" typo in the name is
  # kept: external callers may reference it.)
  git add "$(find_changelog_file)"
  git commit --amend --no-edit --no-verify
  git tag "$(find_last_git_tag)" -f
}
# Cuts an npm release: bumps the version ($1, defaulting to "patch"),
# regenerates the changelog, folds it into the release commit/tag, pushes,
# and publishes. Each step runs only if the previous one succeeded.
function npm_release(){
  # ${1:-patch} collapses the original if/else default into one expansion.
  local version="${1:-patch}"
  npm version "$version" \
    && generate_git_changelog \
    && git_ammend_tag \
    && npm run gitPush \
    && npm publish
}
|
joeybaker/jscs-minimal
|
scripts.sh
|
Shell
|
artistic-2.0
| 2,570 |
#!/bin/sh
##
## Copyright (c) 2014 The WebM project authors. All Rights Reserved.
##
## Use of this source code is governed by a BSD-style license
## that can be found in the LICENSE file in the root of the source
## tree. An additional intellectual property rights grant can be found
## in the file PATENTS. All contributing project authors may
## be found in the AUTHORS file in the root of the source tree.
##
## This file contains shell code shared by test scripts for libvpx tools.
# Use $VPX_TEST_TOOLS_COMMON_SH as a pseudo include guard.
# Pseudo include guard: process the body only if this file has not already
# been sourced into the current shell.
if [ -z "${VPX_TEST_TOOLS_COMMON_SH}" ]; then
VPX_TEST_TOOLS_COMMON_SH=included
# Exit on the first unhandled command failure.
set -e
# Redirection suffix used to hide program output; cleared by
# --show-program-output. NOTE(review): presumably expanded via eval by the
# scripts that source this file — not visible here, confirm.
devnull='> /dev/null 2>&1'
VPX_TEST_PREFIX=""
# Error log: prints all arguments as one line on stderr.
elog() {
  echo "$@" >&2
}
# Verbose log: prints its arguments to stdout only when
# VPX_TEST_VERBOSE_OUTPUT=yes. Always returns 0 (safe under set -e).
vlog() {
  case "${VPX_TEST_VERBOSE_OUTPUT}" in
    yes) echo "$@" ;;
  esac
}
# Sets $VPX_TOOL_TEST to the name specified by positional parameter one.
# Records the name of the test now running in VPX_TOOL_TEST; paired with
# test_end().
test_begin() {
  VPX_TOOL_TEST=$1
}
# Clears the VPX_TOOL_TEST variable after confirming that $VPX_TOOL_TEST matches
# positional parameter one.
# Clears VPX_TOOL_TEST after confirming that $1 names the test most recently
# started via test_begin(); on mismatch, reports the discrepancy and returns 1.
test_end() {
  if [ "${VPX_TOOL_TEST}" = "$1" ]; then
    VPX_TOOL_TEST='<unset>'
    return 0
  fi
  echo "FAIL completed test mismatch!."
  echo "  completed test: ${1}"
  echo "  active test: ${VPX_TOOL_TEST}."
  return 1
}
# Echoes the target configuration being tested.
# Echoes the target being tested: the TOOLCHAIN value from
# $LIBVPX_CONFIG_PATH/config.mk.
test_configuration_target() {
  # local keeps the scratch path out of the sourcing shell's namespace
  # (the original leaked vpx_config_mk as a global).
  local vpx_config_mk="${LIBVPX_CONFIG_PATH}/config.mk"
  # Split the TOOLCHAIN line on ':=' and print the last field (the value);
  # tr strips any leading/trailing spaces while echoing to stdout.
  awk -F ':=' '/TOOLCHAIN/ { print $NF }' "${vpx_config_mk}" | tr -d ' '
}
# Trap function used for failure reports and tool output directory removal.
# When the contents of $VPX_TOOL_TEST do not match the string '<unset>', reports
# failure of test stored in $VPX_TOOL_TEST.
# EXIT-trap handler: reports a still-active test as a failure and removes the
# per-run output directory.
cleanup() {
  # Anything other than the '<unset>' sentinel means a test was in flight
  # when the shell exited — report it as the failing test.
  if [ -n "${VPX_TOOL_TEST}" ] && [ "${VPX_TOOL_TEST}" != '<unset>' ]; then
    echo "FAIL: $VPX_TOOL_TEST"
  fi
  # Remove the temporary output tree, but only when it was actually created.
  if [ -n "${VPX_TEST_OUTPUT_DIR}" ] && [ -d "${VPX_TEST_OUTPUT_DIR}" ]; then
    rm -rf "${VPX_TEST_OUTPUT_DIR}"
  fi
}
# Echoes the git hash portion of the VERSION_STRING variable defined in
# $LIBVPX_CONFIG_PATH/config.mk to stdout, or the version number string when
# no git hash is contained in VERSION_STRING.
# Echoes the git-hash portion of VERSION_STRING from
# $LIBVPX_CONFIG_PATH/config.mk, or the plain version number when the string
# carries no "-g<hash>" suffix (non-dev tree).
config_hash() {
  # Localized: the original leaked both scratch variables into global scope.
  local vpx_config_mk="${LIBVPX_CONFIG_PATH}/config.mk"
  local vpx_version
  # Split the VERSION_STRING line on "-g" and keep the last field. The
  # separator is now quoted; the original's unquoted `-F -g` relied on the
  # shell passing "-g" through intact.
  vpx_version=$(awk -F '-g' '/VERSION_STRING/ {print $NF}' "${vpx_config_mk}")
  # Two situations:
  # 1. Dev tree: $vpx_version is the bare git hash — echoed unchanged.
  # 2. Release tree: no "-g" present, so $vpx_version is the whole line
  #    ('VERSION_STRING=v1.3.0'); strip everything through '='.
  echo "${vpx_version##*=}"
}
# Echoes the short form of the current git hash.
# Echoes the short git hash of the tree containing this script.
current_hash() {
  if git --version > /dev/null 2>&1; then
    # Subshell keeps the cd from leaking into the caller's working directory.
    (cd "$(dirname "${0}")"
    git rev-parse --short HEAD)
  else
    # Return the config hash if git is unavailable: Fail silently, git hashes
    # are used only for warnings.
    config_hash
  fi
}
# Echoes warnings to stdout when git hash in vpx_config.h does not match the
# current git hash.
# Emits a warning on stdout when the tree's current git hash no longer matches
# the hash recorded in config.mk at configure time.
check_git_hashes() {
  hash_at_configure_time=$(config_hash)
  hash_now=$(current_hash)
  # Nothing to report when the hashes still agree.
  if [ "${hash_now}" = "${hash_at_configure_time}" ]; then
    return 0
  fi
  echo "Warning: git hash has changed since last configure."
}
# This script requires that the LIBVPX_BIN_PATH, LIBVPX_CONFIG_PATH, and
# LIBVPX_TEST_DATA_PATH variables are in the environment: Confirm that
# the variables are set and that they all evaluate to directory paths.
# Confirms that LIBVPX_BIN_PATH, LIBVPX_CONFIG_PATH and LIBVPX_TEST_DATA_PATH
# each name an existing directory; prints which one is missing and returns 1
# on the first failure.
verify_vpx_test_environment() {
  [ -d "${LIBVPX_BIN_PATH}" ] || {
    echo "The LIBVPX_BIN_PATH environment variable must be set."
    return 1
  }
  [ -d "${LIBVPX_CONFIG_PATH}" ] || {
    echo "The LIBVPX_CONFIG_PATH environment variable must be set."
    return 1
  }
  [ -d "${LIBVPX_TEST_DATA_PATH}" ] || {
    echo "The LIBVPX_TEST_DATA_PATH environment variable must be set."
    return 1
  }
}
# Greps vpx_config.h in LIBVPX_CONFIG_PATH for positional parameter one, which
# should be a LIBVPX preprocessor flag. Echoes yes to stdout when the feature
# is available.
# Greps vpx_config.h in $LIBVPX_CONFIG_PATH for preprocessor flag $1 and
# echoes "yes" to stdout when the flag's value is 1 (feature enabled);
# echoes nothing otherwise.
vpx_config_option_enabled() {
  # Localized: the original leaked all three scratch variables globally.
  local vpx_config_option="${1}"
  local vpx_config_file="${LIBVPX_CONFIG_PATH}/vpx_config.h"
  local config_line
  config_line=$(grep "${vpx_config_option}" "${vpx_config_file}")
  # grep -E replaces the deprecated egrep; the flag line must end in "1".
  if echo "${config_line}" | grep -Eq '1$'; then
    echo yes
  fi
}
# Echoes yes when output of test_configuration_target() contains win32 or win64.
# Echoes yes when the configured target (per test_configuration_target)
# contains win32 or win64.
is_windows_target() {
  case "$(test_configuration_target)" in
    *win32* | *win64*) echo yes ;;
  esac
}
# Echoes path to $1 when it's executable and exists in ${LIBVPX_BIN_PATH}, or an
# empty string. Caller is responsible for testing the string once the function
# returns.
# Echoes the path to tool $1 (with $VPX_TEST_EXE_SUFFIX appended) when an
# executable exists in $LIBVPX_BIN_PATH or its parent directory, or an empty
# string otherwise. The caller is responsible for testing the echoed string.
vpx_tool_path() {
  # Fix: the original's `local readonly tool_name=...` actually declared a
  # stray local variable named "readonly"; plain `local` is what was intended.
  local tool_name="$1"
  local tool_path="${LIBVPX_BIN_PATH}/${tool_name}${VPX_TEST_EXE_SUFFIX}"
  if [ ! -x "${tool_path}" ]; then
    # Try one directory up: when running via examples.sh the tool could be in
    # the parent directory of $LIBVPX_BIN_PATH.
    tool_path="${LIBVPX_BIN_PATH}/../${tool_name}${VPX_TEST_EXE_SUFFIX}"
  fi
  if [ ! -x "${tool_path}" ]; then
    tool_path=""
  fi
  echo "${tool_path}"
}
# Echoes yes to stdout when the file named by positional parameter one exists
# in LIBVPX_BIN_PATH, and is executable.
# Echoes yes to stdout when an executable named $1 (plus the platform exe
# suffix) exists in $LIBVPX_BIN_PATH; return status mirrors the check.
vpx_tool_available() {
  local exe="${LIBVPX_BIN_PATH}/${1}${VPX_TEST_EXE_SUFFIX}"
  test -x "${exe}" && echo yes
}
# Echoes yes to stdout when vpx_config_option_enabled() reports yes for
# CONFIG_VP8_DECODER.
vp8_decode_available() {
  test "$(vpx_config_option_enabled CONFIG_VP8_DECODER)" = "yes" && echo yes
}
# Echoes yes to stdout when vpx_config_option_enabled() reports yes for
# CONFIG_VP8_ENCODER.
vp8_encode_available() {
  test "$(vpx_config_option_enabled CONFIG_VP8_ENCODER)" = "yes" && echo yes
}
# Echoes yes to stdout when vpx_config_option_enabled() reports yes for
# CONFIG_VP9_DECODER.
vp9_decode_available() {
  test "$(vpx_config_option_enabled CONFIG_VP9_DECODER)" = "yes" && echo yes
}
# Echoes yes to stdout when vpx_config_option_enabled() reports yes for
# CONFIG_VP9_ENCODER.
vp9_encode_available() {
  test "$(vpx_config_option_enabled CONFIG_VP9_ENCODER)" = "yes" && echo yes
}
# Echoes yes to stdout when vpx_config_option_enabled() reports yes for
# CONFIG_WEBM_IO.
webm_io_available() {
  test "$(vpx_config_option_enabled CONFIG_WEBM_IO)" = "yes" && echo yes
}
# Filters strings from $1 using the filter specified by $2. Filter behavior
# depends on the presence of $3. When $3 is present, strings that match the
# filter are excluded. When $3 is omitted, strings matching the filter are
# included.
# The filtered result is echoed to stdout.
# Filters the whitespace-separated strings in $1 with extended regex $2.
# When a third argument is present, matching strings are EXCLUDED; when it is
# omitted, matching strings are KEPT. The result is echoed to stdout as a
# single space-separated line (with the original's leading space preserved).
filter_strings() {
  local strings=${1}
  local filter=${2}
  local exclude=${3}
  # Fix: the accumulator was never initialized, so a second call in the same
  # shell would prepend the previous call's result (masked today only because
  # callers invoke this via command substitution subshells). Also localized.
  local filtered_strings=""
  local s
  if [ -n "${exclude}" ]; then
    # Any third argument means "invert the match": pass -v to grep.
    exclude='-v'
  else
    unset exclude
  fi
  if [ -n "${filter}" ]; then
    # Intentionally unquoted: ${strings} must word-split into items, and
    # ${exclude} must disappear entirely when unset.
    for s in ${strings}; do
      if echo "${s}" | grep -Eq ${exclude} "${filter}" > /dev/null 2>&1; then
        filtered_strings="${filtered_strings} ${s}"
      fi
    done
  else
    # No filter: pass everything through unchanged.
    filtered_strings="${strings}"
  fi
  echo "${filtered_strings}"
}
# Runs user test functions passed via positional parameters one and two.
# Functions in positional parameter one are treated as environment verification
# functions and are run unconditionally. Functions in positional parameter two
# are run according to the rules specified in vpx_test_usage().
# Runs environment-check functions ($1) unconditionally, then the test
# functions in $2, honoring VPX_TEST_RUN_DISABLED_TESTS, VPX_TEST_FILTER and
# VPX_TEST_LIST_TESTS. Each test is a shell function name.
run_tests() {
  # verify_vpx_test_environment always runs before any user-supplied checks.
  local env_tests="verify_vpx_test_environment $1"
  local tests_to_filter="$2"
  local test_name="${VPX_TEST_NAME}"
  # Default the display name to this script's basename without its extension.
  if [ -z "${test_name}" ]; then
    test_name="$(basename "${0%.*}")"
  fi
  if [ "${VPX_TEST_RUN_DISABLED_TESTS}" != "yes" ]; then
    # Filter out DISABLED tests.
    tests_to_filter=$(filter_strings "${tests_to_filter}" ^DISABLED exclude)
  fi
  if [ -n "${VPX_TEST_FILTER}" ]; then
    # Remove tests not matching the user's filter.
    tests_to_filter=$(filter_strings "${tests_to_filter}" ${VPX_TEST_FILTER})
  fi
  # User requested test listing: Dump test names and return.
  if [ "${VPX_TEST_LIST_TESTS}" = "yes" ]; then
    for test_name in $tests_to_filter; do
      echo ${test_name}
    done
    return
  fi
  # Combine environment and actual tests.
  local tests_to_run="${env_tests} ${tests_to_filter}"
  # Non-fatal warning when the tree changed since configure time.
  check_git_hashes
  # Run tests. Each name is invoked as a function; under set -e the first
  # failing test aborts the run (cleanup's EXIT trap reports it).
  for test in ${tests_to_run}; do
    test_begin "${test}"
    vlog "  RUN  ${test}"
    "${test}"
    vlog "  PASS ${test}"
    test_end "${test}"
  done
  local tested_config="$(test_configuration_target) @ $(current_hash)"
  echo "${test_name}: Done, all tests pass for ${tested_config}."
}
# Prints command-line usage for scripts sourcing this file to stdout.
vpx_test_usage() {
cat << EOF
Usage: ${0##*/} [arguments]
--bin-path <path to libvpx binaries directory>
--config-path <path to libvpx config directory>
--filter <filter>: User test filter. Only tests matching filter are run.
--run-disabled-tests: Run disabled tests.
--help: Display this message and exit.
--test-data-path <path to libvpx test data directory>
--show-program-output: Shows output from all programs being tested.
--prefix: Allows for a user specified prefix to be inserted before all test
programs. Grants the ability, for example, to run test programs
within valgrind.
--list-tests: List all test names and exit without actually running tests.
--verbose: Verbose output.
When the --bin-path option is not specified the script attempts to use
\$LIBVPX_BIN_PATH and then the current directory.
When the --config-path option is not specified the script attempts to use
\$LIBVPX_CONFIG_PATH and then the current directory.
When the --test-data-path option is not specified the script attempts to use
\$LIBVPX_TEST_DATA_PATH and then the current directory.
EOF
}
# Returns non-zero (failure) when required environment variables are empty
# strings.
# Returns non-zero (failure) when any of the required path variables is an
# empty string; returns 0 when all three are non-empty.
vpx_test_check_environment() {
  test -n "${LIBVPX_BIN_PATH}" \
    && test -n "${LIBVPX_CONFIG_PATH}" \
    && test -n "${LIBVPX_TEST_DATA_PATH}"
}
# Parse the command line.
while [ -n "$1" ]; do
  case "$1" in
    --bin-path)
      LIBVPX_BIN_PATH="$2"
      # Options taking a value do an extra shift to consume their argument;
      # the loop's trailing shift then consumes the option word itself.
      shift
      ;;
    --config-path)
      LIBVPX_CONFIG_PATH="$2"
      shift
      ;;
    --filter)
      VPX_TEST_FILTER="$2"
      shift
      ;;
    --run-disabled-tests)
      VPX_TEST_RUN_DISABLED_TESTS=yes
      ;;
    --help)
      vpx_test_usage
      exit
      ;;
    --test-data-path)
      LIBVPX_TEST_DATA_PATH="$2"
      shift
      ;;
    --prefix)
      VPX_TEST_PREFIX="$2"
      shift
      ;;
    --verbose)
      VPX_TEST_VERBOSE_OUTPUT=yes
      ;;
    --show-program-output)
      # Clear the redirection string so tool output reaches the console.
      devnull=
      ;;
    --list-tests)
      VPX_TEST_LIST_TESTS=yes
      ;;
    *)
      # Unknown option: print usage and fail.
      vpx_test_usage
      exit 1
      ;;
  esac
  shift
done
# Handle running the tests from a build directory without arguments when running
# the tests on *nix/macosx: default all three paths to the current directory.
LIBVPX_BIN_PATH="${LIBVPX_BIN_PATH:-.}"
LIBVPX_CONFIG_PATH="${LIBVPX_CONFIG_PATH:-.}"
LIBVPX_TEST_DATA_PATH="${LIBVPX_TEST_DATA_PATH:-.}"
# Create a temporary directory for output files, and a trap to clean it up.
# Prefer the standard TMPDIR/TEMPDIR variables, falling back to /tmp.
if [ -n "${TMPDIR}" ]; then
VPX_TEST_TEMP_ROOT="${TMPDIR}"
elif [ -n "${TEMPDIR}" ]; then
VPX_TEST_TEMP_ROOT="${TEMPDIR}"
else
VPX_TEST_TEMP_ROOT=/tmp
fi
# Pseudo-random suffix (0..32767) so concurrent runs get distinct directories.
VPX_TEST_RAND=$(awk 'BEGIN { srand(); printf "%d\n",(rand() * 32768)}')
VPX_TEST_OUTPUT_DIR="${VPX_TEST_TEMP_ROOT}/vpx_test_${VPX_TEST_RAND}"
if ! mkdir -p "${VPX_TEST_OUTPUT_DIR}" || \
[ ! -d "${VPX_TEST_OUTPUT_DIR}" ]; then
echo "${0##*/}: Cannot create output directory, giving up."
echo "${0##*/}: VPX_TEST_OUTPUT_DIR=${VPX_TEST_OUTPUT_DIR}"
exit 1
fi
# Windows targets need the .exe suffix when probing for tool binaries.
if [ "$(is_windows_target)" = "yes" ]; then
VPX_TEST_EXE_SUFFIX=".exe"
fi
# Variables shared by tests.
VP8_IVF_FILE="${LIBVPX_TEST_DATA_PATH}/vp80-00-comprehensive-001.ivf"
VP9_IVF_FILE="${LIBVPX_TEST_DATA_PATH}/vp90-2-09-subpixel-00.ivf"
VP9_WEBM_FILE="${LIBVPX_TEST_DATA_PATH}/vp90-2-00-quantizer-00.webm"
YUV_RAW_INPUT="${LIBVPX_TEST_DATA_PATH}/hantro_collage_w352h288.yuv"
YUV_RAW_INPUT_WIDTH=352
YUV_RAW_INPUT_HEIGHT=288
# Setup a trap function to clean up after tests complete.
trap cleanup EXIT
# Log the effective configuration (emitted only with --verbose).
vlog "$(basename "${0%.*}") test configuration:
LIBVPX_BIN_PATH=${LIBVPX_BIN_PATH}
LIBVPX_CONFIG_PATH=${LIBVPX_CONFIG_PATH}
LIBVPX_TEST_DATA_PATH=${LIBVPX_TEST_DATA_PATH}
VP8_IVF_FILE=${VP8_IVF_FILE}
VP9_IVF_FILE=${VP9_IVF_FILE}
VP9_WEBM_FILE=${VP9_WEBM_FILE}
VPX_TEST_EXE_SUFFIX=${VPX_TEST_EXE_SUFFIX}
VPX_TEST_FILTER=${VPX_TEST_FILTER}
VPX_TEST_LIST_TESTS=${VPX_TEST_LIST_TESTS}
VPX_TEST_OUTPUT_DIR=${VPX_TEST_OUTPUT_DIR}
VPX_TEST_PREFIX=${VPX_TEST_PREFIX}
VPX_TEST_RAND=${VPX_TEST_RAND}
VPX_TEST_RUN_DISABLED_TESTS=${VPX_TEST_RUN_DISABLED_TESTS}
VPX_TEST_SHOW_PROGRAM_OUTPUT=${VPX_TEST_SHOW_PROGRAM_OUTPUT}
VPX_TEST_TEMP_ROOT=${VPX_TEST_TEMP_ROOT}
VPX_TEST_VERBOSE_OUTPUT=${VPX_TEST_VERBOSE_OUTPUT}
YUV_RAW_INPUT=${YUV_RAW_INPUT}
YUV_RAW_INPUT_WIDTH=${YUV_RAW_INPUT_WIDTH}
YUV_RAW_INPUT_HEIGHT=${YUV_RAW_INPUT_HEIGHT}"
fi # End $VPX_TEST_TOOLS_COMMON_SH pseudo include guard.
|
mxOBS/deb-pkg_trusty_chromium-browser
|
third_party/libvpx/source/libvpx/test/tools_common.sh
|
Shell
|
bsd-3-clause
| 13,545 |
#### This script is meant to be sourced by ltconfig.
# ltcf-cxx.sh - Create a C++ compiler specific configuration
#
# Copyright (C) 1996-1999, 2000, 2001, 2003 Free Software Foundation, Inc.
# Originally by Gordon Matzigkeit <[email protected]>, 1996
#
# Original C++ support by: Gary V. Vaughan <[email protected]>
# Alexandre Oliva <[email protected]>
# Ossama Othman <[email protected]>
# Thomas Thanner <[email protected]>
#
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# As a special exception to the GNU General Public License, if you
# distribute this file as part of a program that contains a
# configuration script generated by Autoconf, you may include it under
# the same distribution terms that you use for the rest of that program.
# Source file extension for C++ test sources.
ac_ext=cc
# Object file extension for compiled C++ test sources.
objext=o
# Code to be used in simple compile tests
lt_simple_compile_test_code="int some_variable = 0;"
# Code to be used in simple link tests
lt_simple_link_test_code='int main(int, char *[]) { return (0); }'
# C++ compiler
CXX=${CXX-c++}
# ltmain only uses $CC for tagged configurations so make sure $CC is set.
CC=${CC-"$CXX"}
CFLAGS=${CFLAGS-"$CXXFLAGS"}
# Allow CC to be a program name with arguments: "set dummy $CC" makes $2 the
# first word of $CC (the program) regardless of leading options or emptiness.
set dummy $CC
compiler=$2
# Strip any leading directory components to get the bare compiler name.
# NOTE(review): $echo and $Xsed are presumably defined by the sourcing
# ltconfig script — they are not defined in this file; confirm.
cc_basename=`$echo X"$compiler" | $Xsed -e 's%^.*/%%'`
# Check if we are using GNU gcc (taken/adapted from configure script)
# We need to check here since "--with-gcc" is set at configure time,
# not ltconfig time!
# The probe: preprocessing this file with GCC expands the guarded "yes;".
cat > conftest.$ac_ext <<EOF
#ifdef __GNUC__
yes;
#endif
EOF
if { ac_try='${CC-c++} -E conftest.$ac_ext'; { (eval echo \"$ac_try\") 1>&5; (eval $ac_try) 2>&5; }; } | egrep yes >/dev/null 2>&1; then
  with_gcc=yes
  # Set up default GNU C++ configuration
  # Check if GNU C++ uses GNU ld as the underlying linker, since the
  # archiving commands below assume that GNU ld is being used. The
  # assumption here is that the linker is going to be the same as that
  # used by the C compiler. For the purposes of GCC, this is ok, but
  # if someone uses g++ along with a non-GNU C compiler that doesn't
  # use GNU ld, we may lose. This is ok for the toolchain tree, since
  # the only users of ltcf-cxx.sh are libstdc++-v3 and libjava,
  # anyway, and those use both gcc and g++, so the settings are bound
  # to be the same.
  if test "$with_gnu_ld" = yes; then
    archive_cmds='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib'
    archive_expsym_cmds='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
    hardcode_libdir_flag_spec='${wl}--rpath ${wl}$libdir'
    export_dynamic_flag_spec='${wl}--export-dynamic'
    # If archive_cmds runs LD, not CC, wlarc should be empty
    # XXX I think wlarc can be eliminated in ltcf-cxx, but I need to
    # investigate it a little bit more. (MM)
    wlarc='${wl}'
    # ancient GNU ld didn't support --whole-archive et. al.
    if eval "`$CC -print-prog-name=ld` --help 2>&1" | \
      egrep 'no-whole-archive' > /dev/null; then
      whole_archive_flag_spec="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive'
    else
      whole_archive_flag_spec=
    fi
  else
    wlarc=
    # A generic and very simple default shared library creation
    # command for GNU C++ for the case where it uses the native
    # linker, instead of GNU ld. If possible, this setting should
    # overridden to take advantage of the native linker features on
    # the platform it is being used on.
    archive_cmds='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib'
  fi
  # Commands to make compiler produce verbose output that lists
  # what "hidden" libraries, object files and flags are used when
  # linking a shared library.
  output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | egrep "\-L"'
else
  with_gcc=no
  wlarc=
fi
# PORTME: fill in a description of your system's C++ link characteristics
case $host_os in
aix3*)
# FIXME: insert proper C++ library support
ld_shlibs=no
;;
aix4* | aix5*)
archive_cmds=''
hardcode_direct=yes
hardcode_libdir_separator=':'
link_all_deplibs=yes
# When large executables or shared objects are built, AIX ld can
# have problems creating the table of contents. If linking a library
# or program results in "error TOC overflow" add -mminimal-toc to
# CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not
# enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS.
if test "$with_gcc" = yes; then
case $host_os in aix4.[012]|aix4.[012].*)
# We only want to do this on AIX 4.2 and lower, the check
# below for broken collect2 doesn't work under 4.3+
collect2name=`${CC} -print-prog-name=collect2`
if test -f "$collect2name" && \
strings "$collect2name" | grep resolve_lib_name >/dev/null
then
# We have reworked collect2
hardcode_direct=yes
else
# We have old collect2
hardcode_direct=unsupported
# It fails to find uninstalled libraries when the uninstalled
# path is not listed in the libpath. Setting hardcode_minus_L
# to unsupported forces relinking
hardcode_minus_L=yes
hardcode_libdir_flag_spec='-L$libdir'
hardcode_libdir_separator=
fi
esac
shared_flag='-shared'
else
# not using gcc
if test "$host_cpu" = ia64; then
shared_flag='${wl}-G'
else
shared_flag='${wl}-bM:SRE'
fi
fi
if test "$host_cpu" = ia64; then
# On IA64, the linker does run time linking by default, so we don't
# have to do anything special.
aix_use_runtimelinking=no
if test $with_gnu_ld = no; then
exp_sym_flag='-Bexport'
no_entry_flag=""
fi
else
# Test if we are trying to use run time linking, or normal AIX style linking.
# If -brtl is somewhere in LDFLAGS, we need to do run time linking.
aix_use_runtimelinking=no
for ld_flag in $LDFLAGS; do
if (test $ld_flag = "-brtl" || test $ld_flag = "-Wl,-brtl" ); then
aix_use_runtimelinking=yes
break
fi
done
exp_sym_flag='-bexport'
no_entry_flag='-bnoentry'
fi
# It seems that -bexpall does not export symbols beginning with
# underscore (_), so it is better to generate a list of symbols to export.
always_export_symbols=yes
if test "$aix_use_runtimelinking" = yes; then
hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:/usr/lib:/lib'
allow_undefined_flag=' -Wl,-G'
archive_expsym_cmds="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs $compiler_flags ${allow_undefined_flag} '"\${wl}$no_entry_flag \${wl}-brtl \${wl}$exp_sym_flag:\$export_symbols"
else
if test "$host_cpu" = ia64; then
if test $with_gnu_ld = no; then
hardcode_libdir_flag_spec='${wl}-R $libdir:/usr/lib:/lib'
allow_undefined_flag="-z nodefs"
archive_expsym_cmds="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs $compiler_flags ${wl}${allow_undefined_flag} '"\${wl}$no_entry_flag \${wl}$exp_sym_flag:\$export_symbols"
fi
else
hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:/usr/lib:/lib'
# Warning - without using the other run time loading flags, -berok will
# link without error, but may produce a broken library.
no_undefined_flag=' ${wl}-bnoerok'
allow_undefined_flag=' ${wl}-berok'
# -bexpall does not export symbols beginning with underscore (_)
always_export_symbols=yes
# Exported symbols can be pulled into shared objects from archives
whole_archive_flag_spec=' '
build_libtool_need_lc=yes
# This is similar to how AIX traditionally builds it's shared libraries.
archive_expsym_cmds="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs $compiler_flags ${wl}-bE:$export_symbols ${wl}-bnoentry${allow_undefined_flag}~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$soname'
fi
fi
;;
chorus*)
case $cc_basename in
*)
# FIXME: insert proper C++ library support
ld_shlibs=no
;;
esac
;;
dgux*)
case $cc_basename in
ec++)
# FIXME: insert proper C++ library support
ld_shlibs=no
;;
ghcx)
# Green Hills C++ Compiler
# FIXME: insert proper C++ library support
ld_shlibs=no
;;
*)
# FIXME: insert proper C++ library support
ld_shlibs=no
;;
esac
;;
freebsd[12]*)
# C++ shared libraries reported to be fairly broken before switch to ELF
ld_shlibs=no
;;
freebsd* | kfreebsd*-gnu)
# FreeBSD 3 and later use GNU C++ and GNU ld with standard ELF
# conventions
ld_shlibs=yes
;;
gnu*)
;;
hpux*)
if test $with_gnu_ld = no; then
case "$host_cpu" in
ia64*)
hardcode_libdir_flag_spec='-L$libdir'
hardcode_shlibpath_var=no ;;
*)
hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir' ;;
esac
hardcode_direct=yes
hardcode_libdir_separator=:
export_dynamic_flag_spec='${wl}-E'
fi
hardcode_minus_L=yes # Not in the search PATH, but as the default
# location of the library.
case $cc_basename in
CC)
# FIXME: insert proper C++ library support
ld_shlibs=no
;;
aCC)
case $host_os in
hpux9*) archive_cmds='$rm $output_objdir/$soname~$CC -b ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' ;;
*) archive_cmds='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ;;
esac
# Commands to make compiler produce verbose output that lists
# what "hidden" libraries, object files and flags are used when
# linking a shared library.
#
# There doesn't appear to be a way to prevent this compiler from
# explicitly linking system object files so we need to strip them
# from the output so that they don't get included in the library
# dependencies.
output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | egrep "\-L"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; echo $list'
;;
*)
if test $with_gcc = yes; then
if test $with_gnu_ld = no; then
case "$host_os" in
hpux9*) archive_cmds='$rm $output_objdir/$soname~$CC -shared -nostdlib -fPIC ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' ;;
*)
case "$host_cpu" in
ia64*)
archive_cmds='$LD -b +h $soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $linker_flags' ;;
*)
archive_cmds='$CC -shared -nostdlib -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ;;
esac
;;
esac
fi
else
# FIXME: insert proper C++ library support
ld_shlibs=no
fi
;;
esac
;;
irix5* | irix6*)
case $cc_basename in
CC)
# SGI C++
archive_cmds='$CC -shared -all -multigot $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${objdir}/so_locations -o $lib'
# Archives containing C++ object files must be created using
# "CC -ar", where "CC" is the IRIX C++ compiler. This is
# necessary to make sure instantiated templates are included
# in the archive.
old_archive_cmds='$CC -ar -WR,-u -o $oldlib $oldobjs'
;;
*)
if test "$with_gcc" = yes; then
if test "$with_gnu_ld" = no; then
archive_cmds='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${objdir}/so_locations -o $lib'
else
archive_cmds='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -soname $soname `test -n "$verstring" && echo -set_version $verstring` -o $lib'
fi
fi
hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir'
hardcode_libdir_separator=:
link_all_deplibs=yes
;;
esac
;;
linux*)
case $cc_basename in
KCC)
# Kuck and Associates, Inc. (KAI) C++ Compiler
# KCC will only create a shared library if the output file
# ends with ".so" (or ".sl" for HP-UX), so rename the library
# to its proper name (with version) after linking.
archive_cmds='templib=`echo $lib | sed -e "s/\.so\..*/\.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib'
archive_expsym_cmds='templib=`echo $lib | sed -e "s/\.so\..*/\.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib ${wl}-retain-symbols-file,$export_symbols; mv \$templib $lib'
# Commands to make compiler produce verbose output that lists
# what "hidden" libraries, object files and flags are used when
# linking a shared library.
#
# There doesn't appear to be a way to prevent this compiler from
# explicitly linking system object files so we need to strip them
# from the output so that they don't get included in the library
# dependencies.
output_verbose_link_cmd='templist=`$CC $CFLAGS -v conftest.$objext -o libconftest.so 2>&1 | egrep "ld"`; rm -f libconftest.so; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; echo $list'
hardcode_libdir_flag_spec='${wl}--rpath,$libdir'
export_dynamic_flag_spec='${wl}--export-dynamic'
# Archives containing C++ object files must be created using
# "CC -Bstatic", where "CC" is the KAI C++ compiler.
old_archive_cmds='$CC -Bstatic -o $oldlib $oldobjs'
;;
cxx)
# Compaq C++
archive_cmds='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib'
archive_expsym_cmds='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib ${wl}-retain-symbols-file $wl$export_symbols'
runpath_var=LD_RUN_PATH
hardcode_libdir_flag_spec='-rpath $libdir'
hardcode_libdir_separator=:
# Commands to make compiler produce verbose output that lists
# what "hidden" libraries, object files and flags are used when
# linking a shared library.
#
# There doesn't appear to be a way to prevent this compiler from
# explicitly linking system object files so we need to strip them
# from the output so that they don't get included in the library
# dependencies.
output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | grep "ld"`; templist=`echo $templist | sed "s/\(^.*ld.*\)\( .*ld .*$\)/\1/"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; echo $list'
;;
esac
;;
lynxos*)
# FIXME: insert proper C++ library support
ld_shlibs=no
;;
m88k*)
# FIXME: insert proper C++ library support
ld_shlibs=no
;;
mvs*)
case $cc_basename in
cxx)
# FIXME: insert proper C++ library support
ld_shlibs=no
;;
*)
# FIXME: insert proper C++ library support
ld_shlibs=no
;;
esac
;;
netbsd* | knetbsd*-gnu)
# NetBSD uses g++ - do we need to do anything?
;;
osf3*)
case $cc_basename in
KCC)
# Kuck and Associates, Inc. (KAI) C++ Compiler
# KCC will only create a shared library if the output file
# ends with ".so" (or ".sl" for HP-UX), so rename the library
# to its proper name (with version) after linking.
archive_cmds='templib=`echo $lib | sed -e "s/\.so\..*/\.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib'
hardcode_libdir_flag_spec='${wl}-rpath,$libdir'
hardcode_libdir_separator=:
# Archives containing C++ object files must be created using
# "CC -Bstatic", where "CC" is the KAI C++ compiler.
old_archive_cmds='$CC -Bstatic -o $oldlib $oldobjs'
;;
RCC)
# Rational C++ 2.4.1
# FIXME: insert proper C++ library support
ld_shlibs=no
;;
cxx)
allow_undefined_flag=' ${wl}-expect_unresolved ${wl}\*'
archive_cmds='$CC -shared${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $soname `test -n "$verstring" && echo ${wl}-set_version $verstring` -update_registry ${objdir}/so_locations -o $lib'
hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir'
hardcode_libdir_separator=:
# Commands to make compiler produce verbose output that lists
# what "hidden" libraries, object files and flags are used when
# linking a shared library.
#
# There doesn't appear to be a way to prevent this compiler from
# explicitly linking system object files so we need to strip them
# from the output so that they don't get included in the library
# dependencies.
output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | grep "ld" | grep -v "ld:"`; templist=`echo $templist | sed "s/\(^.*ld.*\)\( .*ld.*$\)/\1/"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; echo $list'
;;
*)
if test "$with_gcc" = yes && test "$with_gnu_ld" = no; then
allow_undefined_flag=' ${wl}-expect_unresolved ${wl}\*'
archive_cmds='$CC -shared -nostdlib ${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${objdir}/so_locations -o $lib'
hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir'
hardcode_libdir_separator=:
# Commands to make compiler produce verbose output that lists
# what "hidden" libraries, object files and flags are used when
# linking a shared library.
output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | egrep "\-L"'
else
# FIXME: insert proper C++ library support
ld_shlibs=no
fi
;;
esac
;;
osf4* | osf5*)
case $cc_basename in
KCC)
# Kuck and Associates, Inc. (KAI) C++ Compiler
# KCC will only create a shared library if the output file
# ends with ".so" (or ".sl" for HP-UX), so rename the library
# to its proper name (with version) after linking.
archive_cmds='templib=`echo $lib | sed -e "s/\.so\..*/\.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib'
hardcode_libdir_flag_spec='${wl}-rpath,$libdir'
hardcode_libdir_separator=:
# Archives containing C++ object files must be created using
# the KAI C++ compiler.
old_archive_cmds='$CC -o $oldlib $oldobjs'
;;
RCC)
# Rational C++ 2.4.1
# FIXME: insert proper C++ library support
ld_shlibs=no
;;
cxx)
allow_undefined_flag=' -expect_unresolved \*'
archive_cmds='$CC -shared${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname `test -n "$verstring" && echo -set_version $verstring` -update_registry ${objdir}/so_locations -o $lib'
archive_expsym_cmds='for i in `cat $export_symbols`; do printf "-exported_symbol " >> $lib.exp; echo "\$i" >> $lib.exp; done~
echo "-hidden">> $lib.exp~
$CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname -Wl,-input -Wl,$lib.exp `test -n "$verstring" && echo -set_version $verstring` -update_registry $objdir/so_locations -o $lib~
$rm $lib.exp'
hardcode_libdir_flag_spec='-rpath $libdir'
hardcode_libdir_separator=:
# Commands to make compiler produce verbose output that lists
# what "hidden" libraries, object files and flags are used when
# linking a shared library.
#
# There doesn't appear to be a way to prevent this compiler from
# explicitly linking system object files so we need to strip them
# from the output so that they don't get included in the library
# dependencies.
output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | grep "ld" | grep -v "ld:"`; templist=`echo $templist | sed "s/\(^.*ld.*\)\( .*ld.*$\)/\1/"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; echo $list'
;;
*)
if test "$with_gcc" = yes && test "$with_gnu_ld" = no; then
allow_undefined_flag=' ${wl}-expect_unresolved ${wl}\*'
archive_cmds='$CC -shared -nostdlib ${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && echo ${wl}-set_version ${wl}$verstring` ${wl}-update_registry ${wl}${objdir}/so_locations -o $lib'
hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir'
hardcode_libdir_separator=:
# Commands to make compiler produce verbose output that lists
# what "hidden" libraries, object files and flags are used when
# linking a shared library.
output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | egrep "\-L"'
else
# FIXME: insert proper C++ library support
ld_shlibs=no
fi
;;
esac
;;
psos*)
# FIXME: insert proper C++ library support
ld_shlibs=no
;;
sco*)
case $cc_basename in
CC)
# FIXME: insert proper C++ library support
ld_shlibs=no
;;
*)
# FIXME: insert proper C++ library support
ld_shlibs=no
;;
esac
;;
sunos4*)
case $cc_basename in
CC)
# Sun C++ 4.x
# FIXME: insert proper C++ library support
ld_shlibs=no
;;
lcc)
# Lucid
# FIXME: insert proper C++ library support
ld_shlibs=no
;;
*)
# FIXME: insert proper C++ library support
ld_shlibs=no
;;
esac
;;
solaris*)
case $cc_basename in
CC)
# Sun C++ 4.2, 5.x and Centerline C++
no_undefined_flag=' -zdefs'
archive_cmds='$CC -G${allow_undefined_flag} -nolib -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
archive_expsym_cmds='$echo "{ global:" > $lib.exp~cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~
$CC -G${allow_undefined_flag} -nolib ${wl}-M ${wl}$lib.exp -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$rm $lib.exp'
hardcode_libdir_flag_spec='-R$libdir'
hardcode_shlibpath_var=no
case $host_os in
solaris2.[0-5] | solaris2.[0-5].*) ;;
*)
# The C++ compiler is used as linker so we must use $wl
# flag to pass the commands to the underlying system
# linker.
# Supported since Solaris 2.6 (maybe 2.5.1?)
whole_archive_flag_spec='${wl}-z ${wl}allextract$convenience ${wl}-z ${wl}defaultextract'
;;
esac
link_all_deplibs=yes
# Commands to make compiler produce verbose output that lists
# what "hidden" libraries, object files and flags are used when
# linking a shared library.
#
# There doesn't appear to be a way to prevent this compiler from
# explicitly linking system object files so we need to strip them
# from the output so that they don't get included in the library
# dependencies.
output_verbose_link_cmd='templist=`$CC -G $CFLAGS -v conftest.$objext 2>&1 | egrep "\-R|\-L"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; echo $list'
# Archives containing C++ object files must be created using
# "CC -xar", where "CC" is the Sun C++ compiler. This is
# necessary to make sure instantiated templates are included
# in the archive.
old_archive_cmds='$CC -xar -o $oldlib $oldobjs'
;;
gcx)
# Green Hills C++ Compiler
archive_cmds='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib'
# The C++ compiler must be used to create the archive.
old_archive_cmds='$CC $LDFLAGS -archive -o $oldlib $oldobjs'
;;
*)
# GNU C++ compiler with Solaris linker
if test "$with_gcc" = yes && test "$with_gnu_ld" = no; then
no_undefined_flag=' ${wl}-z ${wl}defs'
if $CC --version | egrep -v '^2\.7' > /dev/null; then
archive_cmds='$CC -shared -nostdlib $LDFLAGS $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib'
archive_expsym_cmds='$echo "{ global:" > $lib.exp~cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~
$CC -shared -nostdlib ${wl}-M $wl$lib.exp -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$rm $lib.exp'
# Commands to make compiler produce verbose output that lists
# what "hidden" libraries, object files and flags are used when
# linking a shared library.
output_verbose_link_cmd="$CC -shared $CFLAGS -v conftest.$objext 2>&1 | egrep \"\-L\""
else
# g++ 2.7 appears to require `-G' NOT `-shared' on this
# platform.
archive_cmds='$CC -G -nostdlib $LDFLAGS $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib'
archive_expsym_cmds='$echo "{ global:" > $lib.exp~cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $lib.exp~$echo "local: *; };" >> $lib.exp~
$CC -G -nostdlib ${wl}-M $wl$lib.exp -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$rm $lib.exp'
# Commands to make compiler produce verbose output that lists
# what "hidden" libraries, object files and flags are used when
# linking a shared library.
output_verbose_link_cmd="$CC -G $CFLAGS -v conftest.$objext 2>&1 | egrep \"\-L\""
fi
hardcode_libdir_flag_spec='${wl}-R $wl$libdir'
fi
;;
esac
;;
tandem*)
case $cc_basename in
NCC)
# NonStop-UX NCC 3.20
# FIXME: insert proper C++ library support
ld_shlibs=no
;;
*)
# FIXME: insert proper C++ library support
ld_shlibs=no
;;
esac
;;
tpf*)
ld_shlibs=yes
;;
unixware*)
# FIXME: insert proper C++ library support
ld_shlibs=no
;;
vxworks*)
# FIXME: insert proper C++ library support
ld_shlibs=no
;;
*)
# FIXME: insert proper C++ library support
ld_shlibs=no
;;
esac
## Compiler Characteristics: PIC flags, static flags, etc
# We don't use cached values here since only the C compiler
# characteristics should be cached.
# Reset every C++-specific probe variable before redetection below;
# ac_cv_prog_cc_can_build_shared carries over the result computed earlier.
ac_cv_prog_cc_pic=
ac_cv_prog_cc_shlib=
ac_cv_prog_cc_wl=
ac_cv_prog_cc_static=
ac_cv_prog_cc_no_builtin=
ac_cv_prog_cc_can_build_shared=$can_build_shared
ac_cv_prog_cc_pic_works=
ac_cv_prog_cc_static_works=
# Pick the PIC flag, static-link flag and linker pass-through prefix for
# the C++ compiler, by platform (GCC branch) or by compiler name (vendor
# branch).
# NOTE(review): the original mixed "lt_cv_prog_cc_*" and "ac_cv_prog_cc_*"
# names for the same settings (aix and darwin arms).  Everything else in
# this section reads/writes ac_cv_prog_cc_*, so the lt_cv_ assignments
# were silently ignored; normalized to ac_cv_prog_cc_* — confirm against
# the consuming ltconfig before relying on the AIX/darwin flags.
if test "$with_gcc" = yes; then
  ac_cv_prog_cc_wl='-Wl,'
  ac_cv_prog_cc_static='-static'
  case $host_os in
  aix*)
    # All AIX code is PIC.
    if test "$host_cpu" = ia64; then
      # AIX 5 now supports IA64 processor
      ac_cv_prog_cc_static='-Bstatic'
    else
      ac_cv_prog_cc_static='-bnso -bI:/lib/syscalls.exp'
    fi
    ;;
  amigaos*)
    # FIXME: we need at least 68020 code to build shared libraries, but
    # adding the `-m68020' flag to GCC prevents building anything better,
    # like `-m68040'.
    ac_cv_prog_cc_pic='-m68020 -resident32 -malways-restore-a4'
    ;;
  beos* | irix5* | irix6* | osf3* | osf4* | osf5*)
    # PIC is the default for these OSes.
    ;;
  cygwin* | mingw* | os2*)
    # This hack is so that the source file can tell whether it is being
    # built for inclusion in a dll (and should export symbols for example).
    ac_cv_prog_cc_pic='-DDLL_EXPORT'
    ;;
  darwin* | rhapsody*)
    # PIC is the default on this platform
    # Common symbols not allowed in MH_DYLIB files
    ac_cv_prog_cc_pic='-fno-common'
    ;;
  *djgpp*)
    # DJGPP does not support shared libraries at all
    ac_cv_prog_cc_pic=
    ;;
  sysv4*MP*)
    if test -d /usr/nec; then
      ac_cv_prog_cc_pic=-Kconform_pic
    fi
    ;;
  *)
    ac_cv_prog_cc_pic='-fPIC'
    ;;
  esac
else
  # Vendor compilers: select flags by $cc_basename per platform.
  case $host_os in
  aix4* | aix5*)
    # All AIX code is PIC.
    if test "$host_cpu" = ia64; then
      # AIX 5 now supports IA64 processor
      ac_cv_prog_cc_static='-Bstatic'
    else
      ac_cv_prog_cc_static='-bnso -bI:/lib/syscalls.exp'
    fi
    ;;
  chorus*)
    case $cc_basename in
    cxch68)
      # Green Hills C++ Compiler
      # ac_cv_prog_cc_static="--no_auto_instantiation -u __main -u __premain -u _abort -r $COOL_DIR/lib/libOrb.a $MVME_DIR/lib/CC/libC.a $MVME_DIR/lib/classix/libcx.s.a"
      ;;
    esac
    ;;
  dgux*)
    case $cc_basename in
    ec++)
      ac_cv_prog_cc_pic='-KPIC'
      ;;
    ghcx)
      # Green Hills C++ Compiler
      ac_cv_prog_cc_pic='-pic'
      ;;
    *)
      ;;
    esac
    ;;
  freebsd* | kfreebsd*-gnu)
    # FreeBSD uses GNU C++
    ;;
  gnu*)
    ;;
  hpux9* | hpux10* | hpux11*)
    case $cc_basename in
    CC)
      ac_cv_prog_cc_wl='-Wl,'
      ac_cv_prog_cc_static="${ac_cv_prog_cc_wl}-a ${ac_cv_prog_cc_wl}archive"
      ac_cv_prog_cc_pic='+Z'
      ;;
    aCC)
      ac_cv_prog_cc_wl='-Wl,'
      ac_cv_prog_cc_static="${ac_cv_prog_cc_wl}-a ${ac_cv_prog_cc_wl}archive"
      ac_cv_prog_cc_pic='+Z'
      ;;
    *)
      ;;
    esac
    ;;
  irix5* | irix6*)
    case $cc_basename in
    CC)
      ac_cv_prog_cc_wl='-Wl,'
      ac_cv_prog_cc_static='-non_shared'
      ac_cv_prog_cc_pic='-KPIC'
      ;;
    *)
      ;;
    esac
    ;;
  linux*)
    case $cc_basename in
    KCC)
      # KAI C++ Compiler
      ac_cv_prog_cc_wl='--backend -Wl,'
      ac_cv_prog_cc_pic='-fPIC'
      ;;
    cxx)
      # Compaq C++
      # Make sure the PIC flag is empty.  It appears that all Alpha
      # Linux and Compaq Tru64 Unix objects are PIC.
      ac_cv_prog_cc_pic=
      ac_cv_prog_cc_static='-non_shared'
      ;;
    *)
      ;;
    esac
    ;;
  lynxos*)
    ;;
  m88k*)
    ;;
  mvs*)
    case $cc_basename in
    cxx)
      ac_cv_prog_cc_pic='-W c,exportall'
      ;;
    *)
      ;;
    esac
    ;;
  netbsd*)
    ;;
  osf3* | osf4* | osf5*)
    case $cc_basename in
    KCC)
      ac_cv_prog_cc_wl='--backend -Wl,'
      ;;
    RCC)
      # Rational C++ 2.4.1
      ac_cv_prog_cc_pic='-pic'
      ;;
    cxx)
      # Digital/Compaq C++
      ac_cv_prog_cc_wl='-Wl,'
      # Make sure the PIC flag is empty.  It appears that all Alpha
      # Linux and Compaq Tru64 Unix objects are PIC.
      ac_cv_prog_cc_pic=
      ac_cv_prog_cc_static='-non_shared'
      ;;
    *)
      ;;
    esac
    ;;
  psos*)
    ;;
  sco*)
    case $cc_basename in
    CC)
      ac_cv_prog_cc_pic='-fPIC'
      ;;
    *)
      ;;
    esac
    ;;
  solaris*)
    case $cc_basename in
    CC)
      # Sun C++ 4.2, 5.x and Centerline C++
      ac_cv_prog_cc_pic='-KPIC'
      ac_cv_prog_cc_static='-Bstatic'
      ac_cv_prog_cc_wl='-Qoption ld '
      ;;
    gcx)
      # Green Hills C++ Compiler
      ac_cv_prog_cc_pic='-PIC'
      ;;
    *)
      ;;
    esac
    ;;
  sunos4*)
    case $cc_basename in
    CC)
      # Sun C++ 4.x
      ac_cv_prog_cc_pic='-pic'
      ac_cv_prog_cc_static='-Bstatic'
      ;;
    lcc)
      # Lucid
      ac_cv_prog_cc_pic='-pic'
      ;;
    *)
      ;;
    esac
    ;;
  tandem*)
    case $cc_basename in
    NCC)
      # NonStop-UX NCC 3.20
      ac_cv_prog_cc_pic='-KPIC'
      ;;
    *)
      ;;
    esac
    ;;
  unixware*)
    ;;
  vxworks*)
    ;;
  *)
    # Unknown platform/compiler combination: give up on shared libs.
    ac_cv_prog_cc_can_build_shared=no
    ;;
  esac
fi
# Append -DPIC so sources can detect a PIC build; skipped on platforms
# where PIC does not exist at all.
case "$host_os" in
# Platforms which do not support PIC, so -DPIC is meaningless
# on them:
*djgpp*)
ac_cv_prog_cc_pic=
;;
*)
ac_cv_prog_cc_pic="$ac_cv_prog_cc_pic -DPIC"
;;
esac
# Figure out "hidden" C++ library dependencies from verbose
# compiler output when linking a shared library.
# Write a minimal C++ class so the compile pulls in the runtime's
# implicit objects/libraries; the heredoc body is the test program
# and must not be altered.
cat > conftest.$ac_ext <<EOF
class Foo
{
public:
Foo (void) { a = 0; }
private:
int a;
};
EOF
# Compile the probe source, then parse the compiler's verbose link output
# to discover the implicit search paths, libraries and object files the
# C++ driver adds around user code.  Results accumulate in
# compiler_lib_search_path / predep_objects / postdeps / postdep_objects.
if (eval $ac_compile) 2>&5; then
  # Parse the compiler output and extract the necessary
  # objects, libraries and library flags.

  # Sentinel used to keep track of whether or not we are before
  # the conftest object file.
  pre_test_object_deps_done=no
  for p in `eval $output_verbose_link_cmd`; do
    case $p in
    -L* | -R* | -l*)
      # Some compilers place space between "-{L,R}" and the path.
      # Remove the space.
      if test $p = "-L" \
         || test $p = "-R"; then
        prev=$p
        continue
      else
        prev=
      fi
      if test "$pre_test_object_deps_done" = no; then
        case $p in
        -L* | -R*)
          # Internal compiler library paths should come after those
          # provided the user.  The postdeps already come after the
          # user supplied libs so there is no need to process them.
          if test -z "$compiler_lib_search_path"; then
            compiler_lib_search_path="${prev}${p}"
          else
            compiler_lib_search_path="${compiler_lib_search_path} ${prev}${p}"
          fi
          ;;
        # The "-l" case would never come before the object being
        # linked, so don't bother handling this case.
        esac
      else
        if test -z "$postdeps"; then
          postdeps="${prev}${p}"
        else
          postdeps="${postdeps} ${prev}${p}"
        fi
      fi
      ;;
    *.$objext)
      # This assumes that the test object file only shows up
      # once in the compiler output.
      if test "$p" = "conftest.$objext"; then
        pre_test_object_deps_done=yes
        continue
      fi
      if test "$pre_test_object_deps_done" = no; then
        if test -z "$predep_objects"; then
          predep_objects="$p"
        else
          predep_objects="$predep_objects $p"
        fi
      else
        if test -z "$postdep_objects"; then
          postdep_objects="$p"
        else
          postdep_objects="$postdep_objects $p"
        fi
      fi
      ;;
    *) ;; # Ignore the rest.
    esac
  done
  # Clean up.
  rm -f a.out
else
  echo "ltcf-cxx.sh: error: problem compiling test program"
fi
# Remove the probe object.  The original said "confest.$objext" — a typo
# that left conftest.$objext behind after every run.
$rm -f conftest.$objext
# Decide whether -lc must be added explicitly: if the implicit postdeps
# already pull in libc, adding it again is unnecessary.
case " $postdeps " in
*" -lc "*) need_lc=no ;;
*) need_lc=yes ;;
esac
|
ipwndev/DSLinux-Mirror
|
user/gdb/ltcf-cxx.sh
|
Shell
|
gpl-2.0
| 37,122 |
#!/bin/sh
# East-side initialization for the nat-pluto-01 test: set up the host,
# start pluto, and load the NATed north/east tunnel definition.
# NOTE(review): 'source' is a bashism under #!/bin/sh — presumably these
# harness scripts always run under bash; confirm before changing anything.
# The ": ==== xxx ====" no-ops are presumably console-log markers consumed
# by the test harness (start / cut / tuc bracket a section to be stripped).
: ==== start ====
TESTNAME=nat-pluto-01
source /testing/pluto/bin/eastlocal.sh
# Static ARP entry for the test network peer address.
arp -s 192.0.2.1 10:00:00:dc:bc:01
ipsec setup start
/testing/pluto/bin/wait-until-pluto-started
ipsec auto --add northnet--eastnet-nat
: ==== cut ====
# Extra KLIPS receive-path debugging, enabled only inside the cut/tuc
# section.
ipsec klipsdebug --set rcv
ipsec klipsdebug --set verbose
: ==== tuc ====
echo done
|
y-trudeau/openswan-patch-meraki
|
testing/pluto/nat-pluto-01/eastinit.sh
|
Shell
|
gpl-2.0
| 330 |
#!/bin/sh
# Run readwriteconf against the multinet-01 west.conf, and record the
# invocation in .gdbinit so it can be replayed under gdb.
# assumes that
# ROOTDIR= set to root of source code.
# OBJDIRTOP= set to location of object files
#
exe=${OBJDIRTOP}/programs/readwriteconf/readwriteconf
conf=testing/pluto/multinet-01/west.conf
args="--rootdir=${ROOTDIR}/testing/baseconfigs/all --config ${ROOTDIR}/$conf --verbose --verbose --verbose"
# Mirror the exact command line for interactive debugging sessions.
echo "file $exe" >.gdbinit
echo "set args $args " >>.gdbinit
# eval is deliberate here: $args must be word-split into separate argv
# entries, which a quoted "$args" would prevent.
eval $exe $args
|
y-trudeau/openswan-patch-meraki
|
testing/scripts/conf-multinet-01/runit.sh
|
Shell
|
gpl-2.0
| 419 |
#!/bin/bash
########################################################################
#
# Linux on Hyper-V and Azure Test Code, ver. 1.0.0
# Copyright (c) Microsoft Corporation
#
# All rights reserved.
# Licensed under the Apache License, Version 2.0 (the ""License"");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION
# ANY IMPLIED WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR
# PURPOSE, MERCHANTABLITY OR NON-INFRINGEMENT.
#
# See the Apache Version 2.0 License for specific language governing
# permissions and limitations under the License.
#
########################################################################
#######################################################################
#
# netperf_server.sh
# This script starts netperf in server mode on dependency VM.
#######################################################################
# Test-state strings written to ~/state.txt for the LISA/ICA harness to
# poll; they drive the pass/fail/abort decision on the host side.
ICA_TESTRUNNING="TestRunning"
ICA_NETPERFRUNNING="netperfRunning"
ICA_TESTABORTED="TestAborted"
ICA_TESTFAILED="TestFailed"
# Print a log line to stdout as "<timestamp> : <message>".
LogMsg() {
    local stamp
    stamp=$(date "+%a %b %d %T %Y")
    printf '%s : %s\n' "$stamp" "${1}"
}
# Persist the current test state to ~/state.txt for the harness to read.
UpdateTestState() {
    local new_state=$1
    # intentionally unquoted to mirror the original's word-splitting
    echo $new_state > ~/state.txt
}
#######################################################################
#
# Main script body
#
#######################################################################
# All artifacts (state.txt, summary.log) live in the home directory.
cd ~
UpdateTestState $ICA_TESTRUNNING
LogMsg "Starting running the script"
#Delete any old summary.log file
LogMsg "Cleaning up old summary.log"
if [ -e ~/summary.log ]; then
rm -f ~/summary.log
fi
touch ~/summary.log
#Convert eol
# utils.sh may arrive with CRLF endings from the Windows-side harness.
dos2unix utils.sh
#Source utils.sh
. utils.sh || {
echo "Error: unable to source utils.sh!"
echo "TestAborted" > state.txt
exit 20
}
#Source constants file and initialize most common variables
UtilsInit
#In case of error
# NOTE: only comments may sit between UtilsInit and this case — it
# dispatches on UtilsInit's exit status via $?.
case $? in
0)
#do nothing, init succeeded
;;
1)
LogMsg "Unable to cd to $LIS_HOME. Aborting..."
UpdateSummary "Unable to cd to $LIS_HOME. Aborting..."
UpdateTestState $ICA_TESTABORTED
exit 20
;;
2)
LogMsg "Unable to use test state file. Aborting..."
UpdateSummary "Unable to use test state file. Aborting..."
#need to wait for test timeout to kick in
#hailmary try to update teststate
sleep 60
echo "TestAborted" > state.txt
exit 20
;;
3)
LogMsg "Error: unable to source constants file. Aborting..."
UpdateSummary "Error: unable to source constants file"
UpdateTestState $ICA_TESTABORTED
exit 20
;;
*)
#should not happen
LogMsg "UtilsInit returned an unknown error. Aborting..."
UpdateSummary "UtilsInit returned an unknown error. Aborting..."
UpdateTestState $ICA_TESTABORTED
exit 20
;;
esac
#Make sure the required test parameters are defined
# STATIC_IP2 is the address netserver will listen on; it comes from the
# constants file sourced by UtilsInit.
if [ "${STATIC_IP2:-UNDEFINED}" = "UNDEFINED" ]; then
msg="The test parameter STATIC_IP2 is not defined in constants file!"
LogMsg "$msg"
UpdateSummary "$msg"
UpdateTestState $ICA_TESTABORTED
exit 20
else
# CheckIP comes from utils.sh; non-zero status means an invalid address.
CheckIP "$STATIC_IP2"
if [ 0 -ne $? ]; then
msg="Test parameter STATIC_IP2 = $STATIC_IP2 is not a valid IP Address."
LogMsg "$msg"
UpdateSummary "$msg"
UpdateTestState $ICA_TESTABORTED
exit 20
fi
fi
#Download NETPERF
# Fetch the netperf 2.7.0 source tarball; abort the test if the download
# fails.
wget https://github.com/HewlettPackard/netperf/archive/netperf-2.7.0.tar.gz > /dev/null 2>&1
if [ $? -ne 0 ]; then
    msg="Error: Unable to download netperf."
    LogMsg "$msg"
    UpdateSummary "$msg"
    UpdateTestState $ICA_TESTFAILED
    # Was "exist 1" — a typo that made the script continue (and later
    # fail confusingly) after a failed download.
    exit 1
fi
tar -xvf netperf-2.7.0.tar.gz > /dev/null 2>&1
#Get the root directory of the tarball
rootDir="netperf-netperf-2.7.0"
cd ${rootDir}

# Run a firewall-related command; if it fails, log the given message,
# mark the test failed and exit.  Replaces twelve identical copies of the
# same log/summary/state/exit boilerplate.
firewall_step() {
    local msg=$1
    shift
    "$@"
    if [ $? -ne 0 ]; then
        LogMsg "${msg}"
        UpdateSummary "${msg}"
        UpdateTestState $ICA_TESTFAILED
        exit 1
    fi
}

#Distro specific setup
# Disable/flush the firewall so netperf traffic is not blocked.  For each
# distro the status command returning 3 means "not running", so the flush
# only happens when a firewall is active.
GetDistro
case "$DISTRO" in
debian*|ubuntu*)
    service ufw status
    if [ $? -ne 3 ]; then
        LogMsg "Disabling firewall on Ubuntu.."
        firewall_step "Error: Failed to stop ufw." iptables -t filter -F
        firewall_step "Error: Failed to stop ufw." iptables -t nat -F
    fi;;
redhat_5|redhat_6)
    LogMsg "Check iptables status on RHEL."
    service iptables status
    if [ $? -ne 3 ]; then
        LogMsg "Disabling firewall on Redhat.."
        firewall_step "Error: Failed to flush iptables rules." iptables -t filter -F
        firewall_step "Error: Failed to flush iptables nat rules." iptables -t nat -F
        firewall_step "Error: Failed to flush ip6tables rules." ip6tables -t filter -F
        firewall_step "Error: Failed to flush ip6tables nat rules." ip6tables -t nat -F
    fi;;
redhat_7)
    LogMsg "Check iptables status on RHEL."
    systemctl status firewalld
    if [ $? -ne 3 ]; then
        LogMsg "Disabling firewall on Redhat 7.."
        firewall_step "Error: Failed to stop firewalld." systemctl disable firewalld
        firewall_step "Error: Failed to turn off firewalld." systemctl stop firewalld
    fi
    LogMsg "Check iptables status on RHEL 7."
    service iptables status
    if [ $? -ne 3 ]; then
        firewall_step "Error: Failed to flush iptables rules." iptables -t filter -F
        firewall_step "Error: Failed to flush iptables nat rules." iptables -t nat -F
        firewall_step "Error: Failed to flush ip6tables rules." ip6tables -t filter -F
        firewall_step "Error: Failed to flush ip6tables nat rules." ip6tables -t nat -F
    fi;;
suse_12)
    LogMsg "Check iptables status on SLES 12."
    service SuSEfirewall2 status
    if [ $? -ne 3 ]; then
        firewall_step "Error: Failed to flush iptables rules." iptables -F
        firewall_step "Error: Failed to stop iptables." service SuSEfirewall2 stop
        firewall_step "Error: Failed to turn off iptables." chkconfig SuSEfirewall2 off
    fi;;
esac
# Log the message, mark the test failed and exit; replaces four identical
# copies of the failure boilerplate in the build/start sequence.
build_fail() {
    local msg=$1
    LogMsg "${msg}"
    UpdateSummary "${msg}"
    UpdateTestState $ICA_TESTFAILED
    exit 1
}

# Build and install netperf from the extracted source tree (we are still
# inside $rootDir here).
./configure > /dev/null 2>&1 \
    || build_fail "Error: Unable to configure make file for netperf."
make > /dev/null 2>&1 \
    || build_fail "Error: Unable to build netperf."
make install > /dev/null 2>&1 \
    || build_fail "Error: Unable to install netperf."
#go back to test root folder
cd ~
# Start netperf server instances
LogMsg "Starting netperf in server mode."
# NOTE(review): the state is set to netperfRunning before netserver is
# actually launched — preserved as-is since the host harness presumably
# keys off this ordering; confirm before reordering.
UpdateTestState $ICA_NETPERFRUNNING
LogMsg "Netperf server instances are now ready to run."
netserver -L ${STATIC_IP2} >> ~/summary.log
if [ $? -ne 0 ]; then
    build_fail "Error: Unable to start netperf in server mode."
fi
|
ilenghel/lis-test
|
WS2012R2/lisa/remote-scripts/ica/netperf_server.sh
|
Shell
|
apache-2.0
| 9,509 |
#!/usr/bin/env bash
# Copyright ©2015 The Gonum Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.

# Generate the float32/complex64 BLAS implementations from their
# float64/complex128 sources via gofmt rewrite rules plus sed renames.
# Fix: the emitted "// Code generated ..." headers previously closed the
# quoted command with a curly quote (”) instead of ASCII ("), producing a
# mismatched-quote marker in every generated file.

# Warning banners inserted above each autogenerated function.
WARNINGF32='//\
// Float32 implementations are autogenerated and not directly tested.\
'
WARNINGC64='//\
// Complex64 implementations are autogenerated and not directly tested.\
'
# Level1 routines.
echo Generating level1float32.go
echo -e '// Code generated by "go generate gonum.org/v1/gonum/blas/gonum"; DO NOT EDIT.\n' > level1float32.go
cat level1float64.go \
| gofmt -r 'blas.Float64Level1 -> blas.Float32Level1' \
\
| gofmt -r 'float64 -> float32' \
| gofmt -r 'blas.DrotmParams -> blas.SrotmParams' \
\
| gofmt -r 'f64.AxpyInc -> f32.AxpyInc' \
| gofmt -r 'f64.AxpyUnitary -> f32.AxpyUnitary' \
| gofmt -r 'f64.DotUnitary -> f32.DotUnitary' \
| gofmt -r 'f64.L2NormInc -> f32.L2NormInc' \
| gofmt -r 'f64.L2NormUnitary -> f32.L2NormUnitary' \
| gofmt -r 'f64.ScalInc -> f32.ScalInc' \
| gofmt -r 'f64.ScalUnitary -> f32.ScalUnitary' \
\
| sed -e "s_^\(func (Implementation) \)D\(.*\)\$_$WARNINGF32\1S\2_" \
      -e 's_^// D_// S_' \
      -e "s_^\(func (Implementation) \)Id\(.*\)\$_$WARNINGF32\1Is\2_" \
      -e 's_^// Id_// Is_' \
      -e 's_"gonum.org/v1/gonum/internal/asm/f64"_"gonum.org/v1/gonum/internal/asm/f32"_' \
      -e 's_"math"_math "gonum.org/v1/gonum/internal/math32"_' \
>> level1float32.go
echo Generating level1cmplx64.go
echo -e '// Code generated by "go generate gonum.org/v1/gonum/blas/gonum"; DO NOT EDIT.\n' > level1cmplx64.go
cat level1cmplx128.go \
| gofmt -r 'blas.Complex128Level1 -> blas.Complex64Level1' \
\
| gofmt -r 'float64 -> float32' \
| gofmt -r 'complex128 -> complex64' \
\
| gofmt -r 'c128.AxpyInc -> c64.AxpyInc' \
| gofmt -r 'c128.AxpyUnitary -> c64.AxpyUnitary' \
| gofmt -r 'c128.DotcInc -> c64.DotcInc' \
| gofmt -r 'c128.DotcUnitary -> c64.DotcUnitary' \
| gofmt -r 'c128.DotuInc -> c64.DotuInc' \
| gofmt -r 'c128.DotuUnitary -> c64.DotuUnitary' \
| gofmt -r 'c128.ScalInc -> c64.ScalInc' \
| gofmt -r 'c128.ScalUnitary -> c64.ScalUnitary' \
| gofmt -r 'dcabs1 -> scabs1' \
\
| sed -e "s_^\(func (Implementation) \)Zdot\(.*\)\$_$WARNINGC64\1Cdot\2_" \
      -e 's_^// Zdot_// Cdot_' \
      -e "s_^\(func (Implementation) \)Zdscal\(.*\)\$_$WARNINGC64\1Csscal\2_" \
      -e 's_^// Zdscal_// Csscal_' \
      -e "s_^\(func (Implementation) \)Z\(.*\)\$_$WARNINGC64\1C\2_" \
      -e 's_^// Z_// C_' \
      -e "s_^\(func (Implementation) \)Iz\(.*\)\$_$WARNINGC64\1Ic\2_" \
      -e 's_^// Iz_// Ic_' \
      -e "s_^\(func (Implementation) \)Dz\(.*\)\$_$WARNINGC64\1Sc\2_" \
      -e 's_^// Dz_// Sc_' \
      -e 's_"gonum.org/v1/gonum/internal/asm/c128"_"gonum.org/v1/gonum/internal/asm/c64"_' \
      -e 's_"math"_math "gonum.org/v1/gonum/internal/math32"_' \
>> level1cmplx64.go
echo Generating level1float32_sdot.go
echo -e '// Code generated by "go generate gonum.org/v1/gonum/blas/gonum"; DO NOT EDIT.\n' > level1float32_sdot.go
cat level1float64_ddot.go \
| gofmt -r 'float64 -> float32' \
\
| gofmt -r 'f64.DotInc -> f32.DotInc' \
| gofmt -r 'f64.DotUnitary -> f32.DotUnitary' \
\
| sed -e "s_^\(func (Implementation) \)D\(.*\)\$_$WARNINGF32\1S\2_" \
      -e 's_^// D_// S_' \
      -e 's_"gonum.org/v1/gonum/internal/asm/f64"_"gonum.org/v1/gonum/internal/asm/f32"_' \
>> level1float32_sdot.go
echo Generating level1float32_dsdot.go
echo -e '// Code generated by "go generate gonum.org/v1/gonum/blas/gonum"; DO NOT EDIT.\n' > level1float32_dsdot.go
cat level1float64_ddot.go \
| gofmt -r '[]float64 -> []float32' \
\
| gofmt -r 'f64.DotInc -> f32.DdotInc' \
| gofmt -r 'f64.DotUnitary -> f32.DdotUnitary' \
\
| sed -e "s_^\(func (Implementation) \)D\(.*\)\$_$WARNINGF32\1Ds\2_" \
      -e 's_^// D_// Ds_' \
      -e 's_"gonum.org/v1/gonum/internal/asm/f64"_"gonum.org/v1/gonum/internal/asm/f32"_' \
>> level1float32_dsdot.go
echo Generating level1float32_sdsdot.go
echo -e '// Code generated by "go generate gonum.org/v1/gonum/blas/gonum"; DO NOT EDIT.\n' > level1float32_sdsdot.go
cat level1float64_ddot.go \
| gofmt -r 'float64 -> float32' \
\
| gofmt -r 'f64.DotInc(x, y, f(n), f(incX), f(incY), f(ix), f(iy)) -> alpha + float32(f32.DdotInc(x, y, f(n), f(incX), f(incY), f(ix), f(iy)))' \
| gofmt -r 'f64.DotUnitary(a, b) -> alpha + float32(f32.DdotUnitary(a, b))' \
\
| sed -e "s_^\(func (Implementation) \)D\(.*\)\$_$WARNINGF32\1Sds\2_" \
      -e 's_^// D\(.*\)$_// Sds\1 plus a constant_' \
      -e 's_\\sum_alpha + \\sum_' \
      -e 's/n int/n int, alpha float32/' \
      -e 's_"gonum.org/v1/gonum/internal/asm/f64"_"gonum.org/v1/gonum/internal/asm/f32"_' \
>> level1float32_sdsdot.go
# Level2 routines.
echo Generating level2float32.go
echo -e '// Code generated by "go generate gonum.org/v1/gonum/blas/gonum"; DO NOT EDIT.\n' > level2float32.go
cat level2float64.go \
| gofmt -r 'blas.Float64Level2 -> blas.Float32Level2' \
\
| gofmt -r 'float64 -> float32' \
\
| gofmt -r 'f64.AxpyInc -> f32.AxpyInc' \
| gofmt -r 'f64.AxpyIncTo -> f32.AxpyIncTo' \
| gofmt -r 'f64.AxpyUnitary -> f32.AxpyUnitary' \
| gofmt -r 'f64.AxpyUnitaryTo -> f32.AxpyUnitaryTo' \
| gofmt -r 'f64.DotInc -> f32.DotInc' \
| gofmt -r 'f64.DotUnitary -> f32.DotUnitary' \
| gofmt -r 'f64.ScalInc -> f32.ScalInc' \
| gofmt -r 'f64.ScalUnitary -> f32.ScalUnitary' \
| gofmt -r 'f64.Ger -> f32.Ger' \
\
| sed -e "s_^\(func (Implementation) \)D\(.*\)\$_$WARNINGF32\1S\2_" \
      -e 's_^// D_// S_' \
      -e 's_"gonum.org/v1/gonum/internal/asm/f64"_"gonum.org/v1/gonum/internal/asm/f32"_' \
>> level2float32.go
echo Generating level2cmplx64.go
echo -e '// Code generated by "go generate gonum.org/v1/gonum/blas/gonum"; DO NOT EDIT.\n' > level2cmplx64.go
cat level2cmplx128.go \
| gofmt -r 'blas.Complex128Level2 -> blas.Complex64Level2' \
\
| gofmt -r 'complex128 -> complex64' \
| gofmt -r 'float64 -> float32' \
\
| gofmt -r 'c128.AxpyInc -> c64.AxpyInc' \
| gofmt -r 'c128.AxpyUnitary -> c64.AxpyUnitary' \
| gofmt -r 'c128.DotuInc -> c64.DotuInc' \
| gofmt -r 'c128.DotuUnitary -> c64.DotuUnitary' \
| gofmt -r 'c128.ScalInc -> c64.ScalInc' \
| gofmt -r 'c128.ScalUnitary -> c64.ScalUnitary' \
\
| sed -e "s_^\(func (Implementation) \)Z\(.*\)\$_$WARNINGC64\1C\2_" \
      -e 's_^// Z_// C_' \
      -e 's_"gonum.org/v1/gonum/internal/asm/c128"_"gonum.org/v1/gonum/internal/asm/c64"_' \
      -e 's_"math/cmplx"_cmplx "gonum.org/v1/gonum/internal/cmplx64"_' \
>> level2cmplx64.go
# Level3 routines.
echo Generating level3float32.go
echo -e '// Code generated by "go generate gonum.org/v1/gonum/blas/gonum"; DO NOT EDIT.\n' > level3float32.go
cat level3float64.go \
| gofmt -r 'blas.Float64Level3 -> blas.Float32Level3' \
\
| gofmt -r 'float64 -> float32' \
\
| gofmt -r 'f64.AxpyUnitaryTo -> f32.AxpyUnitaryTo' \
| gofmt -r 'f64.AxpyUnitary -> f32.AxpyUnitary' \
| gofmt -r 'f64.DotUnitary -> f32.DotUnitary' \
| gofmt -r 'f64.ScalUnitary -> f32.ScalUnitary' \
\
| sed -e "s_^\(func (Implementation) \)D\(.*\)\$_$WARNINGF32\1S\2_" \
      -e 's_^// D_// S_' \
      -e 's_"gonum.org/v1/gonum/internal/asm/f64"_"gonum.org/v1/gonum/internal/asm/f32"_' \
>> level3float32.go
echo Generating sgemm.go
echo -e '// Code generated by "go generate gonum.org/v1/gonum/blas/gonum"; DO NOT EDIT.\n' > sgemm.go
cat dgemm.go \
| gofmt -r 'float64 -> float32' \
| gofmt -r 'sliceView64 -> sliceView32' \
\
| gofmt -r 'dgemmParallel -> sgemmParallel' \
| gofmt -r 'computeNumBlocks64 -> computeNumBlocks32' \
| gofmt -r 'dgemmSerial -> sgemmSerial' \
| gofmt -r 'dgemmSerialNotNot -> sgemmSerialNotNot' \
| gofmt -r 'dgemmSerialTransNot -> sgemmSerialTransNot' \
| gofmt -r 'dgemmSerialNotTrans -> sgemmSerialNotTrans' \
| gofmt -r 'dgemmSerialTransTrans -> sgemmSerialTransTrans' \
\
| gofmt -r 'f64.AxpyInc -> f32.AxpyInc' \
| gofmt -r 'f64.AxpyUnitary -> f32.AxpyUnitary' \
| gofmt -r 'f64.DotUnitary -> f32.DotUnitary' \
\
| sed -e "s_^\(func (Implementation) \)D\(.*\)\$_$WARNINGF32\1S\2_" \
      -e 's_^// D_// S_' \
      -e 's_^// d_// s_' \
      -e 's_"gonum.org/v1/gonum/internal/asm/f64"_"gonum.org/v1/gonum/internal/asm/f32"_' \
>> sgemm.go
echo Generating level3cmplx64.go
echo -e '// Code generated by "go generate gonum.org/v1/gonum/blas/gonum"; DO NOT EDIT.\n' > level3cmplx64.go
cat level3cmplx128.go \
| gofmt -r 'blas.Complex128Level3 -> blas.Complex64Level3' \
\
| gofmt -r 'float64 -> float32' \
| gofmt -r 'complex128 -> complex64' \
\
| gofmt -r 'c128.ScalUnitary -> c64.ScalUnitary' \
| gofmt -r 'c128.DscalUnitary -> c64.SscalUnitary' \
| gofmt -r 'c128.DotcUnitary -> c64.DotcUnitary' \
| gofmt -r 'c128.AxpyUnitary -> c64.AxpyUnitary' \
| gofmt -r 'c128.DotuUnitary -> c64.DotuUnitary' \
\
| sed -e "s_^\(func (Implementation) \)Z\(.*\)\$_$WARNINGC64\1C\2_" \
      -e 's_^// Z_// C_' \
      -e 's_"gonum.org/v1/gonum/internal/asm/c128"_"gonum.org/v1/gonum/internal/asm/c64"_' \
      -e 's_"math/cmplx"_cmplx "gonum.org/v1/gonum/internal/cmplx64"_' \
>> level3cmplx64.go
|
SpectoLabs/hoverfly
|
vendor/gonum.org/v1/gonum/blas/gonum/single_precision.bash
|
Shell
|
apache-2.0
| 8,954 |
#!/bin/sh
# Build libcds on HP-UX 11.23 IA64 with GCC 4.3.3 and a local Boost tree,
# logging the full build output.
clear
export BOOST_ROOT=~/boost
GCC_ROOT=/usr/local/gcc-433
# Put the private GCC first on PATH and its runtime on LD_LIBRARY_PATH.
export PATH=${GCC_ROOT}/bin:${PATH}
export LD_LIBRARY_PATH=${GCC_ROOT}/lib:${LD_LIBRARY_PATH}
# NOTE(review): the -l value is single-quoted, so ${BOOST_ROOT} is NOT
# expanded by this shell — presumably build.sh expands it itself; confirm.
./build.sh -b 64 \
--clean \
-x 'g++' \
-z '-std=c++0x -Wall -pedantic' \
-l '-L${BOOST_ROOT}/bin/hpux1123-ia64' \
--with-boost ${BOOST_ROOT} \
--with-make gmake \
--platform-suffix 11.23 \
2>&1 | tee build-hpux11.23-ia64.log
|
Rapotkinnik/libcds
|
build/sample/build-hpux1123.sh
|
Shell
|
bsd-2-clause
| 394 |
#!/bin/bash
# configs/qemu-i486/nsh/setenv.sh
#
# Copyright (C) 2011 Gregory Nutt. All rights reserved.
# Author: Gregory Nutt <[email protected]>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# 3. Neither the name NuttX nor the names of its contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# This script mutates the caller's environment, so it must be sourced, not
# executed.  When executed directly, $0 is the script name and we bail out.
# Fix: quote "$0" so paths containing spaces do not break basename.
if [ "$(basename "$0")" = "setenv.sh" ] ; then
echo "You must source this script, not run it!" 1>&2
exit 1
fi
# Remember the original PATH once so repeated sourcing stays idempotent.
# Fix: quote the expansion -- an unquoted ${PATH_ORIG} containing spaces
# breaks the [ -z ... ] test.
if [ -z "${PATH_ORIG}" ]; then export PATH_ORIG=${PATH}; fi
# Uncomment and modify the following if you are using anything other
# than the system GCC
# WD=`pwd`
# export BUILDROOT_BIN="${WD}/../../../misc/buildroot/build_i486/staging_dir/bin"
# export PATH="${BUILDROOT_BIN}:/sbin:/usr/sbin:${PATH_ORIG}"
echo "PATH : ${PATH}"
|
IUTInfoAix/terrarium_2015
|
nuttx/configs/qemu-i486/nsh/setenv.sh
|
Shell
|
bsd-2-clause
| 2,070 |
#! /usr/bin/env sh
# This file is part of Mbed TLS (https://tls.mbed.org)
#
# Copyright (c) 2018, Arm Limited, All Rights Reserved
#
# Purpose:
#
# Run 'pylint' on Python files for programming errors and helps enforcing
# PEP8 coding standards.
# Fix: the original wrapped `hash pylint` in backticks, executing the probe
# in a subshell and then running its (empty) output as a command -- the test
# only worked by accident of empty-command status rules.  Probe directly
# with the portable `command -v` instead.
if command -v pylint > /dev/null 2>&1; then
pylint -j 2 tests/scripts/generate_test_code.py --rcfile .pylint
pylint -j 2 tests/scripts/test_generate_test_code.py --rcfile .pylint
pylint -j 2 tests/scripts/mbedtls_test.py --rcfile .pylint
else
echo "$0: WARNING: 'pylint' not found! Skipping checks on Python files."
fi
|
bukepo/openthread
|
third_party/mbedtls/repo/tests/scripts/check-python-files.sh
|
Shell
|
bsd-3-clause
| 578 |
#!/bin/sh
# Build an HTML help-search UI for R inside TextMate.
# $1 - search term (regexp); $2 - "1" for an anchored ("begins with") search.
TERM="$1"
AS="$2"
# Scratch HTML fragments for the frames of the help browser.
HEAD=/tmp/textmate_Rhelper_head.html
DATA=/tmp/textmate_Rhelper_data.html
SEARCH=/tmp/textmate_Rhelper_search.html
# File through which the R helper daemon returns its answers.
RhelperAnswer=/tmp/textmate_Rhelper_out
# Ask the daemon whether R >= 2.10.0 (HTTP help server available) and which
# port it listens on; the short sleeps give the daemon time to respond.
"$TM_BUNDLE_SUPPORT"/bin/askRhelperDaemon.sh "cat(getRversion()>='2.10.0',sep='')"
sleep 0.05
IS_HELPSERVER=$(cat "$RhelperAnswer")
"$TM_BUNDLE_SUPPORT"/bin/askRhelperDaemon.sh "@getHttpPort()"
sleep 0.05
PORT=$(cat "$RhelperAnswer")
# Start the result table (Package/Topic header row).
echo "<html><body style='margin-top:5mm'><table style='border-collapse:collapse'><tr><td style='padding-right:1cm;border-bottom:1px solid black'><b>Package</b></td><td style='border-bottom:1px solid black'><b>Topic</b></td></tr>" > "$HEAD"
# AS="1" requests an anchored search; AS is then reused as the 'checked'
# attribute of the "begins with" checkbox rendered in the search form later.
if [ "$AS" == "1" ]; then
"$TM_BUNDLE_SUPPORT"/bin/askRhelperDaemon.sh "@getSearchHelp('^$TERM')"
AS="checked"
else
"$TM_BUNDLE_SUPPORT"/bin/askRhelperDaemon.sh "@getSearchHelp('$TERM')"
AS=""
fi
sleep 0.05
# One match per line in the daemon's answer file.
CNT=`cat "$RhelperAnswer" | wc -l`
if [ $CNT -gt 500 ]; then
echo "<tr colspan=2><td><i>too much matches...</i></td></tr>" >> "$HEAD"
else
# Read the answer file on stdin in the *current* shell (no pipe subshell)
# so $link is still set after the loops for the single-match shortcut.
exec<"$RhelperAnswer"
if [ "$IS_HELPSERVER" == "TRUE" ]; then
# Help server available: link topics through the R HTTP help server.
while read i
do
lib=$(echo -e "$i" | cut -d ' ' -f1)
fun=$(echo -e "$i" | cut -d ' ' -f2)
link=$(echo -e "$i" | cut -d ' ' -f3)
echo "<tr><td>$lib</td><td><a href='$link' target='data'>$fun</a></td></tr>" >> "$HEAD"
done
# Exactly one hit: load its page straight into the data frame.
if [ $CNT -eq 1 ]; then
echo "<base href=\"$link\">" > "$DATA"
curl -gsS "$link" >> "$DATA"
fi
else
# No help server: link to the static HTML files on disk instead.
while read i
do
lib=$(echo -e "$i" | cut -d ' ' -f1)
fun=$(echo -e "$i" | cut -d ' ' -f2)
link=$(echo -e "$i" | cut -d ' ' -f3)
echo "<tr><td>$lib</td><td><a href='file://$link' target='data'>$fun</a></td></tr>" >> "$HEAD"
done
if [ $CNT -eq 1 ]; then
# NOTE(review): this branch writes to stdout, not "$DATA", unlike the
# help-server branch above -- presumably captured by the caller; confirm.
echo "<base href=\"file://$link\">"
cat "$link" | iconv -s -f ISO8859-1 -t UTF-8
fi
fi
fi
echo "</table></body></html>" >> "$HEAD"
# Emit the search-form frame.  The unquoted here-doc delimiter means
# $TM_BUNDLE_SUPPORT and $TERM below are expanded now; <<- strips leading
# tabs only.
cat <<-HFS > "$SEARCH"
<html>
<head>
<script type='text/javascript' charset='utf-8'>
function SearchServer(term) {
if (term.length > 0) {
TextMate.isBusy = true;
if(document.sform.where.checked == true) {
TextMate.system('"$TM_BUNDLE_SUPPORT/bin/Rsearch.sh" "' + term + '" 1', null);
} else {
TextMate.system('"$TM_BUNDLE_SUPPORT/bin/Rsearch.sh" "' + term + '" 0', null);
}
TextMate.system('sleep 0.3', null);
parent.head.location.reload();
parent.data.location.reload();
TextMate.isBusy = false;
parent.search.sform.search.value = term;
}
}
HFS
# Point the "R documentation" button at the running help server, otherwise
# at the static docs under R_HOME (defaulted to the framework location).
if [ "$IS_HELPSERVER" != "TRUE" ]; then
echo "function Rdoc() {TextMate.system('open \"${R_HOME:=/Library/Frameworks/R.framework/Versions/Current/Resources}/doc/html/index.html\"', null);}" >> "$SEARCH"
else
echo "function Rdoc() {TextMate.system('open \"http://127.0.0.1:$PORT/doc/html/index.html\"', null);}" >> "$SEARCH"
fi
# Rest of the search form: input box, "begins with" checkbox ($AS carries
# the 'checked' attribute), and the R-documentation button.
cat <<-HFS2 >> "$SEARCH"
</script>
</head>
<body bgcolor='#ECECEC''>
<table>
<tr>
<td>
<form name='sform' onsubmit='SearchServer(document.sform.search.value)'>
<small><small><i>Search for</i><br /></small></small>
<input tabindex='0' id='search' type='search' placeholder='regexp' results='20' onsearch='SearchServer(this.value)' value="$TERM">
</td>
<td>
<font style='font-size:7pt'>
<br /><button onclick='SearchServer(document.sform.search.value)'>Search</button>
<br /><input type='checkbox' name='where' value='key' $AS><i> begins with</i>
</font>
</td>
</form>
</td>
</tr>
<tr>
<td align=center colspan=3>
<input onclick='Rdoc()' type=button value='R documentation'>
</td>
</tr>
</table>
</body>
</html>
HFS2
sleep 0.05
|
Nikpolik/apm
|
spec/fixtures/r.tmbundle/Support/bin/Rsearch.sh
|
Shell
|
mit
| 3,581 |
# Append a profiling define after the CXX_FLAGS block of every generated
# Makefile in the tree.  Uses GNU sed's suffix-less -i (not BSD-portable).
# NOTE(review): the macro is spelled PROFLING (not PROFILING) -- presumably
# matched by the sources' #ifdef; confirm before "fixing" the spelling.
find . -name "Makefile" -exec sed -i '1,/CXX_FLAGS/a CXX_FLAGS+=-DPROFLING' "{}" \;
|
kolewu/sirius
|
sirius-suite/scripts/add-profiling.sh
|
Shell
|
bsd-3-clause
| 84 |
#!/bin/sh -e
# Publish the package to PyPI with logging statements stripped, then restore
# them.  The -e shebang aborts on the first failure.
# NOTE(review): if the upload fails, the enable script never runs and the
# tree is left with logging disabled -- confirm that is acceptable.
./scripts/disable_log_statements.sh
python setup.py sdist upload -r pypi
./scripts/enable_log_statements.sh
|
zchee/python-client
|
scripts/publish.sh
|
Shell
|
apache-2.0
| 122 |
#!/bin/sh
# SUMMARY: Namespace stress with 10 concurrent short TCP/IPv4 connections over a veth pair
# LABELS:
# REPEAT:
set -e
# Source libraries. Uncomment if needed/defined
#. "${RT_LIB}"
. "${RT_PROJECT_ROOT}/_lib/lib.sh"
NAME=test-ns
# Remove build artifacts named after this test on exit.
clean_up() {
rm -rf ${NAME}-*
}
trap clean_up EXIT
# Build the test image and boot it; the test passes when the guest output
# contains "suite PASSED" (grep -q's status propagates under set -e).
moby build -format kernel+initrd -name ${NAME} ../../common.yml test.yml
RESULT="$(linuxkit run -cpus 2 ${NAME})"
echo "${RESULT}" | grep -q "suite PASSED"
exit 0
|
davefreitag/linuxkit
|
test/cases/020_kernel/110_namespace/005_kernel-4.13.x/010_veth/011_echo-tcp-ipv4-short-10con-single/test.sh
|
Shell
|
apache-2.0
| 463 |
# ld emulparams fragment: reuse the little-endian ARM ELF settings, then
# override the output format for big-endian ARM targets.
source_sh ${srcdir}/emulparams/armelf.sh
OUTPUT_FORMAT="elf32-bigarm"
|
mattstock/binutils-bexkat1
|
ld/emulparams/armelfb.sh
|
Shell
|
gpl-2.0
| 70 |
#!/bin/bash
# conda build script for ucsc-facount.
mkdir -p "$PREFIX/bin"
# macOS ships a prebuilt faCount binary; on Linux build it from the kent tree.
if [ "$(uname)" == "Darwin" ]; then
cp faCount "$PREFIX/bin"
else
export MACHTYPE=x86_64
export BINDIR=$(pwd)/bin
mkdir -p "$BINDIR"
# Build the kent libraries faCount links against, then the tool itself;
# each make runs in a subshell so the working directory is preserved.
(cd kent/src/lib && make)
(cd kent/src/htslib && make)
(cd kent/src/jkOwnLib && make)
(cd kent/src/hg/lib && make)
(cd kent/src/utils/faCount && make)
cp bin/faCount "$PREFIX/bin"
fi
chmod +x "$PREFIX/bin/faCount"
|
dmaticzka/bioconda-recipes
|
recipes/ucsc-facount/build.sh
|
Shell
|
mit
| 422 |
#!/bin/bash
# Set up the Puppet Master
# Runs the whole provisioning sequence over one vagrant ssh session on the
# "puppet" box: stop the firewall, install puppetmaster, point its modules
# and site manifest at the shared /vagrant folder, start the service, then
# trigger an initial agent run.
# NOTE(review): mixes 'service iptables' (RHEL-style) with apt-get
# (Debian-style) -- confirm the target distribution.
vagrant ssh puppet -c "sudo service iptables stop; \
sudo apt-get install -y puppetmaster; \
sudo rmdir /etc/puppet/modules || sudo unlink /etc/puppet/modules; \
sudo ln -s /vagrant/modules /etc/puppet/modules; \
sudo ln -s /vagrant/site.pp /etc/puppet/manifests/site.pp; \
sudo service puppetmaster start;\
sudo puppet agent -t;"
|
guessi/puppetlabs-openstack
|
examples/allinone/10_setup_master.sh
|
Shell
|
apache-2.0
| 371 |
/*
* Copyright 2021 elven cache. All rights reserved.
* License: https://github.com/bkaradzic/bgfx#license-bsd-2-clause
*/
#ifndef PARAMETERS_SH
#define PARAMETERS_SH
// struct PassUniforms
// Pass constants packed into a vec4 array; the defines below name the
// individual lanes.
uniform vec4 u_params[13];
#define u_depthUnpackConsts (u_params[0].xy)
#define u_frameIdx (u_params[0].z)
#define u_lobeRotation (u_params[0].w)
// NDC -> view-space reconstruction terms.
#define u_ndcToViewMul (u_params[1].xy)
#define u_ndcToViewAdd (u_params[1].zw)
#define u_blurSteps (u_params[2].x)
#define u_lobeCount (u_params[2].y)
#define u_lobeRadiusMin (u_params[2].z)
#define u_lobeRadiusDelta2x (u_params[2].w)
// NOTE(review): u_samplePattern aliases the same lane as u_lobeCount
// (u_params[2].y) -- presumably only one of the two is referenced per
// shader variant; confirm.
#define u_samplePattern (u_params[2].y)
#define u_maxBlurSize (u_params[3].x)
#define u_focusPoint (u_params[3].y)
#define u_focusScale (u_params[3].z)
#define u_radiusScale (u_params[3].w)
#endif // PARAMETERS_SH
|
emoon/bgfx
|
examples/45-bokeh/parameters.sh
|
Shell
|
bsd-2-clause
| 834 |
#!/bin/sh
#
# A simple RTP receiver
#
# Receives H.263+ video RTP on port 5000 and RTCP on 5001; sends receiver
# RTCP reports back to $DEST:5005 (GStreamer 0.10 element names).
VIDEO_CAPS="application/x-rtp,media=(string)video,clock-rate=(int)90000,encoding-name=(string)H263-1998"
#DEST=192.168.1.126
DEST=localhost
# Depayload + decode chain, and colorspace-convert + display sink.
VIDEO_DEC="rtph263pdepay ! ffdec_h263"
VIDEO_SINK="ffmpegcolorspace ! autovideosink"
# Jitter-buffer latency in milliseconds.
LATENCY=100
gst-launch -v gstrtpbin name=rtpbin latency=$LATENCY \
udpsrc caps=$VIDEO_CAPS port=5000 ! rtpbin.recv_rtp_sink_0 \
rtpbin. ! $VIDEO_DEC ! $VIDEO_SINK \
udpsrc port=5001 ! rtpbin.recv_rtcp_sink_0 \
rtpbin.send_rtcp_src_0 ! udpsink host=$DEST port=5005 sync=false async=false
|
ahmedammar/platform_external_gst_plugins_good
|
tests/examples/rtp/client-H263p.sh
|
Shell
|
lgpl-2.1
| 723 |
#!/bin/sh
#
# Copyright (c) 2009 Eric Wong
#
test_description='git svn initial master branch is "trunk" if possible'
. ./lib-git-svn.sh
# Seed the SVN repository with a standard trunk/branches layout.
test_expect_success 'setup test repository' '
mkdir i &&
> i/a &&
svn_cmd import -m trunk i "$svnrepo/trunk" &&
svn_cmd import -m b/a i "$svnrepo/branches/a" &&
svn_cmd import -m b/b i "$svnrepo/branches/b"
'
# After a --stdlayout clone, master must point at the same commit as the
# remote trunk ref (not one of the branches).
test_expect_success 'git svn clone --stdlayout sets up trunk as master' '
git svn clone -s "$svnrepo" g &&
(
cd g &&
test x`git rev-parse --verify refs/remotes/origin/trunk^0` = \
x`git rev-parse --verify refs/heads/master^0`
)
'
test_done
|
overtherain/scriptfile
|
tool-kit/git-2.1.2/t/t9145-git-svn-master-branch.sh
|
Shell
|
mit
| 607 |
# conda pre-unlink hook: remove the Bioconductor data package from the conda
# environment's R library when this conda package is uninstalled.
R CMD REMOVE --library=$PREFIX/lib/R/library/ MeSH.Pfa.3D7.eg.db
|
joachimwolff/bioconda-recipes
|
recipes/bioconductor-mesh.pfa.3d7.eg.db/pre-unlink.sh
|
Shell
|
mit
| 65 |
#!/bin/bash
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Don't run this script standalone. Instead, run from the repository root:
# ./tools/run_tests/run_tests.py -l objc
set -e
# CocoaPods requires the terminal to be using UTF-8 encoding.
export LANG=en_US.UTF-8
# Fix: quote the dirname expansion so checkout paths containing spaces do
# not break the cd.
cd "$(dirname "$0")"
# Fail early with a clear message when the required tooling is missing.
hash pod 2>/dev/null || { echo >&2 "Cocoapods needs to be installed."; exit 1; }
hash xcodebuild 2>/dev/null || {
echo >&2 "XCode command-line tools need to be installed."
exit 1
}
# clean the directory: remove previous pod state and generated client stubs.
rm -rf Pods
rm -rf Tests.xcworkspace
rm -f Podfile.lock
rm -f RemoteTestClient/*.{h,m}
pod install
|
arkmaxim/grpc
|
src/objective-c/tests/build_tests.sh
|
Shell
|
bsd-3-clause
| 2,083 |
#!/bin/bash
# Compile the ipops module's RFC1918 Ragel grammar into C.
# Fix: probe for ragel with the portable `command -v` instead of `which`
# followed by a separate $? test.
if ! command -v ragel >/dev/null 2>&1 ; then
echo "ERROR. Ragel not installed, cannot compile the Ragel grammar." >&2
exit 1
else
ragel -v
echo
fi
# Abort on any failure from here on.
set -e
RAGEL_FILE=rfc1918_parser
echo ">>> Compiling Ragel grammar $RAGEL_FILE.rl ..."
# -G2: goto-driven state machine; -C: emit C code.
ragel -G2 -C $RAGEL_FILE.rl
echo
echo "<<< OK: $RAGEL_FILE.c generated"
echo
|
MayamaTakeshi/kamailio
|
modules/ipops/compile_rfc1918_parser.rl.sh
|
Shell
|
gpl-2.0
| 337 |
#!/bin/sh
##
## Copyright (c) 2014 The WebM project authors. All Rights Reserved.
##
## Use of this source code is governed by a BSD-style license
## that can be found in the LICENSE file in the root of the source
## tree. An additional intellectual property rights grant can be found
## in the file PATENTS. All contributing project authors may
## be found in the AUTHORS file in the root of the source tree.
##
##
## This script generates 'VPX.framework'. An iOS app can encode and decode VPx
## video by including 'VPX.framework'.
##
## Run iosbuild.sh to create 'VPX.framework' in the current directory.
##
set -e
# Redirection suffix eval'd into the build commands; cleared by
# --show-build-output.
devnull='> /dev/null 2>&1'
BUILD_ROOT="_iosbuild"
CONFIGURE_ARGS="--disable-docs
--disable-examples
--disable-libyuv
--disable-unit-tests"
DIST_DIR="_dist"
FRAMEWORK_DIR="VPX.framework"
HEADER_DIR="${FRAMEWORK_DIR}/Headers/vpx"
MAKE_JOBS=1
SCRIPT_DIR=$(dirname "$0")
LIBVPX_SOURCE_DIR=$(cd ${SCRIPT_DIR}/../..; pwd)
# NOTE(review): ${SDK} is never set in this script -- presumably inherited
# from the environment; xcrun uses the default SDK when it is empty.
LIPO=$(xcrun -sdk iphoneos${SDK} -find lipo)
ORIG_PWD="$(pwd)"
# Default architectures: device (arm*) and simulator (x86*) targets.
TARGETS="arm64-darwin-gcc
armv7-darwin-gcc
armv7s-darwin-gcc
x86-iphonesimulator-gcc
x86_64-iphonesimulator-gcc"
# Configures for the target specified by $1, and invokes make with the dist
# target using $DIST_DIR as the distribution output directory.
build_target() {
local target="$1"
local old_pwd="$(pwd)"
vlog "***Building target: ${target}***"
mkdir "${target}"
cd "${target}"
# eval so the $devnull redirection string takes effect when quiet.
eval "${LIBVPX_SOURCE_DIR}/configure" --target="${target}" \
${CONFIGURE_ARGS} ${EXTRA_CONFIGURE_ARGS} ${devnull}
# DIST_DIR is read by the libvpx makefiles' dist target.
export DIST_DIR
eval make -j ${MAKE_JOBS} dist ${devnull}
cd "${old_pwd}"
vlog "***Done building target: ${target}***"
}
# Returns (echoes) the preprocessor symbol identifying the CPU of the target
# specified by $1.  Echoes an #error line and returns 1 for unknown targets.
target_to_preproc_symbol() {
# Fix: declare target local -- the original assignment leaked a global
# variable out of the function.
local target="$1"
case "${target}" in
arm64-*)
echo "__aarch64__"
;;
armv7-*)
echo "__ARM_ARCH_7A__"
;;
armv7s-*)
echo "__ARM_ARCH_7S__"
;;
x86-*)
echo "__i386__"
;;
x86_64-*)
echo "__x86_64__"
;;
*)
echo "#error ${target} unknown/unsupported"
return 1
;;
esac
}
# Create a vpx_config.h shim that, based on preprocessor settings for the
# current target CPU, includes the real vpx_config.h for the current target.
# $1 is the list of targets.
create_vpx_framework_config_shim() {
local targets="$1"
local config_file="${HEADER_DIR}/vpx_config.h"
local preproc_symbol=""
local target=""
local include_guard="VPX_FRAMEWORK_HEADERS_VPX_VPX_CONFIG_H_"
# License header plus the opening of the "#if defined" chain; the
# per-target branches are appended in the loop below.
local file_header="/*
* Copyright (c) $(date +%Y) The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/* GENERATED FILE: DO NOT EDIT! */
#ifndef ${include_guard}
#define ${include_guard}
#if defined"
printf "%s" "${file_header}" > "${config_file}"
for target in ${targets}; do
preproc_symbol=$(target_to_preproc_symbol "${target}")
printf " ${preproc_symbol}\n" >> "${config_file}"
printf "#define VPX_FRAMEWORK_TARGET \"${target}\"\n" >> "${config_file}"
printf "#include \"VPX/vpx/${target}/vpx_config.h\"\n" >> "${config_file}"
printf "#elif defined" >> "${config_file}"
# Ship the real per-target vpx_config.h inside the framework headers.
mkdir "${HEADER_DIR}/${target}"
cp -p "${BUILD_ROOT}/${target}/vpx_config.h" "${HEADER_DIR}/${target}"
done
# Consume the last line of output from the loop: We don't want it.
# (BSD/macOS sed syntax: -i '' edits in place with no backup file.)
sed -i '' -e '$d' "${config_file}"
printf "#endif\n\n" >> "${config_file}"
printf "#endif // ${include_guard}" >> "${config_file}"
}
# Configures and builds each target specified by $1, and then builds
# VPX.framework.
build_framework() {
local lib_list=""
local targets="$1"
local target=""
local target_dist_dir=""
# Clean up from previous build(s).
rm -rf "${BUILD_ROOT}" "${FRAMEWORK_DIR}"
# Create output dirs.
mkdir -p "${BUILD_ROOT}"
mkdir -p "${HEADER_DIR}"
cd "${BUILD_ROOT}"
# Build every target and collect its static library for the fat lipo merge.
for target in ${targets}; do
build_target "${target}"
target_dist_dir="${BUILD_ROOT}/${target}/${DIST_DIR}"
lib_list="${lib_list} ${target_dist_dir}/lib/libvpx.a"
done
cd "${ORIG_PWD}"
# The basic libvpx API includes are all the same; just grab the most recent
# set.
cp -p "${target_dist_dir}"/include/vpx/* "${HEADER_DIR}"
# Build the fat library.
${LIPO} -create ${lib_list} -output ${FRAMEWORK_DIR}/VPX
# Create the vpx_config.h shim that allows usage of vpx_config.h from
# within VPX.framework.
create_vpx_framework_config_shim "${targets}"
# Copy in vpx_version.h.
# (${target} still holds the last target from the loop above.)
cp -p "${BUILD_ROOT}/${target}/vpx_version.h" "${HEADER_DIR}"
vlog "Created fat library ${FRAMEWORK_DIR}/VPX containing:"
for lib in ${lib_list}; do
vlog " $(echo ${lib} | awk -F / '{print $2, $NF}')"
done
# TODO(tomfinegan): Verify that expected targets are included within
# VPX.framework/VPX via lipo -info.
}
# Trap function. Cleans up the subtree used to build all targets contained in
# $TARGETS.
cleanup() {
# Fix: 'local readonly res=$?' declared a stray local variable named
# "readonly"; capture the exit status plainly.  Must stay the first
# statement so $? is still the triggering status.
local res=$?
cd "${ORIG_PWD}"
if [ $res -ne 0 ]; then
elog "build exited with error ($res)"
fi
if [ "${PRESERVE_BUILD_OUTPUT}" != "yes" ]; then
rm -rf "${BUILD_ROOT}"
fi
}
# Print usage help for the command-line flags parsed below.
iosbuild_usage() {
cat << EOF
Usage: ${0##*/} [arguments]
--help: Display this message and exit.
--extra-configure-args <args>: Extra args to pass when configuring libvpx.
--jobs: Number of make jobs.
--preserve-build-output: Do not delete the build directory.
--show-build-output: Show output from each library build.
--targets <targets>: Override default target list. Defaults:
${TARGETS}
--verbose: Output information about the environment and each stage of the
build.
EOF
}
# Report a failure reason on stderr, prefixed with this script's basename.
elog() {
printf '%s failed because: %s\n' "${0##*/}" "$*" >&2
}
# Verbose-mode logging: forward the arguments to stdout only when --verbose
# was given; otherwise stay silent (and still return success).
vlog() {
[ "${VERBOSE}" != "yes" ] || echo "$@"
}
# Run cleanup on every exit path (success, error, or interrupt).
trap cleanup EXIT
# Parse the command line.
while [ -n "$1" ]; do
case "$1" in
--extra-configure-args)
EXTRA_CONFIGURE_ARGS="$2"
# Consume the flag's value; the loop's trailing shift eats the flag.
shift
;;
--help)
iosbuild_usage
exit
;;
--jobs)
MAKE_JOBS="$2"
shift
;;
--preserve-build-output)
PRESERVE_BUILD_OUTPUT=yes
;;
--show-build-output)
# Clearing $devnull lets the eval'd build commands print normally.
devnull=
;;
--targets)
TARGETS="$2"
shift
;;
--verbose)
VERBOSE=yes
;;
*)
iosbuild_usage
exit 1
;;
esac
shift
done
# Dump the effective configuration before building when --verbose was given.
if [ "${VERBOSE}" = "yes" ]; then
cat << EOF
BUILD_ROOT=${BUILD_ROOT}
DIST_DIR=${DIST_DIR}
CONFIGURE_ARGS=${CONFIGURE_ARGS}
EXTRA_CONFIGURE_ARGS=${EXTRA_CONFIGURE_ARGS}
FRAMEWORK_DIR=${FRAMEWORK_DIR}
HEADER_DIR=${HEADER_DIR}
MAKE_JOBS=${MAKE_JOBS}
PRESERVE_BUILD_OUTPUT=${PRESERVE_BUILD_OUTPUT}
LIBVPX_SOURCE_DIR=${LIBVPX_SOURCE_DIR}
LIPO=${LIPO}
ORIG_PWD=${ORIG_PWD}
TARGETS="${TARGETS}"
EOF
fi
# Build all targets and assemble VPX.framework.
build_framework "${TARGETS}"
echo "Successfully built '${FRAMEWORK_DIR}' for:"
echo " ${TARGETS}"
|
Teamxrtc/webrtc-streaming-node
|
third_party/webrtc/src/chromium/src/third_party/libvpx/source/libvpx/build/make/iosbuild.sh
|
Shell
|
mit
| 7,228 |
#!/usr/bin/env bash
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#!/bin/bash
# Enable ARM apt sources for cross-building Raspberry Pi packages; no-op on
# non-Ubuntu hosts (NAME comes from /etc/os-release).
. /etc/os-release
[[ "${NAME}" == "Ubuntu" ]] || exit 0
# Pin the existing mirrors to amd64 so apt does not look for ARM packages
# there, then add the arm64/armhf ports repositories.
sed -i "s/deb\ /deb \[arch=amd64\]\ /g" /etc/apt/sources.list
cat <<EOT >> /etc/apt/sources.list
deb [arch=arm64,armhf] http://ports.ubuntu.com/ubuntu-ports ${UBUNTU_CODENAME} main universe
deb [arch=arm64,armhf] http://ports.ubuntu.com/ubuntu-ports ${UBUNTU_CODENAME}-updates main universe
deb [arch=arm64,armhf] http://ports.ubuntu.com/ubuntu-ports ${UBUNTU_CODENAME}-security main universe
EOT
|
chromium/chromium
|
third_party/tflite_support/src/tensorflow_lite_support/tools/pip_package/rpi/update_sources.sh
|
Shell
|
bsd-3-clause
| 1,178 |
#!/bin/bash
# Packer post-provision cleanup: shrink the image and scrub login records
# before boxing.
echo "start - cleanup"
rm -rf /tmp/*
rm -f /var/log/wtmp
rm -f /var/log/btmp
yum clean all
# NOTE(review): history -c clears only this non-interactive shell's own
# history buffer -- presumably intended to scrub the history file; confirm.
history -c
echo "end - cleanup"
|
garandam/packer-vagrant-puppet
|
scripts/cleanup.sh
|
Shell
|
mit
| 135 |
#!/bin/sh
# Build an .ipa of the "HP Printers Staging" scheme in Release configuration
# using the 'ipa' command-line tool (assumed to be on PATH).
ipa build --scheme "HP Printers Staging" --config Release
|
kongcao7/aurasma_hp
|
learningcenter/Scripts/create_build.sh
|
Shell
|
mit
| 68 |
#!/usr/bin/env bash
# Via https://github.com/elithrar <[email protected]>
# Sets up a Linux and/or WSL (Windows Subsystem for Linux) based dev-environment.
# Inspired by https://github.com/minamarkham/formation (great!)
# Configuration
DOTFILES_REPO="https://github.com/elithrar/dotfiles"
# Packages installed via brew/linuxbrew; casks are macOS-only GUI apps.
BREW_PACKAGES=(asciinema cmake curl fd gifski git go htop jq lua make mkcert neovim nmap node python rcm ripgrep tmux tree wget wrk yarn youtube-dl zsh)
CASKS=(1password alfred font-fira-code rectangle)
SSH_EMAIL="[email protected]"
CLOUDSDK_INSTALL_DIR="${HOME}/repos"
# Colors
# tput-based terminal styling used by the print_* helpers below.
reset="$(tput sgr0)"
highlight="$(tput smso)"
dim="$(tput dim)"
red="$(tput setaf 1)"
blue="$(tput setaf 4)"
green="$(tput setaf 2)"
yellow="$(tput setaf 3)"
bold=$(tput bold)
normal=$(tput sgr0)
underline="$(tput smul)"
indent="  "
# Error handling
# Print a failure banner on any non-zero exit (set -e below triggers this).
trap 'ret=$?; test $ret -ne 0 && printf "${red}Setup failed${reset}\n" >&2; exit $ret' EXIT
set -e
# --- Helpers
# Print a green "success" line; %b honors backslash escapes in the message.
print_success() {
local message="$1"
printf "${green}✔ success:${reset} %b\n" "${message}"
}
# Print a red "error" line (to stdout, matching the other helpers).
print_error() {
local message="$1"
printf "${red}✖ error:${reset} %b\n" "${message}"
}
# Print a blue informational line.
print_info() {
local message="$1"
printf "${blue}ⓘ info:${reset} %b\n" "${message}"
}
# ------
# Setup
# ------
# Startup banner.  The whole block below is ONE multi-line printf argument;
# the backslashes in the ASCII art are literal, so do not "fix" them.
printf "
${yellow}
Running...
_ _ _ _ _
(_)_ __ ___| |_ __ _| | | ___| |__
| | '_ \/ __| __/ _ | | | / __| '_ \
| | | | \__ \ || (_| | | |_\__ \ | | |
|_|_| |_|___/\__\__,_|_|_(_)___/_| |_|
-----
- Sets up a macOS or Linux based development machine.
- Can be run in WSL on Windows
- Safe to run repeatedly (checks for existing installs)
- Repository at https://github.com/elithrar/dotfiles
- Fork as needed
- Deeply inspired by https://github.com/minamarkham/formation
-----
${reset}
"
# Check environments
OS=$(uname -s 2> /dev/null)
DISTRO=""
IS_WSL=false
INTERACTIVE=true
if [ "${OS}" = "Linux" ]; then
# Check Debian vs. RHEL
if [ -f /etc/os-release ] && $(grep -iq "Debian" /etc/os-release); then
DISTRO="Debian"
fi
# NOTE(review): WSL1 reports "Microsoft" in /proc/version, WSL2 reports
# lowercase "microsoft" -- this case-sensitive grep may miss WSL2; confirm.
if $(grep -q "Microsoft" /proc/version); then
IS_WSL=true
fi
# $- contains 'i' when the shell is interactive.
if [[ $- == *i* ]]; then
INTERACTIVE=true
else
INTERACTIVE=false
fi
fi
print_info "Detected environment: ${OS} (distro: ${DISTRO})"
print_info "Windows for Linux Subsystem (WSL): ${IS_WSL}"
print_info "Interactive shell session: ${INTERACTIVE}"
# Check for connectivity
# Fix: the original wrapped ping in `[ ... ]`, which compared the literal
# words instead of running ping (always false), printed the error on the
# success path, and exited 0 on failure.  Run ping directly and exit
# non-zero when the Internet is unreachable.
if ping -q -w1 -c1 google.com &>/dev/null; then
print_success "Internet reachable"
else
print_error "Cannot connect to the Internet"
exit 1
fi
# Ask for sudo up front so later sudo calls do not prompt mid-run.
sudo -v &> /dev/null
# Update the system & install core dependencies
if [ "$OS" = "Linux" ] && [ "$DISTRO" = "Debian" ]; then
print_info "Updating system packages"
sudo apt update
sudo apt -y upgrade
sudo apt -y install build-essential curl file git
else
print_info "Skipping system package updates"
fi
# Generate an SSH key (if none) if we're in an interactive shell
if [ "$INTERACTIVE" = true ] && ! [[ -f "$HOME/.ssh/id_ed25519" ]]; then
printf "🔑 Generating new SSH key"
# Fix: use the SSH_EMAIL configured at the top of the script instead of a
# duplicated hard-coded address, and quote the key path.
ssh-keygen -t ed25519 -f "$HOME/.ssh/id_ed25519" -C "${SSH_EMAIL}"
# Fix: 'print' is not defined anywhere in this script; use print_success.
print_success "Key generated!"
fi
# Set up repos directory
if [ ! -d "${HOME}/repos" ]; then
mkdir -p "$HOME/repos"
fi
# Install Homebrew
if ! [ -x "$(command -v brew)" ]; then
if [ "${OS}" = "Linux" ]; then
# Install Linuxbrew - http://linuxbrew.sh/
print_info "Installing Linuxbrew..."
# Unattended
# (piping an empty line answers the installer's "press RETURN" prompt)
echo "" | /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install.sh)"
# Put 'brew' on the current path
test -d ~/.linuxbrew && export PATH="/home/linuxbrew/.linuxbrew/bin:/home/linuxbrew/.linuxbrew/sbin:$PATH"
eval "$(/home/linuxbrew/.linuxbrew/bin/brew shellenv)"
print_success "Linuxbrew installed"
elif [ "$OS" = "Darwin" ]; then
print_info "Installing Homebrew..."
# NOTE(review): the ruby-based installer is deprecated upstream -- confirm
# against the current Homebrew install instructions.
curl -fsS 'https://raw.githubusercontent.com/Homebrew/install/master/install' | ruby
export PATH="/usr/local/bin:$PATH"
print_success "Homebrew installed"
fi
else
print_success "Homebrew/Linuxbrew already installed."
fi
# --- Homebrew
print_info "Installing Homebrew packages"
# Install taps first
brew tap thoughtbot/formulae
for pkg in "${BREW_PACKAGES[@]}"; do
# Check if $pkg is already installed.
# Fix: `test ! $(brew list | grep $pkg)` word-split grep's output into the
# test expression and broke on multi-line matches; use grep's exit status
# directly (same substring semantics as before).
print_info "Checking package $pkg"
if ! brew list | grep -q -- "$pkg"; then
print_info "Installing $pkg"
brew install --quiet "$pkg"
else
print_success "$pkg already installed"
fi
done
# reattach-to-user-namespace (tmux/macOS clipboard integration)
if [ "$OS" = "Darwin" ]; then
brew install --quiet reattach-to-user-namespace
fi
# Casks
if [ "$OS" = "Darwin" ]; then
print_info "Installing Homebrew Casks"
for pkg in "${CASKS[@]}"; do
# Check if $pkg is already installed (same fix as above).
print_info "Checking package $pkg"
if ! brew list --cask | grep -q -- "$pkg"; then
print_info "Installing $pkg"
brew install --cask "$pkg"
else
print_success "$pkg already installed"
fi
done
else
print_info "Skipping Cask installation: not on macOS"
fi
# Fix: the closing quote of this message was many lines below, so the whole
# dotfiles section was swallowed into the string argument (and its commands
# never executed).  Terminate the message here.
print_success "Homebrew packages installed"
# --- dotfiles
# Clone & install dotfiles
print_info "Configuring dotfiles"
if ! [ -x "$(command -v stow)" ]; then
# Install GNU stow
# https://linux.die.net/man/8/stow
brew install stow
fi
# Fix: missing space before the closing ] in the original -d test.
if [ ! -d "${HOME}/repos/dotfiles" ]; then
print_info "Cloning dotfiles"
git clone "${DOTFILES_REPO}" "${HOME}/repos/dotfiles"
else
print_info "dotfiles already cloned"
fi
print_info "Linking dotfiles"
# NOTE(review): stow normally takes one or more package names to link --
# confirm the intended packages for this invocation.
stow --dir="${HOME}/repos/dotfiles" --target="${HOME}"
print_success "dotfiles installed"
# --- Configure zsh
if [ ! -d "${HOME}/.oh-my-zsh" ]; then
print_info "Installing oh-my-zsh"
sh -c "$(curl -fsSL https://raw.githubusercontent.com/robbyrussell/oh-my-zsh/master/tools/install.sh)"
# Register zsh as a valid login shell, then make it the default shell.
command -v zsh | sudo tee -a /etc/shells
chsh -s $(which zsh)
else
print_success "oh-my-zsh already installed"
fi
# gcloud SDK
# Fix: the SDK installs into a *directory*; the original -f (regular file)
# test was always false, so the SDK was re-downloaded on every run.
if ! [ -d "${CLOUDSDK_INSTALL_DIR}/google-cloud-sdk" ]; then
print_info "Installing gcloud SDK"
curl https://sdk.cloud.google.com > install_gcloud.sh
bash install_gcloud.sh --disable-prompts --install-dir="${CLOUDSDK_INSTALL_DIR}/google-cloud-sdk"
print_success "gcloud SDK installed"
else
print_success "gcloud SDK already installed"
fi
# Fly.io
# Fix: flyctl installs into the ~/.fly directory; same -f -> -d fix.
if ! [ -d "${HOME}/.fly" ]; then
print_info "Installing flyctl"
curl -L https://fly.io/install.sh | sh
print_success "flyctl installed"
else
print_success "flyctl already installed"
fi
print_success "All done! Visit https://github.com/elithrar/dotfiles for the full source & related configs."
|
elithrar/dotfiles
|
install.sh
|
Shell
|
mit
| 6,767 |
# Reset the ForgeGradle cache and regenerate the decompiled workspace; the
# second step only runs if the cache clean succeeds.
./gradlew cleanCache && ./gradlew setupDecompWorkspace
|
talandar/ProgressiveDifficulty
|
switchToWorkspace.sh
|
Shell
|
mit
| 55 |
#!/bin/sh
# CI build: install JS and Python dependencies, build frontend assets, and
# record the current git revision for deployment.  -x traces commands,
# -e aborts on the first failure.
set -xe
npm install
./node_modules/.bin/pancake
pip install --upgrade pip setuptools
pip install -r requirements.txt
pip install -r requirements_for_test.txt
npm run frontend-build:production
# The short commit hash becomes the deployed version label.
git log --pretty=format:'%h' -n 1 > version_label
|
AusDTO/dto-digitalmarketplace-buyer-frontend
|
scripts/ci_build.sh
|
Shell
|
mit
| 252 |
#!/usr/bin/env bash
# Linux version (should work, not tested):
# Generate API documentation for the mmirf sources with jsdoc-toolkit.
_BASEPATH=$PWD
TARGETDIR=$_BASEPATH/../www/mmirf
# Files/directories excluded from the doc run (regexp argument for -E).
EXCLUDEDIRS="-E=3rdParty|input_manager_state_chart|DialogDescription.js|grammar.js|gen|.json"
OUTPUTDIR=$_BASEPATH/api_doc
JSDOCDIR=./node_modules/jsdoc-toolkit
TEMPLATEDIR=$JSDOCDIR/templates
TEMPLATE=$TEMPLATEDIR/jsdoc
# NOTE(review): -v is passed twice -- presumably harmless; confirm.
node $JSDOCDIR/app/noderun.js -A -t=$TEMPLATE -p -v -r $EXCLUDEDIRS -d=$OUTPUTDIR -v $TARGETDIR
|
patbit/mmir-starter-kit
|
doc/create_doc2.sh
|
Shell
|
mit
| 439 |
#!/bin/bash -ev
#
# Installation Script
# Written by: Tommy Lincoln <[email protected]>
# Github: https://github.com/pajamapants3000
# Legal: See LICENSE in parent directory
#
#
# Dependencies
#**************
# Begin Required
#gobject_introspection-1.44.0
#pycairo-1.10.0
# End Required
# Begin Recommended
# End Recommended
# Begin Optional
#pep8
#pyflakes
# End Optional
# Begin Kernel
# End Kernel
#
# Installation
#**************
# Check for previous installation:
PROCEED="yes"
REINSTALL=0
# The file /list-$CHRISTENED"-"$SURNAME records packages installed on this
# machine; prompt before reinstalling a package already listed there.
# NOTE(review): ((\!$?)) re-tests grep's already-&&-gated status via an
# escaped '!' inside arithmetic -- fragile; confirm intent.
grep pygobject-3.16.2-python3 /list-$CHRISTENED"-"$SURNAME > /dev/null && ((\!$?)) &&\
REINSTALL=1 && echo "Previous installation detected, proceed?" && read PROCEED
[ $PROCEED = "yes" ] || [ $PROCEED = "y" ] || exit 0
# Download:
wget http://ftp.gnome.org/pub/gnome/sources/pygobject/3.16/pygobject-3.16.2.tar.xz
# FTP/alt Download:
#wget ftp://ftp.gnome.org/pub/gnome/sources/pygobject/3.16/pygobject-3.16.2.tar.xz
#
# md5sum:
# NOTE(review): ${PIPESTATUS[0]} is echo's status, not md5sum's -- a md5sum
# failure is instead caught by the -e shebang; confirm this is intended.
echo "8a3720efa69dd2d520e0b21b5d9e7c19 pygobject-3.16.2.tar.xz" | md5sum -c ;\
( exit ${PIPESTATUS[0]} )
#
tar -xvf pygobject-3.16.2.tar.xz
cd pygobject-3.16.2
# Prep if testsuite is desired
#sed -i '/d =.*MUSIC/d' tests/test_glib.py
#
# Out-of-tree build against Python 3.
mkdir python3
pushd python3
../configure --prefix=/usr --with-python=/usr/bin/python3
make
popd
# Test (graphical session required):
#make -C python3 check
#
as_root make -C python3 install
cd ..
as_root rm -rf pygobject-3.16.2
#
# Add to installed list for this computer:
echo "pygobject-3.16.2-python3" >> /list-$CHRISTENED"-"$SURNAME
#
###################################################
|
pajamapants3000/BLFS_scripts_etc
|
scripts/pygobject-3.16.2-python3.sh
|
Shell
|
mit
| 1,566 |
#!/usr/bin/env bash
# Download the pretrained KITTI dilation model next to this script.
# Fix: quote both expansions so a checkout path containing spaces works.
cd "$(dirname "$0")"
curl -O http://dl.yf.io/dilation/models/dilation7_kitti.caffemodel
|
fyu/dilation
|
pretrained/download_kitti.sh
|
Shell
|
mit
| 105 |
# Update every sibling checkout: pull the latest commits and refresh npm
# dependencies.  Paths are relative, so run from the parent of mct-core.
# NOTE(review): if one cd fails, the following relative cds run from the
# wrong directory -- confirm all checkouts exist before running.
cd mct-core && git pull && npm update
cd ../generator-m && git pull && npm update
cd ../generator-mcap && git pull && npm update
cd ../generator-m-server && git pull && npm update
cd ../mcap-cli && git pull && npm update
cd ../mcap-deploy && git pull && npm update
cd ../mcap-log && git pull && npm update
cd ../mcap-application-validation && git pull && npm update
|
mwaylabs/mcap-developer-utils
|
pull.sh
|
Shell
|
mit
| 365 |
#!/bin/bash
# Abort on the first failing command (temporarily toggled off inside
# parse_url below).
set -o errexit
# parse_url URL [PREFIX]
# Splits URL into protocol, host and port, exporting them as
# ${PREFIX}_PROTO, ${PREFIX}_ADDR and ${PREFIX}_PORT (default prefix:
# DOCKER_LINK).  Components absent from the URL are left unset.
parse_url()
{
# Fix: 'unset errexit' only removed a shell *variable* named errexit; use
# 'set +o errexit' to actually tolerate the non-matching greps below.
set +o errexit
local prefix=DOCKER_LINK
[ -n "$2" ] && prefix=$2
# extract the protocol
local proto="`echo $1 | grep '://' | sed -e's,^\(.*\)://.*,\1,g'`"
# remove the protocol
local url=`echo $1 | sed -e s,$proto://,,g`
# extract the host (everything before the first /)
local hostport=`echo $url | cut -d/ -f1`
local port=`echo $hostport | grep : | cut -d: -f2`
if [ -n "$port" ]; then
local host=`echo $hostport | grep : | cut -d: -f1`
else
local host=$hostport
fi
[ -n "$proto" ] && eval "export ${prefix}_PROTO=\"$proto\""
[ -n "$host" ] && eval "export ${prefix}_ADDR=\"$host\""
[ -n "$port" ] && eval "export ${prefix}_PORT=\"$port\""
# Re-enable fail-fast for the rest of the script.
set -o errexit
}
# Derive CROWD_SERVER_URL from the docker-link environment when it was not
# supplied explicitly ($CROWD_LINK_ENV names the env var holding the URL).
if [ -z "$CROWD_SERVER_URL" ]; then
parse_url "$(eval "echo \$$CROWD_LINK_ENV")"
# NOTE(review): this mixes CROWD_SERVER_PROTO with DOCKER_LINK_ADDR/PORT;
# parse_url exported DOCKER_LINK_PROTO, so CROWD_SERVER_PROTO is presumably
# set elsewhere -- confirm the intended variable names.
export CROWD_SERVER_URL="${CROWD_SERVER_PROTO}://${DOCKER_LINK_ADDR}:${DOCKER_LINK_PORT}${CROWD_SERVER_PATH}"
fi
# Write the Crowd plugin configuration consumed by Nexus; the unquoted
# here-doc delimiter means the variables are expanded now.
cat <<END > /opt/sonatype-work/nexus/conf/crowd-plugin.xml
<?xml version="1.0" encoding="UTF-8"?>
<crowdConfiguration>
<crowdServerUrl>${CROWD_SERVER_URL}</crowdServerUrl>
<applicationName>${CROWD_APP_NAME}</applicationName>
<applicationPassword>${CROWD_APP_PASSWORD}</applicationPassword>
</crowdConfiguration>
END
# Replace this shell with the Nexus startup script (PID 1 handoff).
exec "/opt/start_nexus.sh"
|
griff/sonatype-nexus-docker
|
crowd-plugin/start_crowd_nexus.sh
|
Shell
|
mit
| 1,270 |
#!/bin/bash
# Provision a Vagrant box for openITCOCKPIT v3 development: export the web
# root over NFS and SMB, wire up DB config and the updater, then drive the
# interactive installer with expect.
set -e
# Create NFS and SMB configuration
echo "Configure NFS share"
# Export the web root to the private Vagrant network; all access is
# squashed to uid/gid 33 (www-data).
echo "/usr/share/openitcockpit 172.28.128.0/24(rw,no_subtree_check,no_root_squash,all_squash,anonuid=33,anongid=33)" >> /etc/exports
echo "Configure SMB share"
cp smb.conf /etc/samba/smb.conf
# Set the SMB password non-interactively (smbpasswd asks twice).
echo -e 'vagrant\nvagrant' | smbpasswd -a www-data
service nfs-kernel-server restart
service smbd restart
cp database.php /usr/share/openitcockpit/app/Config/database.php
chmod +x /usr/share/openitcockpit/app/UPDATE.sh
ln -s /usr/share/openitcockpit/app/UPDATE.sh /usr/sbin/openitcockpit-update
# Run openITCOCKPIT setup
# expect.file supplies the answers to the interactive setup prompts.
expect expect.file
|
it-novum/vagrantboxes
|
openITCOCKPIT_V3-dev/install.sh
|
Shell
|
mit
| 623 |
#!/bin/bash
# Adaptation of https://docs.docker.com/engine/admin/multi-service_container/
# Supervise two background services and exit (taking the container down)
# as soon as either of them stops running.

# Start the first process.
# NOTE(review): $? after `cmd &` only reflects whether the fork succeeded
# (practically always 0); a failure inside discover-process.sh is caught
# by the polling loop below. Kept for parity with the Docker docs example.
/discover-process.sh &
status=$?
if [ "$status" -ne 0 ]; then
  echo "Failed to start discover-process: $status"
  exit "$status"
fi

# Start the base process
/base-process.sh &
status=$?
if [ "$status" -ne 0 ]; then
  echo "Failed to start base-process: $status"
  exit "$status"
fi

# Naive liveness check every 30 seconds: if either process has exited,
# leave with an error so Docker restarts/flags the container.
while /bin/true; do
  DISCOVER_STATUS=$(ps aux | grep discover-process | grep -v grep | wc -l)
  BASE_STATUS=$(ps aux | grep base-process | grep -v grep | wc -l)
  # The greps count matching processes (excluding the grep itself);
  # exactly one instance of each service is expected.
  if [ "$DISCOVER_STATUS" -ne 1 ]; then
    echo "discover-process has already exited."
    # Fix: `exit -1` is out of range / unportable; use a plain failure code.
    exit 1
  fi
  if [ "$BASE_STATUS" -ne 1 ]; then
    echo "base-processes has already exited."
    exit 1
  fi
  sleep 30
done
|
redgeoff/docker-discover-tasks
|
examples/couchdb/wrapper.sh
|
Shell
|
mit
| 1,015 |
#!/usr/bin/env bash
# Run the phpspec test suite from the project-local Composer vendor dir.
vendor/bin/phpspec run
|
openh2labs/elastic-erga
|
src/laravel/spec.sh
|
Shell
|
mit
| 43 |
#!/bin/bash
#
# environmental_variables.sh
#
# Copyright (C) 2015 Mitchell C Kuppinger, MD
#
# Central definition of the environment used by the Vagrant provisioning
# scripts. NOTE(review): exports only affect the current shell, so this
# file is presumably sourced (not executed) by the other setup scripts —
# verify the callers.
#
# Source functions
. "/vagrant/setup/functions.sh"
echo "++ setting environmental variables ++" | tee -a /var/log/vagrant_setup.log
# Clean PATH
# http://unix.stackexchange.com/questions/40749/remove-duplicate-path-entries-with-awk-command
export PATH=$(echo -n $PATH | awk -vRS=: -vORS= '!a[$0]++ {if (NR>1) printf(":"); printf("%s", $0) }' )
# Linux
export ENCODING=en_US.UTF-8
export ROOT_PROVISIONED_FLAGS_DIR=/etc/provisioned
export USER_PROVISIONED_FLAGS_DIR=~/provisioned
export SYNCED_FLDR=/vagrant
# Postgresql
export PG_VERSION=9.3
export DBUSER=ember_crm
export DBPASS=Dragon123
# rbenv
export RBENV_SHELL=bash
# ruby
export RUBY_VERSION=2.2.0
# rails
export RAILS_VERSION=4.2.0
# Application
export MY_RAILS_APP=ember-crm
export MY_EMBER_APP=releases
export MY_APP=myproj
export SYNCED_APPS_FLDR=~/projects
export APP_BUILD_FLDR=~/srv
# The app's system user is named after the Rails app.
export APPUSER=$MY_RAILS_APP
export APPUSER_PW=Dragon123
export DB=postgresql
export DBYML=config/database.yml
export ORIG_DBYML=config/database_orig.yml
|
dpneumo/Vagrant_Rails_PG
|
setup/environmental_variables.sh
|
Shell
|
mit
| 1,105 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for USN-2084-1
#
# Security announcement date: 2014-01-21 00:00:00 UTC
# Script generation date: 2017-01-01 21:03:33 UTC
#
# Operating System: Ubuntu 12.04 LTS
# Architecture: i386
#
# Vulnerable packages fix on version:
# - devscripts:2.11.6ubuntu1.6
#
# Last versions recommended by security team:
# - devscripts:2.11.6ubuntu1.7
#
# CVE List:
# - CVE-2013-6888
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
# Upgrade (never fresh-install) devscripts to the patched version.
sudo apt-get install --only-upgrade devscripts=2.11.6ubuntu1.7 -y
|
Cyberwatch/cbw-security-fixes
|
Ubuntu_12.04_LTS/i386/2014/USN-2084-1.sh
|
Shell
|
mit
| 623 |
#!/bin/sh
set -eo pipefail -o nounset
# Build the hg38 Blekhman autosomal-recessive (AR) gene BED files:
#  1. fetch the AR gene list and locate the Ensembl GRCh38 gene-feature GTF,
#  2. extract CDS/stop_codon coordinates for the AR genes,
#  3. merge/sort them, remap to UCSC chromosome names, bgzip + tabix,
#  4. also emit the genomic complement (non-AR intervals).
wget --quiet -O blekhman_ar.tsv https://raw.githubusercontent.com/macarthur-lab/gene_lists/master/lists/blekhman_ar.tsv
genome=https://raw.githubusercontent.com/gogetdata/ggd-recipes/master/genomes/Homo_sapiens/GRCh38/GRCh38.genome
## Get the .genome file
genome2=https://raw.githubusercontent.com/gogetdata/ggd-recipes/master/genomes/Homo_sapiens/hg38/hg38.genome
## Get the chromosome mapping file (Ensembl -> UCSC names)
chr_mapping=$(ggd get-files hg38-chrom-mapping-ensembl2ucsc-ncbi-v1 --pattern "*.txt")
grch38_gtf="$(ggd get-files grch38-gene-features-ensembl-v1 -p 'grch38-gene-features-ensembl-v1.gtf.gz')"
## Generate the GTF parser. The heredoc is deliberately unquoted (the
## embedded Python contains no $, so nothing is expanded).
## BUG FIX inside: the uncompressed branch used to re-open ar_gene_file
## instead of the GTF itself, so a plain-text GTF was never parsed.
cat << EOF > parse_gtf_by_gene.py
"""
Get a list of genome coordinates for a list of ar genes
"""
import sys
import io
import gzip
gtf_file = sys.argv[1] ## A gtf file with CDS features
ar_gene_file = sys.argv[2] ## A single column tsv file for ar genes
outfile = sys.argv[3] ## File to write to
## Get a set of gene symbols
ar_gene_set = {}
with io.open(ar_gene_file, "rt", encoding = "utf-8") as ar:
    ar_gene_set = set(x.strip() for x in ar)
## Parse the gtf file
fh = gzip.open(gtf_file, "rt", encoding = "utf-8") if gtf_file.endswith(".gz") else io.open(gtf_file, "rt", encoding = "utf-8")
ar_gene_dict = dict()
header = []
for line in fh:
    if line[0] == "#":
        header = line.strip().split("\t")
        continue
    line_dict = dict(zip(header,line.strip().split("\t")))
    line_dict.update({x.strip().replace("\"","").split(" ")[0]:x.strip().replace("\"","").split(" ")[1] for x in line_dict["attribute"].strip().split(";")[:-1]})
    ## If the current gene is in the ar gene set
    if line_dict["gene_name"] in ar_gene_set:
        if line_dict["gene_name"] not in ar_gene_dict:
            ar_gene_dict[line_dict["gene_name"]] = []
        ## If CDS or stop_codon feature, ard feature info to ar_gene_dict
        if line_dict["feature"] == "CDS" or line_dict["feature"] == "stop_codon":
            ## Change 1 based start to zero based start
            ar_gene_dict[line_dict["gene_name"]].append([str(line_dict["#chrom"]),
                                                         str(int(line_dict["start"]) - 1),
                                                         str(line_dict["end"]),
                                                         str(line_dict["strand"]),
                                                         str(line_dict["gene_id"]),
                                                         str(line_dict["gene_name"]),
                                                         str(line_dict["transcript_id"]),
                                                         str(line_dict["gene_biotype"])
                                                        ])
fh.close()
## Write dict out
with open(outfile, "w") as o:
    for gene, coor in ar_gene_dict.items():
        for line in coor:
            o.write("\t".join(line) + "\n")
EOF
python parse_gtf_by_gene.py "$grch38_gtf" blekhman_ar.tsv unflattened_ar_genes.bed
cat << EOF > sort_columns.py
"""
sort the transcript id column
sort and get a unique list of the gene column
"""
import sys
for line in sys.stdin.readlines():
    line_list = line.strip().split("\t")
    ## Sort column 4 - 8 and get a uniqe list
    line_list[3] = ",".join(sorted(list(set(line_list[3].strip().split(",")))))
    line_list[4] = ",".join(sorted(list(set(line_list[4].strip().split(",")))))
    line_list[5] = ",".join(sorted(list(set(line_list[5].strip().split(",")))))
    line_list[6] = ",".join(sorted(list(set(line_list[6].strip().split(",")))))
    line_list[7] = ",".join(sorted(list(set(line_list[7].strip().split(",")))))
    ## Print to stdout
    print("\t".join(line_list))
EOF
## Merge and sort ar genes with coordinates
gsort unflattened_ar_genes.bed "$genome" \
    | bedtools merge -i - -c 4,5,6,7,8 -o collapse \
    | awk -v OFS="\t" 'BEGIN { print "#chrom\tstart\tend\tstrand\tgene_ids\tgene_symbols\ttranscript_ids\tgene_biotypes" } {print $0}' \
    | python sort_columns.py \
    | gsort --chromosomemappings "$chr_mapping" /dev/stdin "$genome2" \
    | bgzip -c > hg38-autosomal-recessive-genes-blekhman-v1.bed.gz
tabix hg38-autosomal-recessive-genes-blekhman-v1.bed.gz
wget --quiet "$genome2"
## Get ar gene complement coordinates. (Output file names keep the
## upstream "compliment" spelling — they are part of the published
## recipe interface.)
sed "1d" hg38.genome \
    | bedtools complement -i <(zgrep -v "#" hg38-autosomal-recessive-genes-blekhman-v1.bed.gz) -g /dev/stdin \
    | gsort /dev/stdin "$genome2" \
    | awk -v OFS="\t" 'BEGIN {print "#chrom\tstart\tend"} {print $1,$2,$3}' \
    | bgzip -c > hg38-autosomal-recessive-genes-blekhman-v1.compliment.bed.gz
tabix hg38-autosomal-recessive-genes-blekhman-v1.compliment.bed.gz
## Clean up scratch files
rm hg38.genome
rm blekhman_ar.tsv
rm unflattened_ar_genes.bed
rm parse_gtf_by_gene.py
rm sort_columns.py
|
gogetdata/ggd-recipes
|
recipes/genomics/Homo_sapiens/hg38/hg38-autosomal-recessive-genes-blekhman-v1/recipe.sh
|
Shell
|
mit
| 4,821 |
# Generate an HTML diff-quality report (pep8 violations on changed lines
# only), then hand off to the post-processing hook.
mkdir -p reports
diff-quality --violations=pep8 --html-report reports/diff_quality_pep8.html
bash ./post_worker.sh
|
muhammad-ammar/alpha
|
run_quality.sh
|
Shell
|
mit
| 117 |
#!/bin/sh
# Name:pgsql_yum_install.sh
# Info:A Script For Setup PostgreSQL Use Yum Repo
# Author:Jason_z
# Create:2013-05-15
# Mail:[email protected]
# CopyRight @ AnAn Studio,http://www.anan-studio.com
# Define Variables
version="9.2"
# default postgres data dir,so don't modify this !!
defaultDir=/var/lib/pgsql/$version/data
# you can also use your own data dir
yourDir=/data/pg_data
# Notice:you can get this from:http://yum.postgresql.org/repopackages.php
# if you do not know what's your release or arch
# type 'cat /etc/redhat_release' ,get your os release
# type 'uname -m' ,get your os arch
rpm=http://yum.postgresql.org/9.2/redhat/rhel-6-x86_64/pgdg-centos92-9.2-6.noarch.rpm
# Check whether PostgreSQL is already installed.
# BUG FIX: the original tested `rpm -qa postgres; $? -ne 0`, but
# `rpm -qa` exits 0 whether or not anything matches, so the guard
# never fired. Grep the package list for an installed postgresql* instead.
if rpm -qa | grep -q '^postgresql'; then
    echo "PostgreSQL is installed,then will be quit."
    exit 1
fi
# Install the repo RPM
sudo rpm -ivh $rpm
# along with -contrib subpackage
sudo yum -y groupinstall "PostgreSQL Database Server $version PGDG"
# '=' is the POSIX sh string-equality operator ('==' is a bashism).
if [ "$yourDir" = "$defaultDir" ]; then
    sudo service postgresql-$version initdb
    sudo service postgresql-$version start
else
    if [ ! -d "$yourDir" ]; then
        sudo mkdir -p "$yourDir"
        sudo chown postgres:postgres "$yourDir"
    fi
    # assign PG data dir
    sudo -u postgres /usr/pgsql-$version/bin/pg_ctl -D "$yourDir" initdb
    # listen on all interfaces instead of localhost only
    sudo sed -i "s/localhost/*/" "$yourDir"/postgresql.conf
    # point the init script at the custom data dir
    # BUG FIX: the expression was single-quoted, so $defaultDir/$yourDir
    # were passed to sed literally and never substituted.
    sudo sed -i "s $defaultDir $yourDir g" /etc/init.d/postgresql-$version
    # start pg
    sudo -u postgres /usr/pgsql-$version/bin/pg_ctl -D "$yourDir" start
fi
# join in startup
# BUG FIX: the service name was misspelled "postgres-$version".
sudo chkconfig postgresql-$version on
# modify password
#sudo -u postgres psql -d postgres -c "alter user postgres with password '123456'"
|
jasonz1987/postgresql-install
|
pgsql_yum_install.sh
|
Shell
|
mit
| 1,673 |
#!/bin/bash -vx
# Jinja2-templated Arch Linux chroot installer: builds AUR packages,
# configures locale/console/time/hostname, generates the initramfs and a
# systemd-boot entry, optional wired networking, then creates users.
# NOTE(review): options in the shebang (-vx) are lost when invoked as
# `bash script`; -e is set explicitly below.
set -e
# Prime dirmngr so pacman key operations work non-interactively.
dirmngr < /dev/null
{% for package in packages.system.aur %}
echo 'Building {{package}}'
mkdir build
cd build
git clone {{ package }} .
chmod -R 777 .
sudo -u nobody makepkg
pacman --noconfirm -U `ls *pkg.tar.xz`
cd ../
rm -rf build
{% endfor %}
# Enable LVM discard (TRIM) pass-through.
sed -i -e $'/\tissue_discards = 0/ s/= 0/= 1/' /etc/lvm/lvm.conf
sed -i 's/#en_US.UTF-8 UTF-8/en_US.UTF-8 UTF-8/' /etc/locale.gen
locale-gen
echo LANG=en_US.UTF-8 > /etc/locale.conf
export LANG=en_US.UTF-8
touch /etc/vconsole.conf
{% if keymap %}echo "KEYMAP={{ keymap }}" >> /etc/vconsole.conf{% endif %}
{% if consolefont %}echo "FONT={{ consolefont }}" >> /etc/vconsole.conf{% endif %}
ln -sf /usr/share/zoneinfo/{{ localtime }} /etc/localtime
hwclock --systohc --utc
echo {{ hostname }} > /etc/hostname
echo "127.0.0.1 {{ hostname }}
::1 {{ hostname }}" > /etc/hosts
# systemd-based initramfs; sd-encrypt is added only for encrypted roots.
cat << EOF > /etc/mkinitcpio.conf
MODULES=()
BINARIES=()
FILES=()
HOOKS=(base systemd autodetect modconf block keyboard sd-vconsole {% if fs.system.encrypted %} sd-encrypt {% endif %} lvm2 filesystems fsck)
COMPRESSION=(zstd)
COMPRESSION_OPTIONS=()
EOF
mkinitcpio -p linux
mkdir -p /boot/loader/entries
# Boot entry; $1 appears to be the LUKS container identifier passed to
# this script — NOTE(review): confirm the caller's argument contract.
cat << EOF > /boot/loader/entries/arch-{{ hostname }}.conf
title Arch Linux
linux /vmlinuz-linux
initrd /intel-ucode.img
initrd /initramfs-linux.img
{% if fs.system.encrypted %}
options rw root=/dev/mapper/main-root rd.luks.name=$1=main rd.luks.options=discard
{% else %}
options rw root=/dev/mapper/main-root
{% endif %}
EOF
if [ ! -f /boot/loader/loader.conf ]; then
echo "default arch-{{ hostname }}" > /boot/loader/loader.conf
fi
bootctl install
{% if network_wired %}
cat << EOF > /etc/systemd/network/20-wired.network
[Match]
Name={{ network_wired }}
[Network]
DHCP=ipv4
[DHCP]
RouteMetric=10
UseDomains=yes
EOF
{% endif %}
# Loop until a valid root password is entered.
until passwd
do sleep 1; done
useradd -m -G wheel -s /bin/bash kaylyn
until passwd kaylyn
do sleep 1; done
echo "%wheel ALL=(ALL) ALL" > /etc/sudoers.d/wheel
chmod 0440 /etc/sudoers.d/wheel
|
kaylynb/arch-iso
|
generate-iso/tpl/isofiles/root/chroot-install.sh
|
Shell
|
mit
| 1,986 |
#!/bin/bash
#
# Find OV movies at the cinestar chemnitz
set -e -u
url='http://www.cinestar.de/de/kino/chemnitz-cinestar-am-roten-turm/kinoprogramm/?version=OV'
baseurl="http://www.cinestar.de"
text=$(curl -s "$url")
# Isolate the main content column: split tags onto their own lines, drop
# scripts, and cut everything outside main_content_full/content_col_right.
moviecolumn=$(echo -ne "$text" | sed -e 's/</\n</g' | sed -e '/<script/,/<\/script>/d' -e '/class="content_col_right"/,$d' -e '/^\s*$/d' -e '1,/id="main_content_full"/d')
# Keep movie links, screening labels and datetimes; reduce links to
# "title\nhref" pairs. Tightly coupled to the site's 2010s-era markup.
rawmoviedata=$(echo -ne "$moviecolumn" | grep -o '<a href="[^"]*/\(veranstaltungen\|filmprogramm\)/[^"]*" title="[^"]*"\|\(Vorstellung\|Screening\) .*\|datetime="[^"]*"' | sed -e 's/datetime="\([^"]*\)"/\1/' -e 's/<a href="\([^"]*\)" title="\([^"]*\)"/\2\n\1/')
# Absolute-ify hrefs and decode HTML entities.
# NOTE(review): `"$line\n"` and `echo "\n"` emit a literal backslash-n
# (echo without -e); presumably a blank separator line was intended —
# preserved as-is.
movies=$(echo -e "$rawmoviedata" | while IFS= read line; do
if [ "${line:0:1}" == '/' ]; then
echo "<$baseurl$line>"
else
echo "$line"
fi
grep '^Vorstellung' <<< "$line\n" &>/dev/null && echo "\n"
done | recode html)
echo -ne "$movies"
cat <<EOF
Alle OV-Vorstellungen:
$url
EOF
|
elor/eltools
|
src/interactive/cinestarov.sh
|
Shell
|
mit
| 993 |
#!/bin/bash
# Build a small throwaway git repository ("repo/") used as a fixture by
# the git-browse tests: one file edited across a series of commits.

# Run from the directory containing this script.
# Fix: ${BASH_SOURCE[0]} — the unbraced `$BASH_SOURCE[0]` expanded to
# "<path>[0]" and only worked because dirname happened to discard the
# bogus suffix; quoted to survive paths with spaces.
cd "$(dirname "${BASH_SOURCE[0]}")"
mkdir repo
cd repo
git init > /dev/null
cat > example.txt << EOF
first
second
third
fourth
fifth
EOF
git add example.txt
git commit -m 'First commit' > /dev/null
cat > example.txt << EOF
first
fourth
fifth
EOF
git commit -am 'Second commit' > /dev/null
cat > example.txt << EOF
another
yet another
first
fourth
fifth
EOF
git commit -am 'Third commit' > /dev/null
# NOTE(review): the next two rewrites produce content identical to the
# third commit, so `git commit -am` reports "nothing to commit" and the
# fourth/fifth commits are never created. Preserved as-is — confirm
# whether the tests expect 3 or 5 commits before changing the fixture.
cat > example.txt << EOF
another
yet another
first
fourth
fifth
EOF
git commit -am 'Fourth commit' > /dev/null
cat > example.txt << EOF
another
yet another
first
fourth
fifth
EOF
git commit -am 'Fifth commit' > /dev/null
|
georgebrock/git-browse
|
tests/createrepo.sh
|
Shell
|
mit
| 643 |
#!/bin/sh
# Origin: http://blogs.wcode.org/2013/09/howto-boot-your-raspberry-pi-into-a-fullscreen-browser-kiosk/
# Stop the kiosk session: shut down the display manager, terminate the
# browser and window manager, and wipe the browser profile for a clean
# next boot.
#while true; do
/etc/init.d/lightdm stop &
# Clean up previously running apps, gracefully at first then harshly
killall -TERM chromium 2>/dev/null;
killall -TERM matchbox-window-manager 2>/dev/null;
sleep 2;
killall -9 chromium 2>/dev/null;
killall -9 matchbox-window-manager 2>/dev/null;
# Clean out existing profile information
rm -rf /home/pi/.cache;
rm -rf /home/pi/.config;
rm -rf /home/pi/.pki;
|
M3kH/momo
|
utils/stopx.sh
|
Shell
|
mit
| 517 |
#!/bin/sh
# Ruby-on-Rails setup for Ubuntu-14.04
# NOTE(review): the ANSI color codes below rely on dash's echo
# interpreting backslash escapes; under bash the raw \033 text would be
# printed instead.
RED='\033[0;31m'
NC='\033[0m' # No Color
echo "${RED} Refreshing packages ${NC}"
dpkg -l > default_packages
# Add Mongodb to default_packages source list
sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 7F0CEB10
echo "deb http://repo.mongodb.org/apt/ubuntu "$(lsb_release -sc)"/mongodb-org/3.0 multiverse" | sudo tee /etc/apt/sources.list.d/mongodb-org-3.0.list
sudo apt-get update
sudo apt-get upgrade -y
echo
echo "${RED} Installing aptitude... ${NC}"
sudo apt-get install -y aptitude
echo
echo "${RED} Updating aptitude... ${NC}"
sudo aptitude update
echo
echo "${RED} Installing dev tools... ${NC}"
sudo aptitude install -y build-essential git-core curl openssl libreadline6 libreadline6-dev curl git-core zlib1g zlib1g-dev libssl-dev libyaml-dev libsqlite3-0 libsqlite3-dev sqlite3 libxml2-dev libxslt-dev autoconf libc6-dev ncurses-dev automake libtool bison subversion ruby-dev libpq-dev libcurl4-openssl-dev libapr1-dev libaprutil1-dev
echo
echo "${RED} Installing Apache... ${NC}"
sudo aptitude install -y apache2 apache2-prefork-dev
echo
echo "${RED} Installing PostgreSql... ${NC}"
sudo aptitude install -y postgresql-client postgresql
echo
echo "${RED} Installing rvm, ruby & rails (Stable versions) ${NC}"
gpg --keyserver hkp://keys.gnupg.net --recv-keys 409B6B1796C275462A1703113804BB82D39DC0E3
curl -sSL https://get.rvm.io | bash -s stable --rails
# NOTE(review): this spawns an *interactive* login shell and blocks the
# script until that shell exits; the steps below only run afterwards.
# The intent was probably `source ~/.rvm/scripts/rvm` — confirm.
/bin/bash --login
echo
echo "${RED} Setting RVM to use ruby-2.2.0 as default ${NC}"
rvm use ruby-2.2.0 --default
echo
echo "${RED} Installing OpenJDK-7 ${NC}"
sudo aptitude install -y openjdk-7-jdk
echo
echo "${RED} Installing Mongodb ${NC}"
sudo apt-get install -y mongodb-org
echo
echo "${RED} Installing Oh-My-Zsh ${NC}"
sudo rm -rf ~/.oh-my-zsh
git clone git://github.com/robbyrussell/oh-my-zsh.git ~/.oh-my-zsh
cp ~/.oh-my-zsh/templates/zshrc.zsh-template ~/.zshrc
chsh -s /bin/zsh
echo
echo "${RED} Installation finished!!! ${NC}"
echo "${RED} Consider restarting your machine. ${NC}"
|
nitinstp23/dotfiles
|
ubuntu-setup.sh
|
Shell
|
mit
| 2,034 |
#!/usr/bin/env bash
# Apply backer's "clean" Puppet manifest using the modules shipped with it.
puppet apply --modulepath=/src/backer/clean/modules /src/backer/clean/clean.pp
|
dockerizedrupal/backer
|
src/backer/clean.sh
|
Shell
|
mit
| 100 |
#!/bin/sh
set -ex
# Derive a short PostgreSQL version tag (e.g. "pg96") from pg_config:
# take the second word of `pg_config --version` ("PostgreSQL 9.6.5"),
# strip the dots and keep the first two characters.
# (Backticks replaced with $(...) and expansions quoted.)
pg_short_version="pg$(pg_config --version | cut -d' ' -f2 | tr -d . | cut -c-2)"
echo "$pg_short_version"
|
durch/google-bigtable-postgres-fdw
|
utils/get_version.sh
|
Shell
|
mit
| 120 |
#!/usr/bin/env bash
# Install command-line tools using Homebrew.
# NOTE(review): written for mid-2010s Homebrew — `upgrade --all`, install
# options like --with-default-names, and the homebrew/dupes & versions
# taps have since been removed upstream; confirm before reusing.
# Ask for the administrator password upfront.
sudo -v
# Keep-alive: update existing `sudo` time stamp until the script has finished.
while true; do sudo -n true; sleep 60; kill -0 "$$" || exit; done 2>/dev/null &
# Make sure we’re using the latest Homebrew.
brew update
# Upgrade any already-installed formulae.
brew upgrade --all
# Install GNU core utilities (those that come with OS X are outdated).
# Don’t forget to add `$(brew --prefix coreutils)/libexec/gnubin` to `$PATH`.
#brew install coreutils
#sudo ln -s /usr/local/bin/gsha256sum /usr/local/bin/sha256sum
# Install some other useful utilities like `sponge`.
brew install moreutils
# Install GNU `find`, `locate`, `updatedb`, and `xargs`, `g`-prefixed.
brew install findutils
# Install GNU `sed`, overwriting the built-in `sed`.
brew install gnu-sed --with-default-names
# Install Bash 4.
# Note: don’t forget to add `/usr/local/bin/bash` to `/etc/shells` before
# running `chsh`.
#brew install bash
brew tap homebrew/versions
#brew install bash-completion2
# Install `wget` with IRI support.
brew install wget --with-iri
# Install RingoJS and Narwhal.
# Note that the order in which these are installed is important;
# see http://git.io/brew-narwhal-ringo.
brew install ringojs
brew install narwhal
# Install more recent versions of some OS X tools.
brew install vim --override-system-vi
brew install homebrew/dupes/grep
#brew install homebrew/dupes/openssh
#brew install homebrew/dupes/screen
#brew install homebrew/php/php55 --with-gmp
# Install font tools.
brew tap bramstein/webfonttools
brew install sfnt2woff
brew install sfnt2woff-zopfli
brew install woff2
# Install some CTF tools; see https://github.com/ctfs/write-ups.
#brew install aircrack-ng
#brew install bfg
#brew install binutils
#brew install binwalk
#brew install cifer
#brew install dex2jar
#brew install dns2tcp
#brew install fcrackzip
#brew install foremost
#brew install hashpump
#brew install hydra
#brew install john
#brew install knock
#brew install netpbm
#brew install nmap
#brew install pngcheck
#brew install socat
#brew install sqlmap
#brew install tcpflow
#brew install tcpreplay
#brew install tcptrace
#brew install ucspi-tcp # `tcpserver` etc.
#brew install xpdf
#brew install xz
# Install other useful binaries.
brew install ack
brew install dark-mode
#brew install exiv2
brew install git
brew install git-lfs
brew install imagemagick --with-webp
#brew install lua
brew install lynx
brew install p7zip
brew install pigz
brew install pv
#brew install rename
#brew install rhino
brew install speedtest_cli
brew install ssh-copy-id
brew install tree
brew install webkit2png
brew install zopfli
# Remove outdated versions from the cellar.
brew cleanup
|
bezhermoso/matiasbynens-dotfiles
|
brew.sh
|
Shell
|
mit
| 2,779 |
# Serve the current directory over HTTP for the tests (Python 2's
# SimpleHTTPServer; -B suppresses .pyc files). The Python 3 equivalent
# would be `python3 -m http.server`.
python -Bm SimpleHTTPServer
|
carlcalderon/science.js
|
tests/start.sh
|
Shell
|
mit
| 27 |
#!/usr/bin/env bash
# MySQL container entrypoint: on an empty /data/mysql, initialize the
# datadir and run the server once with a first-time SQL script (root
# password, veilleur database/user); otherwise just exec the command.
set -e
# Only bootstrap when the datadir is empty AND we are starting mysqld
# (the ${1%_safe} also matches "mysqld_safe").
if [ -z "$(ls -A /data/mysql)" -a "${1%_safe}" = 'mysqld' ]; then
if [ -z "$MYSQL_ROOT_PASSWORD" ]; then
echo >&2 'error: database is uninitialized and MYSQL_ROOT_PASSWORD not set'
echo >&2 ' Did you forget to add -e MYSQL_ROOT_PASSWORD=... ?'
exit 1
fi
mysql_install_db --datadir=/data/mysql
chown -R mysql:mysql /data/mysql
# These statements _must_ be on individual lines, and _must_ end with
# semicolons (no line breaks or comments are permitted).
cat > /tmp/mysql-first-time.sql <<-EOSQL
UPDATE mysql.user SET host = "172.17.%", password = PASSWORD("${MYSQL_ROOT_PASSWORD}") WHERE user = "root" LIMIT 1 ;
DELETE FROM mysql.user WHERE user != "root" OR host != "%" ;
DROP DATABASE IF EXISTS test ;
CREATE DATABASE veilleur ;
GRANT ALL PRIVILEGES ON veilleur.* to veilleur@"%" IDENTIFIED BY 'veilleur';
FLUSH PRIVILEGES ;
EOSQL
# Hand over to mysqld, which executes the bootstrap SQL on startup.
exec "$@" --init-file=/tmp/mysql-first-time.sql
fi
# Datadir already initialized: run the requested command unchanged.
exec "$@"
|
un-zero-un/Veilleur
|
docker/mysql/entrypoint.sh
|
Shell
|
mit
| 953 |
#!/usr/bin/env bash
set -e
# Azure Pipelines step: clone the telemetry-bearing VS Code extension
# repos, run the telemetry extractor over the main sources plus the
# configured extensions, and stage the resolved declaration files.
# Fix: the $BUILD_* path variables are now quoted so directories with
# spaces (common on CI agents) do not word-split the commands.
cd "$BUILD_STAGINGDIRECTORY"
mkdir extraction
cd extraction
git clone --depth 1 https://github.com/Microsoft/vscode-extension-telemetry.git
git clone --depth 1 https://github.com/Microsoft/vscode-chrome-debug-core.git
git clone --depth 1 https://github.com/Microsoft/vscode-node-debug2.git
git clone --depth 1 https://github.com/Microsoft/vscode-node-debug.git
git clone --depth 1 https://github.com/Microsoft/vscode-html-languageservice.git
git clone --depth 1 https://github.com/Microsoft/vscode-json-languageservice.git
node "$BUILD_SOURCESDIRECTORY/build/node_modules/.bin/vscode-telemetry-extractor" --sourceDir "$BUILD_SOURCESDIRECTORY" --excludedDir "$BUILD_SOURCESDIRECTORY/extensions" --outputDir . --applyEndpoints
node "$BUILD_SOURCESDIRECTORY/build/node_modules/.bin/vscode-telemetry-extractor" --config "$BUILD_SOURCESDIRECTORY/build/azure-pipelines/common/telemetry-config.json" -o .
mkdir -p "$BUILD_SOURCESDIRECTORY/.build/telemetry"
mv declarations-resolved.json "$BUILD_SOURCESDIRECTORY/.build/telemetry/telemetry-core.json"
mv config-resolved.json "$BUILD_SOURCESDIRECTORY/.build/telemetry/telemetry-extensions.json"
cd ..
rm -rf extraction
|
hoovercj/vscode
|
build/azure-pipelines/common/extract-telemetry.sh
|
Shell
|
mit
| 1,170 |
# Average the timing values extracted from a log file.
# Usage: get-average-time.sh log-path [capture_pattern] [replace_to]
# Relies on the external `lzmw` tool for regex capture/replacement.
if [ $# -lt 1 ] || [ "$1" = "-h" ] || [ "$1" = "--help" ]; then
echo "Usage : test/tools/shell/get-average-time.sh log-path [capture_pattern] [replace_to]"
echo "Example : test/tools/shell/get-average-time.sh mobius-stream.log \"^.*func process time: (\d+).*$\" '\$1' "
echo "Example : test/tools/shell/get-average-time.sh mobius-stream.log \"^.*command process time: (\d+).*$\" '\$1' "
exit
fi
log=$1
capture_pattern=$2
replace_to=$3
# On Cygwin, cd next to the log so lzmw is handed a bare file name.
if [ -n "$(uname -a | egrep -ie cygwin)" ]; then
cd $(dirname $log)
log=$(basename $log)
fi
# Default: capture the integer after "func process time:".
if [ -z "$capture_pattern" ]; then
capture_pattern="^.*func process time: (\d+).*$"
fi
if [ -z "$replace_to" ]; then
replace_to='$1'
fi
# Extract one number per matching line and average them with awk.
lzmw -p $log -it "$capture_pattern" -o "$replace_to" -PAC | awk 'BEGIN{sum=0; rows=0} { if($0 > 0) {rows++; sum+=$0;} } END { printf("rows = %d, average = %f , ", rows, sum/rows); }'
echo "pattern = $capture_pattern"
|
qualiu/testMobius
|
scripts/log/get-worker-average-time.sh
|
Shell
|
mit
| 961 |
#!/bin/sh
# CYBERWATCH SAS - 2016
#
# Security fix for RHSA-2014:0382
#
# Security announcement date: 2014-04-09 18:55:27 UTC
# Script generation date: 2016-05-12 18:11:55 UTC
#
# Operating System: Red Hat 6
# Architecture: x86_64
#
# Vulnerable packages fix on version:
# - python-keystoneclient.noarch:0.7.1-2.el6ost
# - python-keystoneclient-doc.noarch:0.7.1-2.el6ost
# - python-six.noarch:1.5.2-1.el6
#
# Last versions recommended by security team:
# - python-keystoneclient.noarch:0.9.0-6.el6ost
# - python-keystoneclient-doc.noarch:0.9.0-6.el6ost
# - python-six.noarch:1.6.1-1.el6
#
# CVE List:
# - CVE-2014-0105
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
# Install the patched package versions.
sudo yum install python-keystoneclient.noarch-0.9.0 -y
sudo yum install python-keystoneclient-doc.noarch-0.9.0 -y
sudo yum install python-six.noarch-1.6.1 -y
|
Cyberwatch/cbw-security-fixes
|
Red_Hat_6/x86_64/2014/RHSA-2014:0382.sh
|
Shell
|
mit
| 931 |
#!/bin/csh -f
#
# svn $Id: job_r4dvar_sen.sh 2328 2014-01-23 20:16:18Z arango $
#######################################################################
# Copyright (c) 2002-2014 The ROMS/TOMS Group #
# Licensed under a MIT/X style license #
# See License_ROMS.txt #
#######################################################################
# #
# Strong/Weak constraint R4D-Var observation impact or sensitivity #
# job script: #
# #
# This script NEEDS to be run before any run: #
# #
# (1) It copies a new clean nonlinear model initial conditions #
# file. The nonlinear model is initialized from the #
# background or reference state. #
# (2) It copies representer model initial condition, same as #
# nonlinear model. #
# (3) It copies Lanczos vectors from previous R4D-Var run. They #
# are stored in 4D-Var data assimilation file. #
# (4) It copies the adjoint sensitivy functional file for the #
# observation impact or sensitivity. #
# (5) Specify model, initial conditions, boundary conditions, and #
# surface forcing error convariance input standard deviations #
# files. #
# (6) Specify model, initial conditions, boundary conditions, and #
# surface forcing error convariance input/output normalization #
# factors files. #
# (7) Copy a clean copy of the observations NetCDF file. #
# (8) Create 4D-Var input script "r4dvar.in" from template and #
# specify the error covariance standard deviation, error #
# covariance normalization factors, and observation files to #
# be used. #
# #
#######################################################################
# NOTE: C shell script — uses `set var=value` assignment syntax.
# Set path definition to one directory up in the tree.
set Dir=`dirname ${PWD}`
# Set string manipulations perl script.
set SUBSTITUTE=${ROMS_ROOT}/ROMS/Bin/substitute
# Copy nonlinear model initial conditions file, use background or
# first guess state.
cp -p ${Dir}/Data/wc13_ini.nc wc13_ini.nc
# Copy representer model initial conditions file, same as nonlinear
# model.
cp -p ${Dir}/Data/wc13_ini.nc wc13_irp.nc
# Copy Lanczos vectors from previous R4D-Var run. They are stored
# in 4D-Var data assimilation file.
cp -p ${Dir}/R4DVAR/wc13_mod.nc wc13_lcz.nc
# Copy adjoint sensitivity functional.
cp -p ${Dir}/Data/wc13_ads.nc wc13_ads.nc
# Set model, initial conditions, boundary conditions and surface
# forcing error covariance standard deviations files.
set STDnameM=${Dir}/Data/wc13_std_m.nc
set STDnameI=${Dir}/Data/wc13_std_i.nc
set STDnameB=${Dir}/Data/wc13_std_b.nc
set STDnameF=${Dir}/Data/wc13_std_f.nc
# Set model, initial conditions, boundary conditions and surface
# forcing error covariance normalization factors files.
set NRMnameM=${Dir}/Data/wc13_nrm_m.nc
set NRMnameI=${Dir}/Data/wc13_nrm_i.nc
set NRMnameB=${Dir}/Data/wc13_nrm_b.nc
set NRMnameF=${Dir}/Data/wc13_nrm_f.nc
# Set observations file.
set OBSname=wc13_obs.nc
# Get a clean copy of the observation file. This is really
# important since this file is modified.
cp -p ${Dir}/Data/${OBSname} .
# Modify 4D-Var template input script and specify above files.
set R4DVAR=r4dvar.in
if (-e $R4DVAR) then
/bin/rm $R4DVAR
endif
cp s4dvar.in $R4DVAR
# Substitute the placeholder file names in the template with the
# concrete paths chosen above (SUBSTITUTE edits $R4DVAR in place).
$SUBSTITUTE $R4DVAR ocean_std_m.nc $STDnameM
$SUBSTITUTE $R4DVAR ocean_std_i.nc $STDnameI
$SUBSTITUTE $R4DVAR ocean_std_b.nc $STDnameB
$SUBSTITUTE $R4DVAR ocean_std_f.nc $STDnameF
$SUBSTITUTE $R4DVAR ocean_nrm_m.nc $NRMnameM
$SUBSTITUTE $R4DVAR ocean_nrm_i.nc $NRMnameI
$SUBSTITUTE $R4DVAR ocean_nrm_b.nc $NRMnameB
$SUBSTITUTE $R4DVAR ocean_nrm_f.nc $NRMnameF
$SUBSTITUTE $R4DVAR ocean_obs.nc $OBSname
$SUBSTITUTE $R4DVAR ocean_hss.nc wc13_hss.nc
$SUBSTITUTE $R4DVAR ocean_lcz.nc wc13_lcz.nc
$SUBSTITUTE $R4DVAR ocean_mod.nc wc13_mod.nc
$SUBSTITUTE $R4DVAR ocean_err.nc wc13_err.nc
|
mcflugen/roms-lite
|
src/roms/ROMS/Bin/job_r4dvar_sen.sh
|
Shell
|
mit
| 4,621 |
#!/bin/bash
# Interactive installer for a Grav instance run under docker-compose:
# loads a per-instance env file, fills in defaults, shows a summary for
# confirmation, then renders the compose file from its template.
GRAV_INSTANCE_NAME=${1:-"default"}
ENV_FILE=./config/$GRAV_INSTANCE_NAME.env
if [[ ! -f "$ENV_FILE" ]]; then
echo "Environment file $ENV_FILE not found"
echo "Please provide a valid env file name"
echo "For instance : "
echo " install.sh grav : will use the file config/grav.env"
echo " install.sh : will use the file config/default.env"
exit 0
fi
source $ENV_FILE
# Defaults for anything the env file did not provide.
LOCAL_USER_ID="$(id -u $USER)"
GIT_USER=${GIT_USER:-$USER}
GIT_MAIL=${GIT_MAIL:-$USER'@example.com'}
VIRTUAL_HOST=${VIRTUAL_HOST:-"$GRAV_INSTANCE_NAME".localhost}
GRAV_SYSTEM_REPOSITORY=${GRAV_SYSTEM_REPOSITORY:-$GRAV_GIT}
# This script might not work well on OSX : (just replace the command by a random string)
#GIT_PULL_DIRECTORY_NAME=${GIT_PULL_DIRECTORY_NAME:-"$(cat /dev/urandom | env LC_CTYPE=C tr -cd 'a-f0-9' | head -c 32)"}
# NOTE(review): defaults from GIT_PULL_DIRECTORY_NAME while DOCKERNAME is
# only assigned further below — both look unintended; confirm.
GIT_PULL_SCRIPT_NAME=${GIT_PULL_DIRECTORY_NAME:-"${DOCKERNAME}"'_pull'}
# Print the effective configuration so the user can confirm before the
# installer touches anything.
function summary {
echo '----------- SUMMARY -----------------------'
echo 'CONFIGURATION FILE = '$ENV_FILE
echo 'Grav instance name is '$GRAV_INSTANCE_NAME
echo "LOCAL_USER_ID is " $LOCAL_USER_ID
#If GRAV_GIT IS NOT SET then
if [ -z "$GRAV_GIT" ]; then
# Print from GRAV_ZIP
echo "Grav system is extrated from : " $GRAV_ZIP
else
# Otherwise print GRAV_GIT
echo "Grav system is cloned from : " $GRAV_GIT
fi
echo "VIRTUAL_HOST are : " $VIRTUAL_HOST
echo "Grav System will be commited in : " $GRAV_SYSTEM_REPOSITORY
echo "Grav accounts shared directory : " /www/.shared/$SHARED_ACCOUNTS_GROUP
echo "EXPOSED HTTP PORT: " $HTTP_PORT
echo "GIT USER NAME : "$GIT_USER
echo "GIT USER MAIL : "$GIT_MAIL
echo "PULL ADDRESS : http://<vhost>.<hostname>/git/${GIT_PULL_DIRECTORY_NAME}/pull.php"
if [[ ! -z "$HTPASSWD_NAME" ]]; then
echo "htpasswd file : "./www/.htpasswd/$HTPASSWD_NAME
fi
if [[ ! -z "$LETSENCRYPT_HOST" ]]; then
echo "LETSENCRYPT_HOST=${LETSENCRYPT_HOST}"
fi;
if [[ ! -z "$LETSENCRYPT_EMAIL" ]]; then
echo "LETSENCRYPT_EMAIL=${LETSENCRYPT_EMAIL}"
fi;
echo '-------------------------------------------'
}
echo
echo
echo "This script is meant to be executed as a normal user (non root)"
echo
echo "It will configure LOCAL_USER_ID to your USER ID (current user has $(id -u $USER)) in docker-compose.yml"
echo
summary;
DOCKERNAME="$GRAV_INSTANCE_NAME"
NGINXNAME="$DOCKERNAME"_web_1
mkdir ./www/$DOCKERNAME 2> /dev/null
# Wait for explicit confirmation before generating anything.
read -p "Is this configuration OK ? Press a key to continue or CTRL+C to abort ..."
export HTTP_PORT;
export VIRTUAL_HOST;
export LOCAL_USER_ID;
# Start the per-instance compose file from the template.
cat docker-compose.yml.tpl > ./cache/$DOCKERNAME.yml
# Apply one sed substitution to the cached compose file "in place", going
# through the same /tmp scratch copy as before.
apply_tpl() {
    sed -e "$1" ./cache/$DOCKERNAME.yml > /tmp/$DOCKERNAME.yml
    cat /tmp/$DOCKERNAME.yml > ./cache/$DOCKERNAME.yml
}
# Enable the ports: section when any host port is exposed.
if [[ -n "$HTTP_PORT" ]] || [[ -n "$HTTPS_PORT" ]]; then
    apply_tpl "s|#ports:|ports:|"
fi
if [[ -n "$HTTP_PORT" ]]; then
    apply_tpl "s|#HTTP_PORT#|- ${HTTP_PORT}:80|"
fi
if [[ -n "$HTTPS_PORT" ]]; then
    apply_tpl "s|#HTTP_PORT#|- ${HTTPS_PORT}:443|"
fi
if [[ -n "$VIRTUAL_HOST" ]]; then
    apply_tpl "s|#VIRTUAL_HOST#|- VIRTUAL_HOST=${VIRTUAL_HOST}|"
fi
if [[ -n "$LOCAL_USER_ID" ]]; then
    apply_tpl "s|#LOCAL_USER_ID#|- LOCAL_USER_ID=${LOCAL_USER_ID}|"
fi
if [[ -n "$LETSENCRYPT_HOST" ]]; then
    apply_tpl "s|#LETSENCRYPT_HOST#|- LETSENCRYPT_HOST=${LETSENCRYPT_HOST}|"
fi
if [[ -n "$LETSENCRYPT_EMAIL" ]]; then
    apply_tpl "s|#LETSENCRYPT_EMAIL#|- LETSENCRYPT_EMAIL=${LETSENCRYPT_EMAIL}|"
fi
# Basic-auth file: make sure it exists on the host before mounting it.
if [[ -n "$HTPASSWD_NAME" ]]; then
    touch ./www/.htpasswd/${HTPASSWD_NAME}
    apply_tpl "s|#HTPASSWD#|- ./www/.htpasswd/${HTPASSWD_NAME}:/www/.htpasswd|"
fi
# The docroot bind-mount is always configured.
apply_tpl "s|#WWW_VOLUME#|- ./www/${DOCKERNAME}:/www/${DOCKERNAME}|"
# Optionally share the Grav accounts directory between instances.
if [[ -n "$SHARED_ACCOUNTS_GROUP" ]]; then
    echo "WILL STORE ACCOUNTS in ./www/.shared/$SHARED_ACCOUNTS_GROUP/"
    apply_tpl "s|#SHARED_ACCOUNT_VOLUME#|- ./www/.shared/$SHARED_ACCOUNTS_GROUP/:/www/${DOCKERNAME}/user/accounts|"
fi
# Show the final compose file for review.
cat ./cache/$DOCKERNAME.yml
# Shared front-proxy network; errors if it already exists (script continues —
# no set -e visible in this chunk; confirm against the file header).
sudo docker network create www
# Recreate the stack: tear down any previous instance, refresh images, bring up.
cat ./cache/$DOCKERNAME.yml | sudo docker-compose -f - -p $DOCKERNAME down
sudo docker pull nnynn/dkgr-nginx:latest
sudo docker pull nnynn/dkgr-php:latest
cat ./cache/$DOCKERNAME.yml | sudo docker-compose -f - -p $DOCKERNAME up -d
read -p "ready to configure NGINX ... change the default.conf root is to /www/$DOCKERNAME (PRESS A KEY OR CTRL+C TO ABORT)"
# Point the container's nginx default.conf at this instance's docroot.
sudo docker exec $NGINXNAME /bin/sh -c "(sed -i -e \"s|#DOCKERNAME#|$DOCKERNAME|\" /etc/nginx/conf.d/default.conf)"
if [ ! -z "$HTPASSWD_NAME" ]; then
# Enable HTTP basic auth by substituting the #AUTH_BASIC# placeholder.
sudo docker exec $NGINXNAME /bin/sh -c "(sed -i -e \"s|#AUTH_BASIC#|auth_basic|\" /etc/nginx/conf.d/default.conf)"
sudo docker exec $NGINXNAME /bin/sh -c "(cat /etc/nginx/conf.d/default.conf)"
fi;
echo "Will use the following default.conf :"
sudo docker exec $NGINXNAME /bin/sh -c "(cat /etc/nginx/conf.d/default.conf)"
# Reload nginx inside the container so the edited config takes effect.
sudo docker exec $NGINXNAME /bin/sh -c "/usr/sbin/nginx -s reload"
echo "Will retrieve grav from given location (PRESS A KEY or CTRL+C)"
# --- Fetch the Grav tree (zip skeleton or git clone) --------------------------
# SET GIT USER NAME AND MAIL :
./bin/git $DOCKERNAME config --global user.email "${GIT_MAIL}"
./bin/git $DOCKERNAME config --global user.name "${GIT_USER}"
#if [[ ! "$(ls -A ./www/$DOCKERNAME/bin)" ]]; then
# Only download when the instance tree looks empty (no bin/ directory yet).
if [[ ! -d "./www/$DOCKERNAME/bin" ]]; then
#If GRAV_GIT IS NOT SET then
if [ -z "$GRAV_GIT" ]; then
# Print from GRAV_ZIP
echo "Extract grav skeleton from : " $GRAV_ZIP
if [[ ! $GRAV_ZIP =~ \.zip$ ]]; then
echo "Invalid zip files";
# Exit non-zero: this is an error condition (the original exited 0 here,
# so callers could not detect the failure).
exit 1;
fi
sudo unzip $GRAV_ZIP -d ./www/$DOCKERNAME
else
# Otherwise print GRAV_GIT
echo "Grav system is cloned from : " $GRAV_GIT
(cd ./www/$DOCKERNAME && git init)
(cd ./www/$DOCKERNAME && git remote add origin $GRAV_GIT)
#./bin/git $DOCKERNAME clone $GRAV_GIT '.'
# fetch + hard reset instead of clone so the pre-created directory is reused.
./bin/git $DOCKERNAME fetch origin
./bin/permissions-fixing "$DOCKERNAME"
./bin/git $DOCKERNAME reset --hard origin/master
./bin/git $DOCKERNAME submodule update --init --recursive
fi
else
echo "THE TARGET DIRECTORY IS NOT EMPTY (SKIPPING DOWNLOAD)"
fi
# Wire the instance to the repository where its state will be committed.
if [ ! -z "$GRAV_SYSTEM_REPOSITORY" ]; then
echo checks "./www/$DOCKERNAME/.git" exists
if [[ ! -d "./www/$DOCKERNAME/.git" ]]; then
echo "create a git structure"
# Seed the .gitignore so runtime files are not committed.
cp ./nginx/grav_gitignore ./www/$DOCKERNAME/.gitignore
(cd ./www/$DOCKERNAME && git init)
(cd ./www/$DOCKERNAME && git remote add origin $GRAV_SYSTEM_REPOSITORY)
else
echo "change git origin url"
(cd ./www/$DOCKERNAME && git remote set-url origin $GRAV_SYSTEM_REPOSITORY)
fi;
else
echo "no system git repository"
fi
# Runtime directories Grav expects to be writable.
mkdir -p ./www/$DOCKERNAME/logs
mkdir -p ./www/$DOCKERNAME/images
mkdir -p ./www/$DOCKERNAME/assets
mkdir -p ./www/$DOCKERNAME/user/data
### SET GIT PULL SCRIPT WITH THE PROPER NAME INTO THE PULL DIRECTORY OF THE GRAV INSTALL
rm -Rf ./www/${DOCKERNAME}/git/
mkdir ./www/${DOCKERNAME}/git/
cp ./nginx/git/gitignore ./www/${DOCKERNAME}/git/.gitignore
# NOTE(review): the directory is created from GIT_PULL_DIRECTORY_NAME but the
# sed/echo lines below use GIT_PULL_SCRIPT_NAME; unless both hold the same
# value these point at different paths (and GIT_PULL_DIRECTORY_NAME is never
# assigned earlier in this chunk) — confirm which variable is intended.
mkdir "./www/${DOCKERNAME}/git/${GIT_PULL_DIRECTORY_NAME}/"
cp -R ./nginx/git/* "./www/${DOCKERNAME}/git/${GIT_PULL_DIRECTORY_NAME}/"
PULL_SCRIPT="/www/${DOCKERNAME}/git/${GIT_PULL_DIRECTORY_NAME}/pull.sh"
# Make the pull script owned by the web user and executable inside the container.
sudo docker exec $NGINXNAME /bin/sh -c "(chown www:www ${PULL_SCRIPT} && chmod +x ${PULL_SCRIPT} && ls -l ${PULL_SCRIPT})"
# Substitute the instance name into the PHP entry points.
sudo docker exec $NGINXNAME /bin/sh -c "(sed -i -e \"s|#DOCKERNAME#|$DOCKERNAME|\" ./www/${DOCKERNAME}/git/${GIT_PULL_SCRIPT_NAME}/pull.php)"
sudo docker exec $NGINXNAME /bin/sh -c "(sed -i -e \"s|#DOCKERNAME#|$DOCKERNAME|\" ./www/${DOCKERNAME}/git/${GIT_PULL_SCRIPT_NAME}/commit.php)"
echo "pull request can be send to http://host:$HTTP_PORT/git/${GIT_PULL_SCRIPT_NAME}/pull.php"
sudo docker exec $NGINXNAME /bin/sh -c "(ls -l ${PULL_SCRIPT})"
./bin/permissions-fixing "$DOCKERNAME"
sudo docker exec $NGINXNAME /bin/sh -c "(ls -l ${PULL_SCRIPT})"
read -p "grav has been downloaded (press a key) ..."
summary;
echo "Grav is supposed to be accessible on http://localhost:$HTTP_PORT/ (unless you changed the port)"
echo "If this is a new install (grav has been downloaded from official repo), you have to run grav-admin/grav $NGINXNAME install"
|
Ynn/dkgr
|
install.sh
|
Shell
|
mit
| 8,962 |
#!/bin/bash
set -ex
# Build the PHP-FPM alpine image once, then retag and push it under every alias.
readonly REPOSITORY_NAME="voskobovich/yii2-php"
readonly MAJOR_TAG="7.1.7-fpm-alpine"
# Additional tags that point at the same image — kept as a bash array instead
# of a word-split scalar so tags can never be mangled by IFS changes.
readonly MINOR_TAGS=("7.1-fpm-alpine" "7-fpm-alpine" "fpm-alpine")
docker build -t "${REPOSITORY_NAME}:${MAJOR_TAG}" .
docker push "${REPOSITORY_NAME}:${MAJOR_TAG}"
# Tag and push image for each additional tag
for tag in "${MINOR_TAGS[@]}"; do
    docker tag "${REPOSITORY_NAME}:${MAJOR_TAG}" "${REPOSITORY_NAME}:${tag}"
    docker push "${REPOSITORY_NAME}:${tag}"
done
|
voskobovich/yii2-docker
|
php/7.1/fpm/alpine/bin/build.sh
|
Shell
|
mit
| 442 |
#!/bin/bash
# Preprocess a repo, then mirror it to a remote host over ssh.
#   $1 - repository name under ~/repos
#   $2 - ssh login name
#   $3 - rsync destination (e.g. host:path)
node ~/scripts/resourceLoading "$1"
# Quote the expansions so names/destinations containing spaces survive;
# StrictHostkeyChecking=no keeps the first connection non-interactive.
rsync -rtv -e "ssh -l $2 -o StrictHostkeyChecking=no" "$HOME/repos/$1" "$3"
|
jostylr/write-web
|
old/build/fileuser/rsync-ssh.sh
|
Shell
|
mit
| 116 |
#! /bin/bash
# Shared dotfile helpers; provides linkOrWarn (defined in func.sh, not visible
# here — presumably links the target unless it already exists; confirm there).
. ./func.sh
echo "Setting up X configuration:"
# Install the tracked Xresources into the home directory.
linkOrWarn x11/Xresources ~/.Xresources;
|
melloc/dotfiles
|
x11/init.sh
|
Shell
|
mit
| 104 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for USN-2302-1
#
# Security announcement date: 2014-07-30 00:00:00 UTC
# Script generation date: 2017-02-02 21:02:31 UTC
#
# Operating System: Ubuntu 12.04 LTS
# Architecture: i386
#
# Vulnerable packages fix on version:
#   - libtomcat6-java:6.0.35-1ubuntu3.5
#
# Last versions recommanded by security team:
#   - libtomcat6-java:6.0.35-1ubuntu3.10
#
# CVE List:
#   - CVE-2014-0075
#   - CVE-2014-0096
#   - CVE-2014-0099
#
# More details:
#   - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
# Upgrade only the vulnerable package, pinned to the recommended fixed version.
sudo apt-get install --only-upgrade libtomcat6-java=6.0.35-1ubuntu3.10 -y
|
Cyberwatch/cbw-security-fixes
|
Ubuntu_12.04_LTS/i386/2014/USN-2302-1.sh
|
Shell
|
mit
| 686 |
# Source location and packaging metadata for the Redox cookbook recipe.
GIT=https://github.com/jackpot51/pixelcannon.git
BINDIR=/ui/bin
DEPENDS="orbital"
# Stage the built app into the target filesystem root passed as $1:
# copy the runtime assets and install the UI manifest.
recipe_stage() {
    local stage_root="$1"
    mkdir -pv "$stage_root/apps/pixelcannon"
    cp -Rv assets "$stage_root/apps/pixelcannon"
    mkdir -pv "$stage_root/ui/apps"
    cp -v manifest "$stage_root/ui/apps/pixelcannon"
}
|
redox-os/cookbook
|
recipes/pixelcannon/recipe.sh
|
Shell
|
mit
| 256 |
#!/bin/bash -e
# Refresh puppet-managed npm/bower modules under the puppet environments dir.
# Must run as the puppet user (uid 52); concurrent runs are serialized by a
# lockfile. NOTE: this is an ERB template — <%= @root_directory %> is
# substituted by Puppet before the script is installed.
function clean_up {
  rm -f update-modules.lock
  exit
}
if [ "$(id -u)" != "52" ]; then
  echo "This script must be run as user Puppet" 1>&2
  exit 1
fi
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# Quoted so a script path containing spaces doesn't break pushd.
pushd "${DIR}" > /dev/null
cd <%= @root_directory %>/puppet/environments
# -r0: fail immediately if the lock is already held (no retries).
lockfile -r0 update-modules.lock
# Ensure the lock is released on every exit path.
trap clean_up EXIT
npm install
npm update
CI=true #https://bower.io/docs/api/#running-on-a-continuous-integration-server
node_modules/bower/bin/bower prune
node_modules/bower/bin/bower install --force-latest
node_modules/bower/bin/bower update --force-latest
clean_up
# NOTE(review): unreachable — clean_up calls `exit`, so popd never runs.
popd > /dev/null
|
TomiTakussaari/bower_puppet_server
|
templates/opt/puppet/scripts/update-modules.sh
|
Shell
|
mit
| 628 |
#!/bin/bash
# Smoke-test the notifications web service on localhost:4567.
# Each step prints a banner, runs the given curl command, and prints the raw
# response between blank lines (output format identical to the original,
# including the "Reponse" spelling).
step() {
    printf "\n%s\n\n" "$1"
    printf "Reponse: \n\n"
    shift
    "$@"
    printf "\n\n"
}
step "Calling service to list all notifications..." \
    curl http://localhost:4567/notifications/find
step "Calling service to create a new notification..." \
    curl -H "Content-Type: application/json" -d '{"title":"blah","message":"blah","image_url":"http://coolurl.com/test.jpg","email_address":"[email protected]"}' http://localhost:4567/notification/create
step "Calling service to verify creation..." \
    curl http://localhost:4567/notifications/find
step "Calling service to create a with a bad notification..." \
    curl -H "Content-Type: application/json" -d '{"title":"blah","message":"blah","email_address":"[email protected]"}' http://localhost:4567/notification/create
step "Calling service to verify it did not create notification..." \
    curl http://localhost:4567/notifications/find
|
hasbrettn/flaming-cyril
|
notifications/test_web.sh
|
Shell
|
mit
| 1,026 |
#!/usr/bin/env bash
# Ask for the administrator password upfront.
sudo -v
# Keep-alive: update existing `sudo` time stamp until the script has finished.
# (kill -0 "$$" fails once this shell exits, which terminates the loop.)
while true; do sudo -n true; sleep 60; kill -0 "$$" || exit; done 2>/dev/null &
# Install RVM stable with ruby and rails
# Import the RVM release-signing keys so the installer can verify its download.
gpg --keyserver hkp://pool.sks-keyservers.net --recv-keys 409B6B1796C275462A1703113804BB82D39DC0E3 7D2BAF1CF37B13E2069D6956105BD0E739499BDB
curl -sSL https://get.rvm.io | bash -s stable --rails
|
neutraltone/dotfiles
|
ruby.sh
|
Shell
|
mit
| 471 |
#!/bin/bash
# Bootstrap a PharoCandle image in a fresh results/ directory and run the
# Seed test suite against it (JUnit XML output for CI).
set -e
RESULTS_FOLDER="results"
#Work in temporal directory
# Refuse to run if anything (file or directory) already occupies the path.
if [ -a $RESULTS_FOLDER ]; then
echo "cannot create directory named \""$RESULTS_FOLDER"\": file already exists"
exit 1
fi
mkdir $RESULTS_FOLDER
cd $RESULTS_FOLDER
#Load image for this project
# Download the Oz VM, a Pharo 3.0 image, and the matching sources file.
wget -O - guillep.github.io/files/get/OzVmLatest | bash
wget -O - get.pharo.org/30 | bash
wget http://files.pharo.org/sources/PharoV30.sources
# Save a working copy of the image under the bootstrap name.
./oz Pharo.image save PharoCandleBootstrap --delete-old
#Load stable version of the monticello configuration, according to this git sources
REPO=http://smalltalkhub.com/mc/Guille/Seed/main
./oz PharoCandleBootstrap.image config $REPO ConfigurationOfHazelnut --install=bleedingEdge
echo "Configuration Loaded. running tests"
./oz PharoCandleBootstrap.image test --junit-xml-output "Seed.*"
echo "Script created and loaded. Finished! :D"
|
guillep/PharoCandle
|
build/runtests.sh
|
Shell
|
mit
| 856 |
# Convert the municipio centroid layer (VRT) to GeoJSON, replacing any
# output left over from a previous run.
echo "Converting the municipio centroids to GeoJSON"
[ -f municipios-centroids.json ] && rm municipios-centroids.json
ogr2ogr -f "GeoJSON" municipios-centroids.json municipios-centroids.vrt
|
diegovalle/new.crimenmexico
|
R/interactive-map/convert.sh
|
Shell
|
mit
| 202 |
#!/bin/sh
# CocoaPods-generated "Copy Pods Resources" build phase.
set -e
mkdir -p "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
# Manifest of plain resources to copy; truncated fresh on every run.
RESOURCES_TO_COPY=${PODS_ROOT}/resources-to-copy-${TARGETNAME}.txt
> "$RESOURCES_TO_COPY"
# Accumulates absolute paths of .xcassets bundles for a single actool pass.
XCASSET_FILES=()
# Absolutize $1: resolve the directory part via cd+pwd and re-append the
# file name (no readlink; the result is what pwd reports for the directory).
realpath() {
  echo "$(cd "${1%/*}" && pwd)/${1##*/}"
}
# Dispatch a single Pod resource by extension: interface files are compiled
# with ibtool, data models with momc/mapc, frameworks rsynced, asset catalogs
# queued for one actool pass, and everything else appended to the copy manifest.
install_resource()
{
  case $1 in
    *.storyboard)
# Compile the storyboard into a .storyboardc bundle (echo first for build logs).
      echo "ibtool --reference-external-strings-file --errors --warnings --notices --output-format human-readable-text --compile ${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \"$1\" .storyboard`.storyboardc ${PODS_ROOT}/$1 --sdk ${SDKROOT}"
      ibtool --reference-external-strings-file --errors --warnings --notices --output-format human-readable-text --compile "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \"$1\" .storyboard`.storyboardc" "${PODS_ROOT}/$1" --sdk "${SDKROOT}"
      ;;
    *.xib)
# Compile the xib into a .nib.
        echo "ibtool --reference-external-strings-file --errors --warnings --notices --output-format human-readable-text --compile ${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \"$1\" .xib`.nib ${PODS_ROOT}/$1 --sdk ${SDKROOT}"
      ibtool --reference-external-strings-file --errors --warnings --notices --output-format human-readable-text --compile "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \"$1\" .xib`.nib" "${PODS_ROOT}/$1" --sdk "${SDKROOT}"
      ;;
    *.framework)
# Frameworks are synced wholesale into the frameworks folder.
      echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
      mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
      echo "rsync -av ${PODS_ROOT}/$1 ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
      rsync -av "${PODS_ROOT}/$1" "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
      ;;
    *.xcdatamodel)
# Core Data models compile to .mom / .momd; mapping models to .cdm.
      echo "xcrun momc \"${PODS_ROOT}/$1\" \"${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$1"`.mom\""
      xcrun momc "${PODS_ROOT}/$1" "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$1" .xcdatamodel`.mom"
      ;;
    *.xcdatamodeld)
      echo "xcrun momc \"${PODS_ROOT}/$1\" \"${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$1" .xcdatamodeld`.momd\""
      xcrun momc "${PODS_ROOT}/$1" "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$1" .xcdatamodeld`.momd"
      ;;
    *.xcmappingmodel)
      echo "xcrun mapc \"${PODS_ROOT}/$1\" \"${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$1" .xcmappingmodel`.cdm\""
      xcrun mapc "${PODS_ROOT}/$1" "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$1" .xcmappingmodel`.cdm"
      ;;
    *.xcassets)
# Asset catalogs are collected and compiled together at the end of the script.
      ABSOLUTE_XCASSET_FILE=$(realpath "${PODS_ROOT}/$1")
      XCASSET_FILES+=("$ABSOLUTE_XCASSET_FILE")
      ;;
    /*)
# Absolute paths go into the copy manifest as-is.
      echo "$1"
      echo "$1" >> "$RESOURCES_TO_COPY"
      ;;
    *)
# Everything else is resolved relative to the Pods root.
      echo "${PODS_ROOT}/$1"
      echo "${PODS_ROOT}/$1" >> "$RESOURCES_TO_COPY"
      ;;
  esac
}
# Register each Pod's resources for the active build configuration.
if [[ "$CONFIGURATION" == "Debug" ]]; then
  install_resource "MJRefresh/MJRefresh/MJRefresh.bundle"
  install_resource "SVProgressHUD-0.8.1/SVProgressHUD/SVProgressHUD.bundle"
fi
if [[ "$CONFIGURATION" == "Release" ]]; then
  install_resource "MJRefresh/MJRefresh/MJRefresh.bundle"
  install_resource "SVProgressHUD-0.8.1/SVProgressHUD/SVProgressHUD.bundle"
fi
# Copy all plain resources collected in the manifest into the product.
mkdir -p "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
rsync -avr --copy-links --no-relative --exclude '*/.svn/*' --files-from="$RESOURCES_TO_COPY" / "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
if [[ "${ACTION}" == "install" ]] && [[ "${SKIP_INSTALL}" == "NO" ]]; then
  mkdir -p "${INSTALL_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
  rsync -avr --copy-links --no-relative --exclude '*/.svn/*' --files-from="$RESOURCES_TO_COPY" / "${INSTALL_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
fi
rm -f "$RESOURCES_TO_COPY"
# Compile all collected asset catalogs in a single actool invocation.
if [[ -n "${WRAPPER_EXTENSION}" ]] && [ "`xcrun --find actool`" ] && [ -n "$XCASSET_FILES" ]
then
  case "${TARGETED_DEVICE_FAMILY}" in
    1,2)
      TARGET_DEVICE_ARGS="--target-device ipad --target-device iphone"
      ;;
    1)
      TARGET_DEVICE_ARGS="--target-device iphone"
      ;;
    2)
      TARGET_DEVICE_ARGS="--target-device ipad"
      ;;
    *)
      TARGET_DEVICE_ARGS="--target-device mac"
      ;;
  esac
  # Find all other xcassets (this unfortunately includes those of path pods and other targets).
  OTHER_XCASSETS=$(find "$PWD" -iname "*.xcassets" -type d)
  while read line; do
    # Skip catalogs that live under the Pods root (already collected above).
    # Fix: the trailing * must stay unquoted to act as a glob — the original
    # quoted the whole pattern, so the prefix test never matched anything.
    if [[ $line != "$(realpath "$PODS_ROOT")"* ]]; then
      XCASSET_FILES+=("$line")
    fi
  done <<<"$OTHER_XCASSETS"
  printf "%s\0" "${XCASSET_FILES[@]}" | xargs -0 xcrun actool --output-format human-readable-text --notices --warnings --platform "${PLATFORM_NAME}" --minimum-deployment-target "${IPHONEOS_DEPLOYMENT_TARGET}" ${TARGET_DEVICE_ARGS} --compress-pngs --compile "${BUILT_PRODUCTS_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
fi
|
zzBelieve/BaiSi
|
百思不得姐/Pods/Target Support Files/Pods/Pods-resources.sh
|
Shell
|
mit
| 4,984 |
#!/bin/bash
# Generate an lcov/genhtml coverage report and open it.
# http://ltp.sourceforge.net/coverage/lcov/readme.php
# paragraph 4
mkdir -p test_results/
# Capture counters from the current tree (quiet; skip system headers).
lcov -d . -c -o test_results/profile_test.info -q --no-external
# Reset the .gcda counters so the next run starts from zero.
lcov -d . --zerocounters
genhtml -o test_results/ test_results/profile_test.info -q
# NOTE(review): `see` is presumably Debian run-mailcap's viewer command used
# to open the report in a browser — confirm it is installed, otherwise this
# line fails with "command not found".
see test_results/index.html
|
Kamilcuk/Anthill
|
generateTestReport.sh
|
Shell
|
mit
| 282 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for RHSA-2015:0253
#
# Security announcement date: 2015-02-23 19:49:37 UTC
# Script generation date: 2017-01-01 21:15:56 UTC
#
# Operating System: Red Hat 5
# Architecture: x86_64
#
# Vulnerable packages fix on version:
# - samba3x-debuginfo.i386:3.5.4-0.70.el5_6.4
# - samba3x-winbind.i386:3.5.4-0.70.el5_6.4
# - samba3x-winbind-devel.i386:3.5.4-0.70.el5_6.4
# - samba3x.x86_64:3.5.4-0.70.el5_6.4
# - samba3x-client.x86_64:3.5.4-0.70.el5_6.4
# - samba3x-common.x86_64:3.5.4-0.70.el5_6.4
# - samba3x-debuginfo.x86_64:3.5.4-0.70.el5_6.4
# - samba3x-doc.x86_64:3.5.4-0.70.el5_6.4
# - samba3x-domainjoin-gui.x86_64:3.5.4-0.70.el5_6.4
# - samba3x-swat.x86_64:3.5.4-0.70.el5_6.4
# - samba3x-winbind.x86_64:3.5.4-0.70.el5_6.4
# - samba3x-winbind-devel.x86_64:3.5.4-0.70.el5_6.4
# - samba3x-debuginfo.i386:3.6.6-0.131.el5_9
# - samba3x-winbind.i386:3.6.6-0.131.el5_9
# - samba3x-winbind-devel.i386:3.6.6-0.131.el5_9
# - samba3x.x86_64:3.6.6-0.131.el5_9
# - samba3x-client.x86_64:3.6.6-0.131.el5_9
# - samba3x-common.x86_64:3.6.6-0.131.el5_9
# - samba3x-debuginfo.x86_64:3.6.6-0.131.el5_9
# - samba3x-doc.x86_64:3.6.6-0.131.el5_9
# - samba3x-domainjoin-gui.x86_64:3.6.6-0.131.el5_9
# - samba3x-swat.x86_64:3.6.6-0.131.el5_9
# - samba3x-winbind.x86_64:3.6.6-0.131.el5_9
# - samba3x-winbind-devel.x86_64:3.6.6-0.131.el5_9
#
# Last versions recommanded by security team:
# - samba3x-debuginfo.i386:3.6.23-12.el5_9
# - samba3x-winbind.i386:3.6.23-12.el5_9
# - samba3x-winbind-devel.i386:3.6.23-12.el5_9
# - samba3x.x86_64:3.6.23-12.el5_9
# - samba3x-client.x86_64:3.6.23-12.el5_9
# - samba3x-common.x86_64:3.6.23-12.el5_9
# - samba3x-debuginfo.x86_64:3.6.23-12.el5_9
# - samba3x-doc.x86_64:3.6.23-12.el5_9
# - samba3x-domainjoin-gui.x86_64:3.6.23-12.el5_9
# - samba3x-swat.x86_64:3.6.23-12.el5_9
# - samba3x-winbind.x86_64:3.6.23-12.el5_9
# - samba3x-winbind-devel.x86_64:3.6.23-12.el5_9
# - samba3x-debuginfo.i386:3.6.23-12.el5_9
# - samba3x-winbind.i386:3.6.23-12.el5_9
# - samba3x-winbind-devel.i386:3.6.23-12.el5_9
# - samba3x.x86_64:3.6.23-12.el5_9
# - samba3x-client.x86_64:3.6.23-12.el5_9
# - samba3x-common.x86_64:3.6.23-12.el5_9
# - samba3x-debuginfo.x86_64:3.6.23-12.el5_9
# - samba3x-doc.x86_64:3.6.23-12.el5_9
# - samba3x-domainjoin-gui.x86_64:3.6.23-12.el5_9
# - samba3x-swat.x86_64:3.6.23-12.el5_9
# - samba3x-winbind.x86_64:3.6.23-12.el5_9
# - samba3x-winbind-devel.x86_64:3.6.23-12.el5_9
#
# CVE List:
# - CVE-2015-0240
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
# Upgrade every affected samba3x package to the fixed 3.6.23 build.
# The original listed each package twice (one block per fixed release in the
# header); a single de-duplicated loop installs the same final versions.
for pkg in \
    samba3x-debuginfo.i386-3.6.23 \
    samba3x-winbind.i386-3.6.23 \
    samba3x-winbind-devel.i386-3.6.23 \
    samba3x.x86_64-3.6.23 \
    samba3x-client.x86_64-3.6.23 \
    samba3x-common.x86_64-3.6.23 \
    samba3x-debuginfo.x86_64-3.6.23 \
    samba3x-doc.x86_64-3.6.23 \
    samba3x-domainjoin-gui.x86_64-3.6.23 \
    samba3x-swat.x86_64-3.6.23 \
    samba3x-winbind.x86_64-3.6.23 \
    samba3x-winbind-devel.x86_64-3.6.23
do
    sudo yum install "$pkg" -y
done
|
Cyberwatch/cbw-security-fixes
|
Red_Hat_5/x86_64/2015/RHSA-2015:0253.sh
|
Shell
|
mit
| 3,934 |
# Run from the directory containing this script.
cd "$(dirname $0)"
# Download source distribution
# NOTE(review): the payload is a .zip piped through `tar zx`; this relies on
# bsdtar (the macOS default tar) auto-detecting the zip format — GNU tar
# would fail here. Confirm this only ever runs on macOS (.command suggests so).
curl -L http://www.winimage.com/zLibDll/unzip101h.zip | tar zx
# Generate Lua project
cd ../../
python yap.py --source examples/unzip-1.0.1/ --output examples/projects/unzip.mac --name unzip --platform MacOS
|
dmsovetov/pygling
|
examples/unzip-1.0.1/osx.command
|
Shell
|
mit
| 259 |
#!/usr/bin/env bash
# Install the vagrant-reload plugin required by the provisioning flow.
vagrant plugin install vagrant-reload
# Print "Done." in yellow, then reset the terminal color.
# (The original interpolated an undefined ${NC}, so the color was never reset.)
printf "\033[0;33mDone.\033[0m\n"
|
ytake/gardening-builder
|
setup.sh
|
Shell
|
mit
| 92 |
#!/bin/bash
# Install the day_watch helper scripts plus autostart entry, then launch.
# Target paths are quoted so values containing spaces don't word-split.
cp ./x-runner.sh "$GOPATH/bin"
cp ./x-monitor.sh "$GOPATH/bin"
cp ./day_watch.sh.desktop "$HOME/.config/autostart"
cp ./busy_beaver.png "$HOME/Documents"
# Replace this shell with the runner (found on PATH, e.g. via $GOPATH/bin).
exec x-runner.sh
|
n4lik/day_watch
|
install.sh
|
Shell
|
mit
| 177 |
#!/bin/bash
# Start a development Erlang node.
#   $1 - optional node short name (defaults to "er-dev")
node=${1:-er-dev}
# -sname: short node name; -pa: add deps and local ebin to the code path
# (deps/*/ebin must stay unquoted so the glob expands); -s er: start the app.
erl -sname "${node}" -pa deps/*/ebin ebin -s er
|
maximvl/er
|
start-dev.sh
|
Shell
|
mit
| 115 |
#!/bin/bash
# Sign a Juno release. On macOS: codesign Juno.app inside the .dmg and emit a
# -signed.dmg. On Windows build hosts (bash): sign juno.exe inside the .zip
# and emit a -signed.zip.
#   $1 - osx64 .dmg URL   $2 - win32 .zip URL   $3 - win64 .zip URL
if [ -z "$1" ] || [ -z "$2" ] || [ -z "$3" ]; then
  echo "Usage: $0 <osx64 url> <win32 url> <win64 url>"
  exit 1
fi
OSX64_URL="$1"
WIN32_URL="$2"
WIN64_URL="$3"
if [[ "$(uname)" == "Darwin" ]]; then
  # First, clean everything out
  rm -rf juno*
  hdiutil detach /Volumes/Juno 2>/dev/null
  # Next, download OSX .dmg
  FILENAME=$(basename "$OSX64_URL")
  curl -L "$OSX64_URL" > "$FILENAME"
  # Mount it as read-write (-shadow keeps modifications in a shadow file)
  echo "Mounting $FILENAME..."
  hdiutil attach "$FILENAME" -shadow
  # Go in and sign it!
  echo "Signing Juno.app..."
  ~/unlock_keychain.sh
  codesign -f -s "AFB379C0B4CBD9DB9A762797FC2AB5460A2B0DBE" --deep /Volumes/Juno/Juno.app
  # Capture the status before testing it: the original `if [[ $? != 0 ]];
  # then exit $?` always exited 0 because the [[ ]] test itself reset $?.
  rv=$?
  if [[ $rv != 0 ]]; then
    exit $rv
  fi
  # Umount, create new .dmg
  echo "Unmounting /Volumes/Juno..."
  hdiutil detach /Volumes/Juno
  echo "Creating ${FILENAME%.*}-signed.dmg..."
  hdiutil convert "$FILENAME" -format UDZO -o "${FILENAME%.*}-signed.dmg" -shadow -imagekey zlib-level=9
else
  # Pick the 32- or 64-bit artifact depending on the build host.
  if [[ "$(hostname)" == "win81x86" ]]; then
    WIN_URL="$WIN32_URL"
    FOLDER_NAME="juno-windows32"
  else
    WIN_URL="$WIN64_URL"
    FOLDER_NAME="juno-windows64"
  fi
  # First, clean everything out
  rm -rf juno* "$FOLDER_NAME"
  # Next, download windows .zip
  FILENAME=$(basename "$WIN_URL")
  curl -L "$WIN_URL" > "$FILENAME"
  # Next, unzip it into the current directory
  echo "Unzipping $FILENAME..."
  powershell -nologo -noprofile -command "& { Add-Type -A 'System.IO.Compression.FileSystem'; [IO.Compression.ZipFile]::ExtractToDirectory('$FILENAME', '.'); }"
  # Go into the windows directory (we assume that always exists), sign juno.exe
  echo "Signing juno.exe..."
  ~/sign.sh "$FOLDER_NAME/juno.exe"
  # Same $?-clobbering fix as the macOS branch above.
  rv=$?
  if [[ $rv != 0 ]]; then
    exit $rv
  fi
  # Zip it up
  echo "Zipping into ${FILENAME%.*}-signed.zip..."
  powershell -nologo -noprofile -command "& { Add-Type -A 'System.IO.Compression.FileSystem'; [IO.Compression.ZipFile]::CreateFromDirectory('$FOLDER_NAME', '${FILENAME%.*}-signed.zip'); }"
fi
|
yuyichao/julia-buildbot
|
commands/sign_juno.sh
|
Shell
|
mit
| 1,924 |
#!/bin/bash
###############################################################################
# This checks that all variables that need to be set by the user actually are
###############################################################################
# Sources ${SCRIPT_PATH}/user/user.sh (gitignored, per-user) and verifies the
# required MANIFEST_* variables are defined.
# Returns 0 on success, 1 on any missing file/variable.
# Fixes vs. original: `return -1` is not a valid return status in bash (it is
# rejected as an invalid option), so failures did not propagate; test operands
# are now quoted so values containing spaces don't break `[`.
function check_user_vars {
    # the user file is called user.sh
    # this is gitignored as its to be configured for each user
    if [ ! -e "${SCRIPT_PATH}/user/user.sh" ] ; then
        echo "user.sh needs to be created"
        return 1
    fi
    source "${SCRIPT_PATH}/user/user.sh"
    if [ -z "${MANIFEST_GIT}" ] ; then
        echo "MANIFEST_GIT should be set to the location of the git repository to clone containing the manifest files"
        return 1
    fi
    if [ -z "${MANIFEST_DIR}" ] ; then
        echo "MANIFEST_DIR should be set to the basename of the dir under ${PROJECT_ROOT} that will hold the manifest files"
        return 1
    fi
    return 0
}
|
DeonPoncini/scripts
|
user/user-check.sh
|
Shell
|
mit
| 923 |
#!/bin/sh
# Launch the dev server, bootstrapping the virtualenv on first run.
set -e
# No venv yet? Run the one-time setup before starting.
[ -d "venv" ] || ./setup.sh
venv/bin/python manage.py runserver
|
nihn/map-points
|
run.sh
|
Shell
|
mit
| 99 |
#!/bin/bash
# Smoke-test: initialise the "complex" RNN topology and render it with
# connlm-draw. Must run from the egs/tiny directory; $SHU_CASE selects the
# case directory under test/tmp.
set -e
if [ "`basename $PWD`" != "tiny" ]; then
  # Double quotes so $0 expands to the actual script name — the original
  # single-quoted string printed the literal text "$0".
  echo "You must run \"$0\" from the tiny/ directory."
  exit 1
fi
if [ -z "$SHU_CASE" ]; then
  echo '$SHU_CASE not defined'
  exit 1
fi
# Puts the connlm-* tools on PATH.
source ../steps/path.sh
_case_dir="test/tmp/case-$SHU_CASE/"
# Initialise model weights for the complex topology.
connlm-init --log-file="${_case_dir}/exp/log/complex.init.log" \
    "${_case_dir}/exp/rnn/output.clm" \
    "conf/rnn/complex.topo" \
    "${_case_dir}/exp/rnn/complex.init.clm"
# Render the initialised model to a Graphviz dot file.
connlm-draw --verbose=true --log-file="${_case_dir}/exp/log/complex.draw.log" \
    "mdl,-o:${_case_dir}/exp/rnn/complex.init.clm" \
    "test/output/case-$SHU_CASE.rnn.dot"
|
wantee/connLM
|
egs/tiny/test/test_rnn_topo.sh
|
Shell
|
mit
| 658 |
#!/bin/bash -e
# CI build script for the drydock/u14nod image.
# The RES_*/UP_* names match pipeline resources mounted under ./IN/.
export BRANCH=master
export IMAGE_NAME=drydock/u14nod
export RES_DOCKER_CREDS=docker-creds
export RES_REPO=u14nod-repo
export RES_IMAGE=u14nod-img
export UP_IMAGE=u14-img
# Read the upstream base image's latest version name from its version.json.
# Sets (exports): versionName.
findUpstreamBaseVersion() {
echo "Find Latest Version for" $UP_IMAGE
export versionName=$(cat ./IN/$UP_IMAGE/version.json | jq -r '.version.versionName')
echo "Completed find Latest Version for" $UP_IMAGE
}
# Log in to the Docker registry using credentials sourced from the mounted
# integration env file (expected to define $username, $password, $email).
dockerLogin() {
echo "Extracting docker creds"
. ./IN/$RES_DOCKER_CREDS/integration.env
echo "logging into Docker with username" $username
docker login -u $username -p $password -e $email
echo "Completed Docker login"
}
# Inspect the repo resource's version.json to see whether this run was
# triggered by a git tag; if so, capture the tag name and message.
# Sets (exports): IS_GIT_TAG, and on tag builds GIT_TAG, GIT_TAG_MSG.
checkIfTagBuild() {
echo "Check Tag Version for" $RES_REPO
export IS_GIT_TAG=$(cat ./IN/$RES_REPO/version.json | jq -r '.version.propertyBag.shaData.isGitTag')
if [ "$IS_GIT_TAG" = true ]; then
echo "This is a TAG build"
export GIT_TAG=$(cat ./IN/$RES_REPO/version.json | jq -r '.version.propertyBag.shaData.gitTagName')
export GIT_TAG_MSG=$(cat ./IN/$RES_REPO/version.json | jq -r '.version.propertyBag.shaData.gitTagMessage')
echo "Tag Name: " $GIT_TAG
echo "Tag Message: " $GIT_TAG_MSG
fi
# GIT_TAG is empty here on non-tag builds.
echo "Completed check for Tag, GIT_TAG: " $GIT_TAG
}
# Tag builds: retag the already-built :tip image with the git tag.
# Regular builds: build :tip from the repo's Dockerfile, substituting the
# upstream version (from findUpstreamBaseVersion) into the {{%TAG%}} marker.
createImage() {
if [ "$IS_GIT_TAG" = true ]; then
echo "Pulling " $IMAGE_NAME:tip
sudo docker pull $IMAGE_NAME:tip
sudo docker tag -f $IMAGE_NAME:tip $IMAGE_NAME:$GIT_TAG
#sudo docker tag -f $IMAGE_NAME:tip $IMAGE_NAME:prod
else
echo "Starting Docker build for" $IMAGE_NAME:tip
cd ./IN/$RES_REPO/gitRepo
sed -i "s/{{%TAG%}}/$versionName/g" Dockerfile
sudo docker build -t=$IMAGE_NAME:tip .
echo "Completed Docker build for" $IMAGE_NAME:$GIT_TAG
fi
}
# Push the tag created above: the git tag on tag builds, :tip otherwise.
dockerPush() {
if [ "$IS_GIT_TAG" = true ];
then
echo "Pushing Tag " $IMAGE_NAME:prod
sudo docker push $IMAGE_NAME:$GIT_TAG
#sudo docker push -f $IMAGE_NAME:prod
echo "Completed Pushing Tag" $IMAGE_NAME:prod
else
echo "Pushing Tag " $IMAGE_NAME:tip
sudo docker push $IMAGE_NAME:tip
echo "Completed Pushing Tag" $IMAGE_NAME:tip
fi
}
# Record the pushed tag for downstream jobs (tag builds only).
createOutState() {
# this is to make sure we don't trigger if tag build happens
if [ "$IS_GIT_TAG" = true ]; then
echo "Creating a state file for" $RES_IMAGE
echo versionName=$GIT_TAG > /build/state/$RES_IMAGE.env
cat /build/state/$RES_IMAGE.env
echo "Completed creating a state file for" $RES_IMAGE
fi
}
# Run the full pipeline: resolve upstream version, authenticate, detect tag
# builds, build/retag, push, and record output state for downstream jobs.
main() {
findUpstreamBaseVersion
dockerLogin
checkIfTagBuild
createImage
dockerPush
createOutState
}
main
|
dry-dock/u14nod
|
build.sh
|
Shell
|
mit
| 2,519 |
#!/bin/bash
# Regression check: byte-compare freshly produced vast-tools outputs against
# a baseline directory. Exits 1 on the first mismatch, 0 when all match.
#   $1 - baseline directory (contains $SAMPLE/...)
#   $2 - output directory to verify
set -ueo pipefail
DIRBASE=$1
DIROUT=$2
SAMPLE=NewFQ_Test_Hsa-51
# Compare with expression outcome file (paths quoted so spaces don't split).
cmp --silent "$DIRBASE/$SAMPLE/expr_out/$SAMPLE.cRPKM" "$DIROUT/expr_out/$SAMPLE.cRPKM" || exit 1
cmp --silent "$DIRBASE/$SAMPLE/to_combine/$SAMPLE.MULTI3X" "$DIROUT/to_combine/$SAMPLE.MULTI3X" || exit 1
exit 0
|
vastgroup/vast-tools
|
tests/tests.sh
|
Shell
|
mit
| 322 |
#! /bin/bash
# Act as a wrapper for query-jellyfish-counts.py
#
# For each simulation iteration directory, query the k-mer counts and write
# per-iteration stats / by-sample / count files into OUTPUT_DIR.
#   $1 - k-mer list   $2 - simulations dir   $3 - output dir   $4 - jellyfish binary
TOP_DIR=$(pwd)
KMERS=$1
SIMULATIONS=$2
OUTPUT_DIR=$3
JELLYFISH=$4
# Assume SIMULATION_DIR is as follows SIMULATION_DIR/${coverage}/${iteration}/jellyfish-counts.jf
mkdir -p "${OUTPUT_DIR}"
# Iterate with read -r over find output instead of word-splitting a backtick
# substitution, so directory names containing spaces are handled correctly.
while IFS= read -r i; do
    ITERATION=$(basename "${i}")
    STATS=${OUTPUT_DIR}/${ITERATION}-kmer-stats.txt
    BY_SAMPLE=${OUTPUT_DIR}/${ITERATION}-kmer-by-sample.txt
    COUNTS=${OUTPUT_DIR}/${ITERATION}-kmer-counts.txt
    "${TOP_DIR}/bin/query-jellyfish-counts.py" "${KMERS}" "${i}" "${JELLYFISH}" "${STATS}" "${BY_SAMPLE}" "${COUNTS}" 1>> "${OUTPUT_DIR}/stats.log" 2>&1
    gzip "${BY_SAMPLE}"
done < <(find "${SIMULATIONS}" -mindepth 1 -maxdepth 1 ! -path "*.txt")
|
Read-Lab-Confederation/nyc-subway-anthrax-study
|
data/05-custom-bcg-assay/bin/query-jellyfish-counts.sh
|
Shell
|
mit
| 688 |
#! /bin/bash
# Auto-generated benchmark helper: jump to the SWEET source root and build the
# swe_plane binary with the exact scons configuration used by this benchmark.
SWEETROOT="/home/hpc/pr63so/di69fol/workspace/sweet/benchmarks_plane/polvani/script_swe_plane_polvani_N_bpolvani_g9.81_h10000_f7.2921e-05_p0_a6371220_u2e-05_U8_rob1_fsph0_tsm_ln_erk_tso4_tsob1_C000.0025_PR0.4_PF0.1_M0200_MPI_space01_time001/../../../"
cd "$SWEETROOT"
scons --mode=release --compiler=gnu --debug-symbols=enable --simd=enable --mic=disable --fortran-source=disable --program-binary-name= --sweet-mpi=disable --threading=omp --rexi-thread-parallel-sum=disable --numa-block-allocator=2 --program=swe_plane --parareal=none --libpfasst=disable --pfasst-cpp=disable --libfft=enable --libsph=disable --mkl=disable --plane-spectral-space=enable --plane-spectral-dealiasing=enable --sphere-spectral-space=disable --sphere-spectral-dealiasing=disable --libxml=disable --compiler-c-exec= --compiler-cpp-exec= --compiler-fortran-exec= --gui=disable --quadmath=enable
|
schreiberx/sweet
|
benchmarks_plane/polvani/compile_mac-login-intel.sh
|
Shell
|
mit
| 888 |
# Trace commands for CI log visibility.
set -x
# On HHVM builds, bootstrap composer via the installer and run it through
# hhvm with relaxed timeouts; on every other PHP build use the preinstalled
# composer directly.
case "$TRAVIS_PHP_VERSION" in
    hhvm|hhvm-nightly)
        curl -sS https://getcomposer.org/installer > composer-installer.php
        hhvm composer-installer.php
        hhvm -v ResourceLimit.SocketDefaultTimeout=30 -v Http.SlowQueryThreshold=30000 composer.phar install
        ;;
    *)
        composer self-update
        composer install
        ;;
esac
|
adlawson/php-veval
|
.travis.install.sh
|
Shell
|
mit
| 360 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.