code | repo_name | path | language | license | size
---|---|---|---|---|---
stringlengths 2–1.05M | stringlengths 5–110 | stringlengths 3–922 | stringclasses 1 value | stringclasses 15 values | int64 2–1.05M
#!/bin/bash
SDL="SDL2"
INCLUDE_DIR="/usr/include/${SDL}"
SCRIPT=`realpath -s $0`
DIR=`dirname $SCRIPT`
DIR=`dirname $DIR`
mkdir -p $DIR/include/${SDL}
mkdir -p $DIR/src
rm -rf $DIR/include/${SDL}/*.h
rm -rf $DIR/src/sdlew.c
echo "Generating sdlew headers..."
UNSUPPORTED="SDL_MemoryBarrierRelease SDL_MemoryBarrierAcquire SDL_AtomicCAS SDL_AtomicCASPtr \
SDL_iPhoneSetAnimationCallback SDL_iPhoneSetEventPump SDL_AndroidGetJNIEnv SDL_AndroidGetActivity \
SDL_AndroidGetActivity SDL_AndroidGetInternalStoragePath SDL_AndroidGetExternalStorageState \
SDL_AndroidGetExternalStoragePath SDL_CreateShapedWindow SDL_IsShapedWindow tSDL_SetWindowShape \
SDL_GetShapedWindowMode"
for header in $INCLUDE_DIR/*; do
filename=`basename $header`
cat $header \
| sed -r 's/extern DECLSPEC ((const )?[a-z0-9_]+(\s\*)?)\s?SDLCALL /typedef \1 SDLCALL t/i' \
> $DIR/include/${SDL}/$filename
line_num=`cat $DIR/include/${SDL}/$filename | grep -n "Ends C function" | cut -d : -f 1`
if [ ! -z "$line_num" ]; then
functions=`grep -E 'typedef [A-Za-z0-9_ \*]+ SDLCALL' $DIR/include/${SDL}/$filename \
| sed -r 's/typedef [A-Za-z0-9_ \*]+ SDLCALL t([a-z0-9_]+).*/extern t\1 *\1;/i'`
functions=`echo "${functions}" | sed -e 's/[\/&]/\\\&/g'`
echo "$functions" | while read function; do
if [ -z "$function" ]; then
continue;
fi
func_name=`echo $function | cut -d '*' -f 2 | sed -r 's/;//'`
if [ ! -z "`echo "$UNSUPPORTED" | grep $func_name`" ]; then
continue;
fi
if [ "$func_name" == "SDL_memcpy" ]; then
line_num=`cat $DIR/include/${SDL}/$filename | grep -n "SDL_memcpy4" | cut -d : -f 1`
sed -ri "${line_num}s/(.*)/${function}\n\1/" $DIR/include/${SDL}/$filename
else
sed -ri "${line_num}s/(.*)/${function}\n\1/" $DIR/include/${SDL}/$filename
fi
line_num=`cat $DIR/include/${SDL}/$filename | grep -n "Ends C function" | cut -d : -f 1`
done
line_num=`cat $DIR/include/${SDL}/$filename | grep -n "Ends C function" | cut -d : -f 1`
sed -ri "${line_num}s/(.*)/\n\1/" $DIR/include/${SDL}/$filename
fi
if [ $filename == "SDL_stdinc.h" ]; then
cat $header | grep -E '#if(def)? (defined\()?HAVE_' | sed -r 's/#if(def)? //' | while read check; do
func_names=`cat $DIR/include/${SDL}/$filename \
| grep -A 8 "$check\$" \
| grep -v struct \
| grep 'typedef' \
| sed -r 's/typedef [a-z0-9_ \*]+ SDLCALL ([a-z0-9_]+).*/\1/i'`
full_check=`echo "${check}" | sed -e 's/[\/&]/\\\&/g'`
if [ ! -z "`echo $full_check | grep defined`" ]; then
full_check="#if !($full_check)"
else
full_check="#ifndef $full_check"
fi
for func_name in $func_names; do
line_num=`grep -n "extern ${func_name} \*" $DIR/include/${SDL}/$filename | cut -d : -f 1`
let prev_num=line_num-1
if [ -z "`cat $DIR/include/${SDL}/$filename | head -n $prev_num | tail -n 1 | grep '#if'`" ]; then
sed -ri "${line_num}s/(.*)/$full_check \/* GEN_CHECK_MARKER *\/\n\1\n#endif \/* GEN_CHECK_MARKER *\//" $DIR/include/${SDL}/$filename
fi
done
done
fi
done
cat << EOF > $DIR/include/sdlew.h
/*
* Copyright 2014 Blender Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License
*/
#ifndef __SDL_EW_H__
#define __SDL_EW_H__
#ifdef __cplusplus
extern "C" {
#endif
enum {
SDLEW_SUCCESS = 0,
SDLEW_ERROR_OPEN_FAILED = -1,
SDLEW_ERROR_ATEXIT_FAILED = -2,
SDLEW_ERROR_VERSION = -3,
};
int sdlewInit(void);
#ifdef __cplusplus
}
#endif
#endif /* __SDL_EW_H__ */
EOF
echo "Generating sdlew sources..."
cat << EOF > $DIR/src/sdlew.c
/*
* Copyright 2014 Blender Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License
*/
#ifdef _MSC_VER
# define snprintf _snprintf
# define popen _popen
# define pclose _pclose
# define _CRT_SECURE_NO_WARNINGS
#endif
#include "sdlew.h"
#include "${SDL}/SDL.h"
#include "${SDL}/SDL_syswm.h"
#include <assert.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>
#ifdef _WIN32
# define WIN32_LEAN_AND_MEAN
# define VC_EXTRALEAN
# include <windows.h>
/* Utility macros. */
typedef HMODULE DynamicLibrary;
# define dynamic_library_open(path) LoadLibrary(path)
# define dynamic_library_close(lib) FreeLibrary(lib)
# define dynamic_library_find(lib, symbol) GetProcAddress(lib, symbol)
#else
# include <dlfcn.h>
typedef void* DynamicLibrary;
# define dynamic_library_open(path) dlopen(path, RTLD_NOW)
# define dynamic_library_close(lib) dlclose(lib)
# define dynamic_library_find(lib, symbol) dlsym(lib, symbol)
#endif
#define SDL_LIBRARY_FIND_CHECKED(name) \
name = (t##name *)dynamic_library_find(lib, #name); \
assert(name);
#define SDL_LIBRARY_FIND(name) \
name = (t##name *)dynamic_library_find(lib, #name);
static DynamicLibrary lib;
EOF
content=`grep --no-filename -ER "extern tSDL|GEN_CHECK_MARKER" $DIR/include/${SDL}/`
echo "$content" | sed -r 's/extern t([a-z0-9_]+).*/t\1 *\1;/gi' >> $DIR/src/sdlew.c
cat << EOF >> $DIR/src/sdlew.c
static void sdlewExit(void) {
if(lib != NULL) {
/* Ignore errors. */
dynamic_library_close(lib);
lib = NULL;
}
}
/* Implementation function. */
int sdlewInit(void) {
/* Library paths. */
#ifdef _WIN32
/* Expected in c:/windows/system or similar, no path needed. */
const char *path = "SDL2.dll";
#elif defined(__APPLE__)
/* Default installation path. */
const char *path = "/usr/local/cuda/lib/libSDL2.dylib";
#else
const char *path = "libSDL2.so";
#endif
static int initialized = 0;
static int result = 0;
int error;
if (initialized) {
return result;
}
initialized = 1;
error = atexit(sdlewExit);
if (error) {
result = SDLEW_ERROR_ATEXIT_FAILED;
return result;
}
/* Load library. */
lib = dynamic_library_open(path);
if (lib == NULL) {
result = SDLEW_ERROR_OPEN_FAILED;
return result;
}
EOF
echo "$content" | sed -r 's/extern t([a-z0-9_]+).*/ SDL_LIBRARY_FIND(\1);/gi' >> $DIR/src/sdlew.c
cat << EOF >> $DIR/src/sdlew.c
result = SDLEW_SUCCESS;
return result;
}
EOF
sed -i 's/\s\/\* GEN_CHECK_MARKER \*\///g' $DIR/src/sdlew.c
sed -i 's/\s\/\* GEN_CHECK_MARKER \*\///g' $DIR/include/${SDL}/SDL_stdinc.h
| pawkoz/dyplom | blender/extern/sdlew/auto/sdlew_gen.sh | Shell | gpl-2.0 | 7,357 |
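A minimal usage sketch for the generator above. The checkout path and compiler flags are assumptions; the script itself only needs the SDL2 headers under /usr/include/SDL2 and derives its output root from its own location (two dirname calls):

# Run from the tree's auto/ directory; writes ../include/SDL2/*.h,
# ../include/sdlew.h and ../src/sdlew.c.
cd sdlew/auto && ./sdlew_gen.sh
# Compile the generated wrangler (hypothetical flags):
cd .. && gcc -Iinclude -c src/sdlew.c -o sdlew.o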
#!/bin/bash
# uninstall owncloud in reverse order
CURRENT_DIR=$(pwd)
apt-get -y purge owncloud owncloud-server owncloud-config-apache
rm -f /etc/apt/sources.list.d/owncloud.list
ocpath='/var/www/owncloud'
rm -rf "$ocpath"
# uninstall apache
cd ../web-server
/bin/bash uninstall-web_server.sh
cd $CURRENT_DIR
# uninstall mysql-server
cd ../sql-server
/bin/bash uninstall-sql_server.sh
cd $CURRENT_DIR
apt-get -y autoremove
| edvapp/networkbox | owncloud/uninstall-owncloud.sh | Shell | gpl-2.0 | 427 |
#!/bin/bash
# Determine the Path
function realpath() {
local r="$1"; local t=$(readlink "$r")
while [ -n "$t" ]; do
r=$(cd $(dirname "$r") && cd $(dirname "$t") && pwd -P)/$(basename "$t")
t=$(readlink "$r")
done
echo "$r"
}
MY_DIR=`dirname $(realpath $0)`
BACKUP=$MY_DIR/../backups
CONFIG=$MY_DIR/../config
if [ ! -e $BACKUP ]; then
echo Backup directory not found. Cannot continue.
exit 1
fi
if [ ! -f "$BACKUP/krb5.conf.orig" ]; then
cp /etc/krb5.conf $BACKUP/krb5.conf.orig
fi
#### KERBEROS CONFIGURATIONS
cp -f $CONFIG/krb5.conf /etc/krb5.conf
| fcaviggia/hardening-script-el6 | config-scripts/krb5.sh | Shell | gpl-2.0 | 577 |
#
# Disable haldaemon for all run levels
#
/sbin/chkconfig --level 0123456 haldaemon off
#
# Stop haldaemon if currently running
#
/sbin/service haldaemon stop
| mpreisler/scap-security-guide-debian | scap-security-guide-0.1.21/RHEL/6/input/fixes/bash/service_haldaemon_disabled.sh | Shell | gpl-2.0 | 161 |
#!/bin/sh
TEST_TYPE=klipstest
TESTNAME=west-esp-short-01
TESTHOST=west
EXITONEMPTY=--exitonempty
ARPREPLY=--arpreply
PUBINPUT=../inputs/01-sunrise-sunset-esp-short.pcap
REFPRIVOUTPUT=spi1-cleartext.txt
REF_CONSOLE_OUTPUT=spi1-console.txt
REF26_CONSOLE_OUTPUT=spi1-console.txt
REF_CONSOLE_FIXUPS="kern-list-fixups.sed nocr.sed"
REF_CONSOLE_FIXUPS="$REF_CONSOLE_FIXUPS script-only.sed"
REF_CONSOLE_FIXUPS="$REF_CONSOLE_FIXUPS klips-spi-sanitize.sed"
REF_CONSOLE_FIXUPS="$REF_CONSOLE_FIXUPS ipsec-look-sanitize.sed"
REF_CONSOLE_FIXUPS="$REF_CONSOLE_FIXUPS east-prompt-splitline.pl"
REF_CONSOLE_FIXUPS="$REF_CONSOLE_FIXUPS klips-debug-sanitize.sed"
TCPDUMPFLAGS="-n -E 3des-cbc-hmac96:0x4043434545464649494a4a4c4c4f4f515152525454575758"
INIT_SCRIPT=spi1-in.sh
#NETJIGDEBUG=true
| ZHAW-INES/rioxo-uClinux-dist | openswan/testing/klips/west-esp-short-01/testparams.sh | Shell | gpl-2.0 | 780 |
#!/bin/bash
# load modules needed by anaconda
# load anaconda-lib for the subsequent scripts in this hook
. /lib/anaconda-lib.sh
ARCH=$(uname -m)
KERNEL=$(uname -r)
MODULE_LIST="cramfs squashfs iscsi_tcp "
# if no file matches the glob expand it to the empty string
# we need this when no .ko files can be found
shopt -s nullglob
SCSI_MODULES=/lib/modules/$KERNEL/kernel/drivers/scsi/device_handler/
for m in $SCSI_MODULES/*.ko; do
# Shell spew to work around not having basename
# Trim the path prefix, then the .ko suffix
a="${m##*/}"
MODULE_LIST+=" ${a%.*}"
done
shopt -u nullglob
if [ "$ARCH" != "s390" -a "$ARCH" != "s390x" ]; then
MODULE_LIST+=" floppy edd iscsi_ibft "
else
MODULE_LIST+=" hmcdrv "
fi
if [ "$ARCH" = "ppc" ]; then
MODULE_LIST+=" spufs "
fi
MODULE_LIST+=" raid0 raid1 raid5 raid6 raid456 raid10 linear dm-mod dm-zero \
dm-mirror dm-snapshot dm-multipath dm-round-robin dm-crypt cbc \
sha256 lrw xts "
for m in $MODULE_LIST; do
if modprobe $m ; then
debug_msg "$m was loaded"
else
debug_msg "$m was NOT loaded"
fi
done
| vathpela/anaconda | dracut/anaconda-modprobe.sh | Shell | gpl-2.0 | 1,144 |
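The nullglob idiom above is worth seeing in isolation; a standalone sketch (the module directory is whatever the running kernel provides):

#!/bin/bash
shopt -s nullglob   # an unmatched glob expands to nothing, not to itself
list=""
for m in /lib/modules/$(uname -r)/kernel/drivers/scsi/device_handler/*.ko; do
    a="${m##*/}"        # strip the directory prefix (like basename)
    list+=" ${a%.*}"    # strip the .ko suffix
done
shopt -u nullglob
echo "modules:$list"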
wget http://ftp.drupal.org/files/projects/drupal-7.0.tar.gz
tar -zxvf drupal-7.0.tar.gz
chmod 777 -R /var/www
mkdir /var/www/drupal1
mv drupal-7.0/* drupal-7.0/.htaccess /var/www/drupal1/
mkdir /var/www/drupal1/sites/default/files
chown www-data:www-data /var/www/drupal1/sites/default/files/
cp /var/www/drupal1/sites/default/default.settings.php /var/www/drupal1/sites/default/settings.php
chmod 777 -R /var/www/drupal1/sites/default/settings.php
| dkgndec/snehi | sites/default/files/drupal.sh | Shell | gpl-2.0 | 451 |
#!/bin/bash
function start_resource_frontend(){
unique_resource_name="$1"
# Uncomment to debug
# DEBUG_PREFIX="echo "
cmd="${DEBUG_PREFIX}curl"
# Specify password without making it visible in process
# list (e.g. 'ps awwx')
$cmd \
--insecure \
--cert $certfile \
--key $key \
--pass `awk '/pass/ {print $2}' $MiGuserconf` \
--url "$migserver/cgi-bin/startfe.py?unique_resource_name=$unique_resource_name"
}
function usage(){
echo "Usage..."
echo "start_resource_frontend.sh unique_resource_name"
echo "Example: start_resource_frontend.sh dido.imada.sdu.dk.0"
}
########
# Main #
########
MiGuserconf=~/.MiG/MiGuser.conf
if [ ! -r $MiGuserconf ]; then
echo "start_resource_frontend.sh requires a readable configuration in $MiGuserconf"
usage
exit 1
fi
migserver=`awk '/migserver/ {print $2}' $MiGuserconf`
certfile=`awk '/certfile/ {print $2}' $MiGuserconf`
key=`awk '/key/ {print $2}' $MiGuserconf`
if [ $# -eq 1 ]; then
start_resource_frontend $1
else
usage
exit 1
fi
| heromod/migrid | mig/resource/start_resource_frontend.sh | Shell | gpl-2.0 | 1,059 |
#!/bin/bash
#PBS -l walltime=4:00:00
#PBS -l nodes=1:ppn=2
#PBS -l vmem=32G
#PBS -N WAF_8_1_1_250_100_0_0_69_82
cd /zhome/fc/e/102910/maritime-vrp/build
LD_LIBRARY_PATH=/zhome/fc/e/102910/gcc/lib64 ./maritime_vrp ../data/old_thesis_data/program_params.json ../data/new/WAF_8_1_1_250_100_0_0_69_82.json
| OR-Bologna/maritime-vrp | opt/launchers/WAF_8_1_1_250_100_0_0_69_82.sh | Shell | gpl-3.0 | 302 |
#!/usr/bin/env bash
echo filename is: $1
echo
echo First $2 lines of file $1 are:
head -n $2 $1
| sanger-pathogens/pathogen-informatics-training | Notebooks/Unix/bash_scripts/scripts/options_example.sh | Shell | gpl-3.0 | 98 |
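A hypothetical invocation of the two-parameter script above (the file name is invented):

./options_example.sh sequences.txt 5
# filename is: sequences.txt
#
# First 5 lines of file sequences.txt are:
# ...first five lines of the file...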
#!/bin/bash
target=~/.bashrc
grep ":./bin:" $target >>/dev/null
result=$?
if [[ $result -ne 0 ]]; then
cat <<EOT >>$target
if [[ ":\$PATH:" != *":./bin:"* ]]; then
export PATH="\${PATH}:./bin"
fi
EOT
fi
grep ":scripts/designer/bin:" $target | grep PATH >>/dev/null
result=$?
if [[ $result -ne 0 ]]; then
here=`realpath ../`
cat <<EOT >>$target
if [[ ":\$PATH:" != *":scripts/designer/bin:"* ]]; then
export PATH="\${PATH}:$here/scripts/designer/bin"
export LABTAINER_DIR=$here
fi
EOT
fi
| cliffe/SecGen | modules/utilities/unix/labtainers/files/Labtainers-master/setup_scripts/fix-bashrc.sh | Shell | gpl-3.0 | 527 |
#!/bin/bash
nosetests
exit
| philharmonic/philharmonic | test.sh | Shell | gpl-3.0 | 29 |
#!/bin/sh
echo "Existing package script"
| CZ-NIC/turris-updater | tests/system/list-dir.sys_update/output/bin/existing.sh | Shell | gpl-3.0 | 41 |
#!/bin/sh
PRO_FILE="translations.pro"
echo "TEMPLATE = app" > $PRO_FILE
echo "TRANSLATIONS += exaro_ro_RO.ts exaro_ru_RU.ts exaro_ru_UA.ts exaro_it_IT.ts exaro_ar_DZ.ts exaro_fr_FR.ts" >> $PRO_FILE
for x in `find ../ -name '*.cpp'|grep -v "moc_"|grep -v "qrc_"`; do
echo "SOURCES += $x" >> $PRO_FILE
done;
#echo "\n\n" >> $PRO_FILE
for x in `find ../ -name '*.ui'`; do
echo "FORMS += $x" >> $PRO_FILE
done;
lupdate -no-obsolete $PRO_FILE
rm $PRO_FILE
| shujaatak/exaro | translations/prepare.sh | Shell | gpl-3.0 | 463 |
#!/bin/sh
Select_eth_device ()
{
# Boot type in initramfs's config
bootconf=$(egrep '^BOOT=' /conf/initramfs.conf | tail -1)
# can be superseded by command line (used by Debian-Live's netboot for example)
for ARGUMENT in ${LIVE_BOOT_CMDLINE}
do
case "${ARGUMENT}" in
netboot=*)
NETBOOT="${ARGUMENT#netboot=}"
;;
esac
done
if [ "$bootconf" != "BOOT=nfs" ] && [ -z "$NETBOOT" ] && [ -z "$FETCH" ] && [ -z "$FTPFS" ] && [ -z "$HTTPFS" ]
then
# Not a net boot : nothing to do
return
fi
# we want to do some basic IP
modprobe -q af_packet
# Available Ethernet interfaces ?
l_interfaces=""
# See if we can derive the boot device
Device_from_bootif
if [ -z "$DEVICE" ]
then
echo "Waiting for ethernet card(s) up... If this fails, maybe the ethernet card is not supported by the kernel `uname -r`?"
while [ -z "$l_interfaces" ]
do
l_interfaces="$(cd /sys/class/net/ && ls -d eth* 2>/dev/null)"
done
if [ $(echo $l_interfaces | wc -w) -lt 2 ]
then
# only one interface : no choice
echo "DEVICE=$l_interfaces" >> /conf/param.conf
return
fi
# If user force to use specific device, write it
for ARGUMENT in ${LIVE_BOOT_CMDLINE}
do
case "${ARGUMENT}" in
live-netdev=*)
NETDEV="${ARGUMENT#live-netdev=}"
echo "DEVICE=$NETDEV" >> /conf/param.conf
echo "Found live-netdev parameter, forcing to to use network device $NETDEV."
return
;;
esac
done
else
l_interfaces="$DEVICE"
fi
found_eth_dev=""
while true
do
echo -n "Looking for a connected Ethernet interface ..."
for interface in $l_interfaces
do
# ATTR{carrier} is not set if this is not done
echo -n " $interface ?"
ipconfig -c none -d $interface -t 1 >/dev/null 2>&1
done
echo ''
for step in 1 2 3 4 5
do
for interface in $l_interfaces
do
carrier=$(cat /sys/class/net/$interface/carrier \
2>/dev/null)
# link detected
case "${carrier}" in
1)
echo "Connected $interface found"
# inform initrd's init script :
found_eth_dev="$found_eth_dev $interface"
;;
esac
done
if [ -n "$found_eth_dev" ]
then
echo "DEVICE='$found_eth_dev'" >> /conf/param.conf
return
else
# wait a bit
sleep 1
fi
done
done
}
| ruibarreira/linuxtrail | lib/live/boot/9990-select-eth-device.sh | Shell | gpl-3.0 | 2,267 |
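The ${ARGUMENT#netboot=} construct above is the usual kernel-cmdline parsing idiom; a self-contained sketch with an invented command line (in the real script the value comes from the boot loader):

#!/bin/sh
LIVE_BOOT_CMDLINE="boot=live netboot=nfs live-netdev=eth1"
for ARGUMENT in ${LIVE_BOOT_CMDLINE}
do
	case "${ARGUMENT}" in
		live-netdev=*)
			# keep only the text after "live-netdev="
			NETDEV="${ARGUMENT#live-netdev=}"
			;;
	esac
done
echo "NETDEV=$NETDEV"   # prints: NETDEV=eth1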
#!/usr/bin/env bash
#
# A library to simplify using the SBT launcher from other packages.
# Note: This should be used by tools like giter8/conscript etc.
# TODO - Should we merge the main SBT script with this library?
if test -z "$HOME"; then
declare -r script_dir="$(dirname "$script_path")"
else
declare -r script_dir="$HOME/.sbt"
fi
declare -a residual_args
declare -a java_args
declare -a scalac_args
declare -a sbt_commands
declare -a maven_profiles
if test -x "$JAVA_HOME/bin/java"; then
echo -e "Using $JAVA_HOME as default JAVA_HOME."
echo "Note, this will be overridden by -java-home if it is set."
declare java_cmd="$JAVA_HOME/bin/java"
else
declare java_cmd=java
fi
echoerr () {
echo 1>&2 "$@"
}
vlog () {
[[ $verbose || $debug ]] && echoerr "$@"
}
dlog () {
[[ $debug ]] && echoerr "$@"
}
acquire_sbt_jar () {
SBT_VERSION=`awk -F "=" '/sbt\.version/ {print $2}' ./project/build.properties`
URL1=http://typesafe.artifactoryonline.com/typesafe/ivy-releases/org.scala-sbt/sbt-launch/${SBT_VERSION}/sbt-launch.jar
URL2=http://repo.typesafe.com/typesafe/ivy-releases/org.scala-sbt/sbt-launch/${SBT_VERSION}/sbt-launch.jar
JAR=build/sbt-launch-${SBT_VERSION}.jar
sbt_jar=$JAR
if [[ ! -f "$sbt_jar" ]]; then
# Download sbt launch jar if it hasn't been downloaded yet
if [ ! -f "${JAR}" ]; then
# Download
printf "Attempting to fetch sbt\n"
JAR_DL="${JAR}.part"
if [ $(command -v curl) ]; then
(curl --silent ${URL1} > "${JAR_DL}" || curl --silent ${URL2} > "${JAR_DL}") && mv "${JAR_DL}" "${JAR}"
elif [ $(command -v wget) ]; then
(wget --quiet ${URL1} -O "${JAR_DL}" || wget --quiet ${URL2} -O "${JAR_DL}") && mv "${JAR_DL}" "${JAR}"
else
printf "You do not have curl or wget installed, please install sbt manually from http://www.scala-sbt.org/\n"
exit 1
fi
fi
if [ ! -f "${JAR}" ]; then
# We failed to download
printf "Our attempt to download sbt locally to ${JAR} failed. Please install sbt manually from http://www.scala-sbt.org/\n"
exit 1
fi
printf "Launching sbt from ${JAR}\n"
fi
}
execRunner () {
# print the arguments one to a line, quoting any containing spaces
[[ $verbose || $debug ]] && echo "# Executing command line:" && {
for arg; do
if printf "%s\n" "$arg" | grep -q ' '; then
printf "\"%s\"\n" "$arg"
else
printf "%s\n" "$arg"
fi
done
echo ""
}
"$@"
}
addJava () {
dlog "[addJava] arg = '$1'"
java_args=( "${java_args[@]}" "$1" )
}
enableProfile () {
dlog "[enableProfile] arg = '$1'"
maven_profiles=( "${maven_profiles[@]}" "$1" )
export SBT_MAVEN_PROFILES="${maven_profiles[@]}"
}
addSbt () {
dlog "[addSbt] arg = '$1'"
sbt_commands=( "${sbt_commands[@]}" "$1" )
}
addResidual () {
dlog "[residual] arg = '$1'"
residual_args=( "${residual_args[@]}" "$1" )
}
addDebugger () {
addJava "-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=$1"
}
# a ham-fisted attempt to move some memory settings in concert
# so they need not be dicked around with individually.
get_mem_opts () {
local mem=${1:-2048}
local perm=$(( $mem / 4 ))
(( $perm > 256 )) || perm=256
(( $perm < 4096 )) || perm=4096
local codecache=$(( $perm / 2 ))
echo "-Xms${mem}m -Xmx${mem}m -XX:MaxPermSize=${perm}m -XX:ReservedCodeCacheSize=${codecache}m"
}
require_arg () {
local type="$1"
local opt="$2"
local arg="$3"
if [[ -z "$arg" ]] || [[ "${arg:0:1}" == "-" ]]; then
echo "$opt requires <$type> argument" 1>&2
exit 1
fi
}
is_function_defined() {
declare -f "$1" > /dev/null
}
process_args () {
while [[ $# -gt 0 ]]; do
case "$1" in
-h|-help) usage; exit 1 ;;
-v|-verbose) verbose=1 && shift ;;
-d|-debug) debug=1 && shift ;;
-ivy) require_arg path "$1" "$2" && addJava "-Dsbt.ivy.home=$2" && shift 2 ;;
-mem) require_arg integer "$1" "$2" && sbt_mem="$2" && shift 2 ;;
-jvm-debug) require_arg port "$1" "$2" && addDebugger $2 && shift 2 ;;
-batch) exec </dev/null && shift ;;
-sbt-jar) require_arg path "$1" "$2" && sbt_jar="$2" && shift 2 ;;
-sbt-version) require_arg version "$1" "$2" && sbt_version="$2" && shift 2 ;;
-java-home) require_arg path "$1" "$2" && java_cmd="$2/bin/java" && export JAVA_HOME=$2 && shift 2 ;;
-D*) addJava "$1" && shift ;;
-J*) addJava "${1:2}" && shift ;;
-P*) enableProfile "$1" && shift ;;
*) addResidual "$1" && shift ;;
esac
done
is_function_defined process_my_args && {
myargs=("${residual_args[@]}")
residual_args=()
process_my_args "${myargs[@]}"
}
}
run() {
# no jar? download it.
[[ -f "$sbt_jar" ]] || acquire_sbt_jar "$sbt_version" || {
# still no jar? uh-oh.
echo "Download failed. Obtain the sbt-launch.jar manually and place it at $sbt_jar"
exit 1
}
# process the combined args, then reset "$@" to the residuals
process_args "$@"
set -- "${residual_args[@]}"
argumentCount=$#
# run sbt
execRunner "$java_cmd" \
${SBT_OPTS:-$default_sbt_opts} \
$(get_mem_opts $sbt_mem) \
${java_opts} \
${java_args[@]} \
-jar "$sbt_jar" \
"${sbt_commands[@]}" \
"${residual_args[@]}"
}
| hengyicai/OnlineAggregationUCAS | build/sbt-launch-lib.bash | Shell | apache-2.0 | 5,285 |
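A minimal sketch of a consumer of the library above, assuming it is sourced from the same directory; usage and process_my_args are the two hooks the library expects a caller to (optionally) provide, and default_sbt_opts is a hypothetical default:

#!/usr/bin/env bash
. "$(dirname "$0")/sbt-launch-lib.bash"

declare -r default_sbt_opts="-Dfile.encoding=UTF-8"   # hypothetical defaults

usage () {
  echo "Usage: $0 [options] [sbt commands]"
}

# Optional hook: process_args hands the residual (non-option) args here.
process_my_args () {
  for arg in "$@"; do
    addResidual "$arg"
  done
}

run "$@"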
#!/bin/bash
# ----------------------------------------------------------------------------
# Copyright 2016 Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------- END-OF-FILE ----------------------------------
#==============================================================================
# Syntax: bdedox_genhtmlmain.sh [-h] |
# [-i doxydir] [-o htmldir] [-r header]
# [-p project] [-n projectno]
# [-c cfgfile] [file ... ]
#
# Purpose: This script invokes 'doxygen' to generate the 'main.html' landing
# page for the project being documented.
#==============================================================================
syntax="Syntax: bdedox_genhtmlmain.sh [-h] |"
syntax="${syntax}\n\t [-i doxydir] -o [htmldir] [-r header]"
syntax="${syntax}\n\t [-p projectname] [-n projectnum]"
syntax="${syntax}\n\t [-c cfgfile]"
syntax="${syntax}\nwhere:"
syntax="${syntax}\n\t-i doxydir directory containing doxygen files (input)"
syntax="${syntax}\n\t default: current directory"
syntax="${syntax}\n\t-o htmldir directory containing doxygen files (input)"
syntax="${syntax}\n\t default: ./html"
syntax="${syntax}\n\t-r header HTML header file"
syntax="${syntax}\n\t default: doxydir/BDEQuickLinks.header"
syntax="${syntax}\n\t-p project Project name"
syntax="${syntax}\n\t default: \"\""
syntax="${syntax}\n\t-n projectno Project number"
syntax="${syntax}\n\t default: \"\""
syntax="${syntax}\n\t-c cfgfile Doxygen configuration file"
syntax="${syntax}\n\t default: ./bdedox_doxygen.cfg"
#------------------------------------------------------------------------------
# Parameter Parsing
#------------------------------------------------------------------------------
DOXYGEN_DOXYDIR="."
DOXYGEN_PROJECT_NAME=""
DOXYGEN_PROJECT_NUMBER=""
progDir=${0%/*}
CFGFILE="$progDir/bdedox_doxygen.cfg"
while getopts ":hi:o:p:n:c:" opt; do
case $opt in
h )
echo "${syntax}"
exit 0;;
i )
DOXYGEN_DOXYDIR=${OPTARG}
;;
o )
DOXYGEN_HTMLDIR=${OPTARG}
;;
p )
DOXYGEN_PROJECT_NAME=${OPTARG}
;;
n )
DOXYGEN_PROJECT_NUMBER=${OPTARG}
;;
c )
CFGFILE=${OPTARG}
;;
* )
echo >&2 "${syntax}"
exit 1;;
esac
done
shift $(($OPTIND - 1))
: ${DOXYGEN_HTML_HEADER:=$DOXYGEN_DOXYDIR/BDEQuickLinks.header}
: ${DOXYGEN_HTMLDIR:=$DOXYGEN_DOXYDIR/html}
#------------------------------------------------------------------------------
# Check 'doxygen' existence and version.
#------------------------------------------------------------------------------
DOXYGEN_BIN=`which doxygen`
if [ -x "$DOXYGEN_BIN" ]
then :
else echo >/dev/stderr "!! Not found: $DOXYGEN_BIN"; exit 1
fi
DOXYGEN_VERSION=$($DOXYGEN_BIN --version)
DOXYGEN_VERSION_OK='1.7.1'
case $DOXYGEN_VERSION in
$DOXYGEN_VERSION_OK) : ;;
*) echo >&2 \
"Doxygen version is $DOXYGEN_VERSION; $DOXYGEN_VERSION_OK needed"
exit 1
;;
esac
#------------------------------------------------------------------------------
# Main
#------------------------------------------------------------------------------
echo "DOXYGEN_DOXYDIR : $DOXYGEN_DOXYDIR"
echo "DOXYGEN_HTMLDIR : $DOXYGEN_HTMLDIR"
echo "DOXYGEN_HTML_HEADER : $DOXYGEN_HTML_HEADER"
echo "DOXYGEN_PROJECT_NAME : $DOXYGEN_PROJECT_NAME"
echo "DOXYGEN_PROJECT_NUMBER: $DOXYGEN_PROJECT_NUMBER"
echo "CFGFILE : $CFGFILE"
[ -r "$DOXYGEN_DOXYDIR" ] || {
echo >&2 "not readable directory: $DOXYGEN_DOXYDIR";
exit 1
}
[ -w "$DOXYGEN_HTMLDIR" ] || {
echo >&2 "not writable directory: $DOXYGEN_HTMLDIR";
exit 1
}
[ "$DOXYGEN_HTML_HEADER" ] &&
[ -r "$DOXYGEN_HTML_HEADER" ] || {
echo >&2 "cannot read header file: $DOXYGEN_HTML_HEADER";
exit 1
}
[ -r "$CFGFILE" ] || {
echo >&2 "cannot read configuration file: $CFGFILE";
exit 1
}
#Create 'main.html' separately from the rest, then copy to final destination.
IPUT_DIR=$(mktemp -d); echo IPUT_DIR=$IPUT_DIR
[ -r "$IPUT_DIR" ] || {
echo >&2 "not readable directory: $IPUT_DIR";
exit 1
}
#trap "rm -rf $IPUT_DIR" EXIT
cat $* >$IPUT_DIR/main.h
OPUT_DIR=$(mktemp -d); echo OPUT_DIR=$OPUT_DIR
[ -w "$OPUT_DIR" ] || {
echo >&2 "not writable directory: $OPUT_DIR";
exit 1
}
#trap "rm -rf $IPUT_DIR $OPUT_DIR" EXIT
FINAL_DEST_DIR=$DOXYGEN_HTMLDIR
DOXYGEN_HTMLDIR=$OPUT_DIR
DOXYGEN_DOXYDIR=$IPUT_DIR
export DOXYGEN_DOXYDIR
export DOXYGEN_HTMLDIR
export DOXYGEN_HTML_HEADER
export DOXYGEN_PROJECT_NAME
export DOXYGEN_PROJECT_NUMBER
eval $DOXYGEN_BIN $CFGFILE
[ -r "$OPUT_DIR/main.html" ] || {
echo >&2 "cannot read: $OPUT_DIR/main.html";
exit 1
}
cp "$FINAL_DEST_DIR/main.html" "$FINAL_DEST_DIR/mainORIG.html" &&
cp "$OPUT_DIR/main.html" "$FINAL_DEST_DIR/main.html" || {
echo >&2 "cannot update: $FINAL_DEST_DIR/main.html";
exit 1
}
exit 0
| che2/bde-tools | contrib/bdedox/bin/bdedox_genhtmlmain.sh | Shell | apache-2.0 | 5,665 |
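A hypothetical invocation matching the syntax block above; the directory names and the trailing header list (concatenated into main.h by the script) are invented:

bdedox_genhtmlmain.sh -i ./doxy -o ./html -p BDE -n 3.0.0 ./doxy/main_*.h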
#!/bin/sh
pppd call 3glink &
| ADVANTECH-Corp/meta-advantech | meta-tools/recipes-test/3g-script/files/ewm-c106.sh | Shell | apache-2.0 | 30 |
#!/bin/bash
#
# Copyright 2015 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -eu
# Load the test setup defined in the parent directory
CURRENT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
source "${CURRENT_DIR}/../integration_test_setup.sh" \
|| { echo "integration_test_setup.sh not found!" >&2; exit 1; }
function set_up_jobcount() {
tmp=$(mktemp -d ${TEST_TMPDIR}/testjobs.XXXXXXXX)
# We use hardlinks to this file as a communication mechanism between
# test runs.
touch ${tmp}/counter
mkdir -p dir
cat <<EOF > dir/test.sh
#!/bin/bash
# hard link
z=\$(mktemp -u ${tmp}/tmp.XXXXXXXX)
ln ${tmp}/counter \${z}
# Make sure other test runs have started too.
sleep 1
nlink=\$(ls -l ${tmp}/counter | awk '{print \$2}')
# 4 links = 3 jobs + ${tmp}/counter
if [[ "\$nlink" -gt 4 ]] ; then
echo found "\$nlink" hard links to file, want 4 max.
exit 1
fi
# Ensure that we don't remove before other runs have inspected the file.
sleep 1
rm \${z}
EOF
chmod +x dir/test.sh
cat <<EOF > dir/BUILD
sh_test(
name = "test",
srcs = [ "test.sh" ],
size = "small",
)
EOF
}
# We have to use --spawn_strategy=standalone, because the test actions
# communicate with each other via a hard-linked file.
function test_3_cpus() {
set_up_jobcount
# 3 CPUs, so no more than 3 tests in parallel.
bazel test --spawn_strategy=standalone --test_output=errors \
--local_resources=10000,3,100 --runs_per_test=10 //dir:test
}
function test_3_local_jobs() {
set_up_jobcount
# 3 local test jobs, so no more than 3 tests in parallel.
bazel test --spawn_strategy=standalone --test_output=errors \
--local_test_jobs=3 --local_resources=10000,10,100 \
--runs_per_test=10 //dir:test
}
function test_tmpdir() {
mkdir -p foo
cat > foo/bar_test.sh <<'EOF'
#!/bin/bash
echo TEST_TMPDIR=$TEST_TMPDIR
EOF
chmod +x foo/bar_test.sh
cat > foo/BUILD <<EOF
sh_test(
name = "bar_test",
srcs = ["bar_test.sh"],
)
EOF
bazel test --test_output=all //foo:bar_test >& $TEST_log || \
fail "Running sh_test failed"
expect_log "TEST_TMPDIR=/.*"
bazel test --nocache_test_results --test_output=all --test_tmpdir=$TEST_TMPDIR //foo:bar_test \
>& $TEST_log || fail "Running sh_test failed"
expect_log "TEST_TMPDIR=$TEST_TMPDIR"
# If we run `bazel test //src/test/shell/bazel:bazel_test_test` on Linux, it
# will be sandboxed and this "inner test" creating /foo/bar will actually
# succeed. If we run it on OS X (or in general without sandboxing enabled),
# it will fail to create /foo/bar, since obviously we don't have write
# permissions.
if bazel test --nocache_test_results --test_output=all \
--test_tmpdir=/foo/bar //foo:bar_test >& $TEST_log; then
# We are in a sandbox.
expect_log "TEST_TMPDIR=/foo/bar"
else
# We are not sandboxed.
expect_log "Could not create TEST_TMPDIR"
fi
}
function test_env_vars() {
cat > WORKSPACE <<EOF
workspace(name = "bar")
EOF
mkdir -p foo
cat > foo/testenv.sh <<'EOF'
#!/bin/bash
echo "pwd: $PWD"
echo "src: $TEST_SRCDIR"
echo "ws: $TEST_WORKSPACE"
EOF
chmod +x foo/testenv.sh
cat > foo/BUILD <<EOF
sh_test(
name = "foo",
srcs = ["testenv.sh"],
)
EOF
bazel test --test_output=all //foo &> $TEST_log || fail "Test failed"
expect_log "pwd: .*/foo.runfiles/bar$"
expect_log "src: .*/foo.runfiles$"
expect_log "ws: bar$"
}
function test_run_under_label_with_options() {
mkdir -p testing run || fail "mkdir testing run failed"
cat <<EOF > run/BUILD
sh_binary(
name='under', srcs=['under.sh'],
visibility=["//visibility:public"],
)
EOF
cat <<EOF > run/under.sh
#!/bin/sh
echo running under //run:under "\$*"
EOF
chmod u+x run/under.sh
cat <<EOF > testing/passing_test.sh
#!/bin/sh
exit 0
EOF
chmod u+x testing/passing_test.sh
cat <<EOF > testing/BUILD
sh_test(
name = "passing_test" ,
srcs = [ "passing_test.sh" ])
EOF
bazel test //testing:passing_test --run_under='//run:under -c' \
--test_output=all >& $TEST_log || fail "Expected success"
expect_log 'running under //run:under -c testing/passing_test'
expect_log 'passing_test *PASSED'
expect_log '1 test passes.$'
}
function test_run_under_path() {
mkdir -p testing || fail "mkdir testing failed"
echo "sh_test(name='t1', srcs=['t1.sh'])" > testing/BUILD
cat <<EOF > testing/t1.sh
#!/bin/sh
exit 0
EOF
chmod u+x testing/t1.sh
mkdir -p scripts
cat <<EOF > scripts/hello
#!/bin/sh
echo "hello script!!!" "\$@"
EOF
chmod u+x scripts/hello
PATH=$PATH:$PWD/scripts bazel test //testing:t1 -s --run_under=hello \
--test_output=all >& $TEST_log || fail "Expected success"
expect_log 'hello script!!! testing/t1'
# Make sure it still works if --run_under includes an arg.
PATH=$PATH:$PWD/scripts bazel test //testing:t1 \
-s --run_under='hello "some_arg with" space' \
--test_output=all >& $TEST_log || fail "Expected success"
expect_log 'hello script!!! some_arg with space testing/t1'
# Make sure absolute path works also
bazel test //testing:t1 --run_under=$PWD/scripts/hello \
-s --test_output=all >& $TEST_log || fail "Expected success"
expect_log 'hello script!!! testing/t1'
}
function test_test_timeout() {
mkdir -p dir
cat <<EOF > dir/test.sh
#!/bin/sh
sleep 3
exit 0
EOF
chmod +x dir/test.sh
cat <<EOF > dir/BUILD
sh_test(
name = "test",
timeout = "short",
srcs = [ "test.sh" ],
size = "small",
)
EOF
bazel test --test_timeout=2 //dir:test &> $TEST_log && fail "should have timed out"
expect_log "TIMEOUT"
bazel test --test_timeout=20 //dir:test || fail "expected success"
}
# Makes sure that runs_per_test_detects_flakes detects FLAKY if any of the 5
# attempts passes (which should cover all cases of being picky about the
# first/last/etc ones only being counted).
# We do this using an un-sandboxed test which keeps track of how many runs there
# have been using files which are undeclared inputs/outputs.
function test_runs_per_test_detects_flakes() {
# Directory for counters
local COUNTER_DIR="${TEST_TMPDIR}/counter_dir"
mkdir -p "${COUNTER_DIR}"
for (( i = 1 ; i <= 5 ; i++ )); do
# This file holds the number of the next run
echo 1 > "${COUNTER_DIR}/$i"
cat <<EOF > test$i.sh
#!/bin/bash
i=\$(< "${COUNTER_DIR}/$i")
# increment the hidden state
echo \$((i + 1)) > "${COUNTER_DIR}/$i"
# succeed exactly once.
exit \$((i != $i))
EOF
chmod +x test$i.sh
cat <<EOF > BUILD
sh_test(name = "test$i", srcs = [ "test$i.sh" ])
EOF
bazel test --spawn_strategy=standalone --jobs=1 \
--runs_per_test=5 --runs_per_test_detects_flakes \
//:test$i &> $TEST_log || fail "should have succeeded"
expect_log "FLAKY"
done
}
# Tests that the test.xml is extracted from the sandbox correctly.
function test_xml_is_present() {
mkdir -p dir
cat <<'EOF' > dir/test.sh
#!/bin/sh
echo HELLO > $XML_OUTPUT_FILE
exit 0
EOF
chmod +x dir/test.sh
cat <<'EOF' > dir/BUILD
sh_test(
name = "test",
srcs = [ "test.sh" ],
)
EOF
bazel test -s --test_output=streamed //dir:test &> $TEST_log || fail "expected success"
xml_log=bazel-testlogs/dir/test/test.xml
[ -s $xml_log ] || fail "$xml_log was not present after test"
}
# Simple test that we actually enforce testonly, see #1923.
function test_testonly_is_enforced() {
mkdir -p testonly
cat <<'EOF' >testonly/BUILD
genrule(
name = "testonly",
srcs = [],
cmd = "echo testonly | tee $@",
outs = ["testonly.txt"],
testonly = 1,
)
genrule(
name = "not-testonly",
srcs = [":testonly"],
cmd = "echo should fail | tee $@",
outs = ["not-testonly.txt"],
)
EOF
bazel build //testonly &>$TEST_log || fail "Building //testonly failed"
bazel build //testonly:not-testonly &>$TEST_log && fail "Should have failed" || true
expect_log "'//testonly:not-testonly' depends on testonly target '//testonly:testonly'"
}
function test_always_xml_output() {
mkdir -p dir
cat <<EOF > dir/success.sh
#!/bin/sh
exit 0
EOF
cat <<EOF > dir/fail.sh
#!/bin/sh
exit 1
EOF
chmod +x dir/{success,fail}.sh
cat <<EOF > dir/BUILD
sh_test(
name = "success",
srcs = [ "success.sh" ],
)
sh_test(
name = "fail",
srcs = [ "fail.sh" ],
)
EOF
bazel test //dir:all &> $TEST_log && fail "should have failed" || true
[ -f "bazel-testlogs/dir/success/test.xml" ] \
|| fail "No xml file for //dir:success"
[ -f "bazel-testlogs/dir/fail/test.xml" ] \
|| fail "No xml file for //dir:fail"
cat bazel-testlogs/dir/success/test.xml >$TEST_log
expect_log "errors=\"0\""
expect_log_once "testcase"
expect_log "name=\"dir/success\""
cat bazel-testlogs/dir/fail/test.xml >$TEST_log
expect_log "errors=\"1\""
expect_log_once "testcase"
expect_log "name=\"dir/fail\""
}
function test_detailed_test_summary() {
copy_examples
cat > WORKSPACE <<EOF
workspace(name = "io_bazel")
EOF
setup_javatest_support
local java_native_tests=//examples/java-native/src/test/java/com/example/myproject
bazel test --test_summary=detailed "${java_native_tests}:fail" >& $TEST_log \
&& fail "Test $* succeed while expecting failure" \
|| true
expect_log 'FAILED.*com\.example\.myproject\.Fail\.testFail'
}
run_suite "test tests"
| zhexuany/bazel | src/test/shell/bazel/bazel_test_test.sh | Shell | apache-2.0 | 9,786 |
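The suite above is itself a Bazel target; its own test_tmpdir comment names it, so from a Bazel source checkout it runs as:

bazel test //src/test/shell/bazel:bazel_test_test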
#!/usr/bin/env bash
go test -c
./janitor.test -c ../../../socialapi/config/dev.toml -test.v=true
RESULT=$?
rm janitor.test
exit $RESULT
| jack89129/koding | go/src/koding/workers/janitor/test.sh | Shell | apache-2.0 | 140 |
#!/bin/sh
# exitDemoMode.sh
# AndroidTool
#
# Created by Morten Just Petersen on 11/16/15.
# Copyright © 2015 Morten Just Petersen. All rights reserved.
thisdir=$1 # $1 is the bundle resources path directly from the calling script file
serial=$2
adb=$thisdir/adb
"$adb" -s $serial shell am broadcast -a com.android.systemui.demo -e command exit
| mortenjust/androidtool-mac | AndroidTool/exitDemoMode.sh | Shell | apache-2.0 | 354 |
#!/bin/sh
set -e
echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
SWIFT_STDLIB_PATH="${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}"
install_framework()
{
if [ -r "${BUILT_PRODUCTS_DIR}/$1" ]; then
local source="${BUILT_PRODUCTS_DIR}/$1"
elif [ -r "${BUILT_PRODUCTS_DIR}/$(basename "$1")" ]; then
local source="${BUILT_PRODUCTS_DIR}/$(basename "$1")"
elif [ -r "$1" ]; then
local source="$1"
fi
local destination="${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
if [ -L "${source}" ]; then
echo "Symlinked..."
source="$(readlink "${source}")"
fi
# use filter instead of exclude so missing patterns don't throw errors
echo "rsync -av --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${destination}\""
rsync -av --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${destination}"
local basename
basename="$(basename -s .framework "$1")"
binary="${destination}/${basename}.framework/${basename}"
if ! [ -r "$binary" ]; then
binary="${destination}/${basename}"
fi
# Strip invalid architectures so "fat" simulator / device frameworks work on device
if [[ "$(file "$binary")" == *"dynamically linked shared library"* ]]; then
strip_invalid_archs "$binary"
fi
# Resign the code if required by the build settings to avoid unstable apps
code_sign_if_enabled "${destination}/$(basename "$1")"
# Embed linked Swift runtime libraries. No longer necessary as of Xcode 7.
if [ "${XCODE_VERSION_MAJOR}" -lt 7 ]; then
local swift_runtime_libs
swift_runtime_libs=$(xcrun otool -LX "$binary" | grep --color=never @rpath/libswift | sed -E s/@rpath\\/\(.+dylib\).*/\\1/g | uniq -u && exit ${PIPESTATUS[0]})
for lib in $swift_runtime_libs; do
echo "rsync -auv \"${SWIFT_STDLIB_PATH}/${lib}\" \"${destination}\""
rsync -auv "${SWIFT_STDLIB_PATH}/${lib}" "${destination}"
code_sign_if_enabled "${destination}/${lib}"
done
fi
}
# Signs a framework with the provided identity
code_sign_if_enabled() {
if [ -n "${EXPANDED_CODE_SIGN_IDENTITY}" -a "${CODE_SIGNING_REQUIRED}" != "NO" -a "${CODE_SIGNING_ALLOWED}" != "NO" ]; then
# Use the current code_sign_identity
echo "Code Signing $1 with Identity ${EXPANDED_CODE_SIGN_IDENTITY_NAME}"
echo "/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} ${OTHER_CODE_SIGN_FLAGS} --preserve-metadata=identifier,entitlements \"$1\""
/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} ${OTHER_CODE_SIGN_FLAGS} --preserve-metadata=identifier,entitlements "$1"
fi
}
# Strip invalid architectures
strip_invalid_archs() {
binary="$1"
# Get architectures for current file
archs="$(lipo -info "$binary" | rev | cut -d ':' -f1 | rev)"
stripped=""
for arch in $archs; do
if ! [[ "${VALID_ARCHS}" == *"$arch"* ]]; then
# Strip non-valid architectures in-place
lipo -remove "$arch" -output "$binary" "$binary" || exit 1
stripped="$stripped $arch"
fi
done
if [[ "$stripped" ]]; then
echo "Stripped $binary of architectures:$stripped"
fi
}
if [[ "$CONFIGURATION" == "Debug" ]]; then
install_framework "$BUILT_PRODUCTS_DIR/EasyCountDownButton/EasyCountDownButton.framework"
fi
if [[ "$CONFIGURATION" == "Release" ]]; then
install_framework "$BUILT_PRODUCTS_DIR/EasyCountDownButton/EasyCountDownButton.framework"
fi
| EasySwift/EasySwift | Carthage/Checkouts/EasyCountDownButton/EasyCountDownButton/Pods/Target Support Files/Pods-EasyCountDownButtonTest/Pods-EasyCountDownButtonTest-frameworks.sh | Shell | apache-2.0 | 3,653 |
#!/bin/sh
this_script=$0
delay=3
pid=${pid}
process_status_file=${process_status_file}
ps -p$pid | grep $pid 2>&1 > /dev/null
# Grab the status of the ps | grep command
status=$?
echo "$status" > $process_status_file
# A value of 0 means that it was found running
if [ "$status" = "0" ]; then
while [ "$status" = "0" ]
do
sleep $delay
ps -p$pid | grep $pid 2>&1 > /dev/null
status=$?
echo "$status" > $process_status_file
done
fi
rm $this_script
| dreedyman/Rio | rio-lib/src/main/resources/org/rioproject/impl/exec/resources/proc-status-template.sh | Shell | apache-2.0 | 518 |
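The ${pid} and ${process_status_file} tokens above are placeholders that Rio substitutes before writing the script out; a hypothetical stand-in for that step using GNU sed (file names invented):

sed -e 's/${pid}/12345/' \
    -e 's|${process_status_file}|/tmp/proc-12345.status|' \
    proc-status-template.sh > /tmp/watch-12345.sh
chmod +x /tmp/watch-12345.sh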
#!/bin/bash
# Copyright 2015-2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
set -ex
cd $(dirname $0)
# Pick up the source dist archive whatever its version is
SDIST_ARCHIVE=$EXTERNAL_GIT_ROOT/input_artifacts/grpcio-*.tar.gz
BDIST_DIR="file://$EXTERNAL_GIT_ROOT/input_artifacts"
if [ ! -f ${SDIST_ARCHIVE} ]
then
echo "Archive ${SDIST_ARCHIVE} does not exist."
exit 1
fi
PIP=pip2
which $PIP || PIP=pip
PYTHON=python2
which $PYTHON || PYTHON=python
# TODO(jtattermusch): this shouldn't be required
$PIP install --upgrade six
GRPC_PYTHON_BINARIES_REPOSITORY="${BDIST_DIR}" \
$PIP install \
${SDIST_ARCHIVE}
$PYTHON distribtest.py
| VcamX/grpc | test/distrib/python/run_distrib_test.sh | Shell | bsd-3-clause | 2,117 |
Script started on Fri 18 Nov 2016 11:33:35 PM PST
patrickn96:~/workspace/CS100/Assn2/Working (master) $ make
g++ -c -Wall -ansi command.cpp -o command.o
command.cpp: In member function ‘bool Execute::execute(std::vector<std::basic_string<char> >)’:
command.cpp:60:1: warning: control reaches end of non-void function [-Wreturn-type]
 }
 ^
g++ command.o main.o -o test.out
mkdir -p bin src/rshell.cpp bin/rshell
patrickn96:~/workspace/CS100/Assn2/Working (master) $ ./a.out
$ exit
patrickn96:~/workspace/CS100/Assn2/Working (master) $ ./a.out
$ Echo (echo a && exit) || echo c
a
c
$ (echo a && || exit) || ec xit
a
patrickn96:~/workspace/CS100/Assn2/Working (master) $ ./a.out
$ (echo a && echo b) || echo c
a
c
$ make
$ exit
$ exit
patrickn96:~/workspace/CS100/Assn2/Working (master) $ exit
exit
Script done on Fri 18 Nov 2016 11:36:10 PM PST
| pnguy046/rshell | tests/exit.sh | Shell | bsd-3-clause | 1,407 |
#!/bin/bash
. ../../../prepare.inc.sh
. ../../../toolbox.inc.sh
# ---- do the actual testing ----
result=PASS
echo "++++ BEGINNING TEST" >$OUTPUTFILE
# create a keyring and attach it to the session keyring
marker "ADD KEYRING"
create_keyring wibble @s
expect_keyid keyringid
# stick a key in the keyring
marker "ADD KEY"
create_key user lizard gizzard $keyringid
expect_keyid keyid
# check that the key is in the keyring
marker "LIST KEYRING"
list_keyring $keyringid
expect_keyring_rlist rlist $keyid
# read the contents of the key
marker "PRINT KEY"
print_key $keyid
expect_payload payload "gizzard"
# pipe the contents of the key and add a LF as the key doesn't have one
marker "PIPE KEY"
pipe_key $keyid
echo >>$OUTPUTFILE
expect_payload payload "gizzard"
# read the key as hex
marker "READ KEY"
read_key $keyid
expect_payload payload "67697a7a 617264"
# read the contents of the keyring as hex and match it to the key ID
marker "READ KEYRING"
read_key $keyringid
tmp=`printf %08x $keyid`
if [ "$endian" = "LE" ]
then
tmp=`echo $tmp | sed 's/\(..\)\(..\)\(..\)\(..\)/\4\3\2\1/'`
fi
expect_payload payload $tmp
# remove read permission from the key and try reading it again
# - we should still have read permission because it's searchable in our
# keyrings
marker "REMOVE READ PERM"
set_key_perm $keyid 0x3d0000
print_key $keyid
expect_payload payload "gizzard"
# remove search permission from the key as well
# - reading should now fail, as the key is no longer searchable from our
# keyrings either
marker "REMOVE SEARCH PERM"
set_key_perm $keyid 0x350000
print_key --fail $keyid
expect_error EACCES
# check that we can read it if we have to rely on possessor perms
# - possessor read permission applies because we possess the key via our
# session keyring
marker "CHECK POSSESSOR READ"
set_key_perm $keyid 0x3d000000
print_key $keyid
expect_payload payload "gizzard"
# put read permission back again
marker "REINSTATE READ PERM"
set_key_perm $keyid 0x370000
print_key $keyid
expect_payload payload "gizzard"
# revoke the key
marker "REVOKE KEY"
revoke_key $keyid
print_key --fail $keyid
expect_error EKEYREVOKED
# remove the keyring we added
marker "UNLINK KEYRING"
unlink_key $keyringid @s
echo "++++ FINISHED TEST: $result" >>$OUTPUTFILE
# --- then report the results in the database ---
toolbox_report_result $TEST $result
| Distrotech/keyutils | tests/keyctl/reading/valid/runtest.sh | Shell | gpl-2.0 | 2,344 |
#!/bin/bash
# runs the tools staticcheck, varcheck, structcheck and deadcode
# see their websites for more info.
# find the dir we exist within...
DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
# and cd into root project dir
cd ${DIR}/../..
go get -u honnef.co/go/tools/cmd/staticcheck
go get -u github.com/opennota/check/cmd/varcheck
go get -u github.com/opennota/check/cmd/structcheck
# for https://github.com/remyoudompheng/go-misc/pull/14
go get -u github.com/Dieterbe/go-misc/deadcode
ret=0
echo "## running staticcheck"
staticcheck -checks U1000 ./...
r=$?
[ $r -gt $ret ] && ret=$r
echo "## running varcheck"
varcheck ./...
r=$?
[ $r -gt $ret ] && ret=$r
echo "## running structcheck"
structcheck ./...
r=$?
[ $r -gt $ret ] && ret=$r
echo "## running deadcode"
deadcode -test $(find . -type d | grep -v '.git' | grep -v vendor | grep -v docker)
r=$?
[ $r -gt $ret ] && ret=$r
[ $ret -eq 0 ] && echo "all good"
exit $ret
| replay/metrictank | scripts/qa/unused.sh | Shell | agpl-3.0 | 942 |
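The script above lets the worst tool exit code win; the same pattern in isolation, with the tool list as the only assumption:

#!/bin/bash
ret=0
for tool in staticcheck varcheck structcheck; do
    command -v "$tool" >/dev/null || continue   # skip tools not installed
    "$tool" ./...
    r=$?
    [ $r -gt $ret ] && ret=$r   # remember the highest (worst) exit code
done
exit $ret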
#!/bin/bash
## Command line parameters
if [[ $# != 2 ]]; then
cat <<USAGE
usage:
$0 <version> <edition>
Creates tar and zip source package from HEAD of the main repository and submodules.
Files and directories are named after qt-creator-<edition>-src-<version>.
example:
$0 2.2.0-beta opensource
USAGE
exit 1
fi
VERSION=$1
EDITION=$2
PREFIX=qt-creator-${EDITION}-src-${VERSION}
cd `dirname $0`/..
RESULTDIR=`pwd`
TEMPSOURCES=`mktemp -d -t qtcCreatorSourcePackage.XXXXXX`
echo "Temporary directory: ${TEMPSOURCES}"
echo "Creating tar archive..."
echo " Creating tar sources of repositories..."
git archive --format=tar --prefix=${PREFIX}/ HEAD > ${TEMPSOURCES}/__qtcreator_main.tar || exit 1
cd src/shared/qbs || exit 1
git archive --format=tar --prefix=${PREFIX}/src/shared/qbs/ HEAD > ${TEMPSOURCES}/__qtcreator_qbs.tar || exit 1
echo " Combining tar sources..."
cd ${TEMPSOURCES} || exit 1
tar xf __qtcreator_main.tar || exit 1
tar xf __qtcreator_qbs.tar || exit 1
tar czf "${RESULTDIR}/${PREFIX}.tar.gz" ${PREFIX}/ || exit 1
echo "Creating zip archive..."
echo " Filtering binary vs text files..."
# write filter for text files (for use with 'file' command)
echo ".*:.*ASCII
.*:.*directory
.*:.*empty
.*:.*POSIX
.*:.*html
.*:.*text" > __txtpattern || exit 1
# list all files
find ${PREFIX} > __packagedfiles || exit 1
# record file types
file -f __packagedfiles > __filetypes || exit 1
echo " Creating archive..."
# zip text files and binary files separately
cat __filetypes | grep -f __txtpattern -v | cut -d: -f1 | zip -9q "${RESULTDIR}/${PREFIX}.zip" -@ || exit 1
cat __filetypes | grep -f __txtpattern | cut -d: -f1 | zip -l9q "${RESULTDIR}/${PREFIX}.zip" -@ || exit 1
| colede/qtcreator | scripts/createSourcePackages.sh | Shell | lgpl-2.1 | 1,709 |
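An example invocation following the script's own usage text; the archives land in the repository root (the script's parent directory):

scripts/createSourcePackages.sh 2.2.0-beta opensource
# -> qt-creator-opensource-src-2.2.0-beta.tar.gz and .zip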
#!/usr/bin/env bash
#"INTEL CONFIDENTIAL"
#Copyright 2015 Intel Corporation All Rights Reserved.
#
#The source code contained or described herein and all documents related to the source code ("Material") are owned by Intel Corporation or its suppliers or licensors. Title to the Material remains with Intel Corporation or its suppliers and licensors. The Material contains trade secrets and proprietary and confidential information of Intel or its suppliers and licensors. The Material is protected by worldwide copyright and trade secret laws and treaty provisions. No part of the Material may be used, copied, reproduced, modified, published, uploaded, posted, transmitted, distributed, or disclosed in any way without Intel's prior express written permission.
#
#No license under any patent, copyright, trade secret or other intellectual property right is granted to or conferred upon you by disclosure or delivery of the Materials, either expressly, by implication, inducement, estoppel or otherwise. Any license under such intellectual property rights must be express and approved by Intel in writing.
RESULT_TABLE1="${RESULT_TABLE}1"
RESULT_DIR1="$RESULT_DIR/$RESULT_TABLE1"
RESULT_TABLE2="${RESULT_TABLE}2"
RESULT_DIR2="$RESULT_DIR/$RESULT_TABLE2"
BINARY_PARAMS+=(--hiveconf RESULT_TABLE1=$RESULT_TABLE1 --hiveconf RESULT_DIR1=$RESULT_DIR1 --hiveconf RESULT_TABLE2=$RESULT_TABLE2 --hiveconf RESULT_DIR2=$RESULT_DIR2)
query_run_main_method () {
QUERY_SCRIPT="$QUERY_DIR/$QUERY_NAME.sql"
if [ ! -r "$QUERY_SCRIPT" ]
then
echo "SQL file $QUERY_SCRIPT can not be read."
exit 1
fi
runCmdWithErrorCheck runEngineCmd -f "$QUERY_SCRIPT"
return $?
}
query_run_clean_method () {
runCmdWithErrorCheck runEngineCmd -e "DROP VIEW IF EXISTS $TEMP_TABLE; DROP TABLE IF EXISTS $RESULT_TABLE1; DROP TABLE IF EXISTS $RESULT_TABLE2;"
return $?
}
query_run_validate_method () {
# perform exact result validation if using SF 1, else perform general sanity check
if [ "$BIG_BENCH_SCALE_FACTOR" -eq 1 ]
then
local VALIDATION_PASSED="1"
local VALIDATION_RESULTS_FILENAME_1="$VALIDATION_RESULTS_FILENAME-1"
local VALIDATION_RESULTS_FILENAME_2="$VALIDATION_RESULTS_FILENAME-2"
if [ ! -f "$VALIDATION_RESULTS_FILENAME_1" ]
then
echo "Golden result set file $VALIDATION_RESULTS_FILENAME_1 not found"
VALIDATION_PASSED="0"
fi
if diff -q "$VALIDATION_RESULTS_FILENAME_1" <(hadoop fs -cat "$RESULT_DIR1/*")
then
echo "Validation of $VALIDATION_RESULTS_FILENAME_1 passed: Query returned correct results"
else
echo "Validation of $VALIDATION_RESULTS_FILENAME_1 failed: Query returned incorrect results"
VALIDATION_PASSED="0"
fi
if [ ! -f "$VALIDATION_RESULTS_FILENAME_2" ]
then
echo "Golden result set file $VALIDATION_RESULTS_FILENAME_2 not found"
VALIDATION_PASSED="0"
fi
if diff -q "$VALIDATION_RESULTS_FILENAME_2" <(hadoop fs -cat "$RESULT_DIR2/*")
then
echo "Validation of $VALIDATION_RESULTS_FILENAME_2 passed: Query returned correct results"
else
echo "Validation of $VALIDATION_RESULTS_FILENAME_2 failed: Query returned incorrect results"
VALIDATION_PASSED="0"
fi
if [ "$VALIDATION_PASSED" -eq 1 ]
then
echo "Validation passed: Query results are OK"
else
echo "Validation failed: Query results are not OK"
fi
else
if [ `hadoop fs -cat "$RESULT_DIR1/*" | head -n 10 | wc -l` -ge 1 ]
then
echo "Validation passed: Query 1 returned results"
else
echo "Validation failed: Query 1 did not return results"
fi
if [ `hadoop fs -cat "$RESULT_DIR2/*" | head -n 10 | wc -l` -ge 1 ]
then
echo "Validation passed: Query 2 returned results"
else
echo "Validation failed: Query 2 did not return results"
fi
fi
}
| XiaominZhang/Big-Data-Benchmark-for-Big-Bench | engines/hive/queries/q23/run.sh | Shell | apache-2.0 | 3,721 |
echo "CHRONIX SPARK ###############################################################"
echo "Starting Zeppelin ..."
./zeppelin/bin/zeppelin-daemon.sh start
| ChronixDB/chronix.spark | chronix-infrastructure-local/startZeppelin.sh | Shell | apache-2.0 | 153 |
#!/bin/sh
# This is only run for pull requests per suggestion at:
# https://docs.travis-ci.com/user/pull-requests#Pull-Requests-and-Security-Restrictions
echo "Nothing to do here";
| kldavis4/biblebox-pi | ci/script_run_on_pull_requests.sh | Shell | apache-2.0 | 183 |
#!/bin/sh
#
# Copyright 2019 PingCAP, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -eu
# keep this test to check the compatibility of the no-schema config.
run_sql "DROP DATABASE IF EXISTS noschema;"
run_sql "create database noschema;"
run_sql "create table noschema.t (x int primary key);"
# Starting importing
run_lightning --no-schema=1
run_sql "SELECT sum(x) FROM noschema.t;"
check_contains 'sum(x): 120'
| pingcap/tidb | br/tests/lightning_no_schema/run.sh | Shell | apache-2.0 | 920 |
#!/bin/bash
find `pwd`/`dirname $0`/.. \( -name \*.py -a ! -name __\*__.py \) -exec chmod 755 {} \;
find `pwd`/`dirname $0`/.. -name \*.png -a -exec chmod 644 {} \;
chmod 755 `pwd`/`dirname $0`/sanaviron
| jaliste/sanaviron | bin/fixperm.sh | Shell | apache-2.0 | 204 |
echo I failed because I am fail.
exit 1
| grammarly/browser-extensions | generate/generate/tests/fixtures/fail.sh | Shell | bsd-3-clause | 40 |
#!/bin/bash
if [ -z "$1" ]; then
echo "usage: setup_edge_location <host>"
exit 1
fi
HOST=$1
#KEY="/usr/local/cds/cds.pem"
KEY=cds.pem
USER=ubuntu
if [ ! -f $KEY ]; then
echo Cannot access key for edge locations
exit 1
fi
ssh -i $KEY $USER@$HOST "sudo apt-get install nginx; sudo chown -R ubuntu:ubuntu /etc/nginx; sudo mkdir /etc/nginx/cds-enabled/"
scp -i $KEY conf/nginx.conf $USER@$HOST:/etc/nginx/
scp -i $KEY conf/default $USER@$HOST:/etc/nginx/sites-enabled/
ssh -i $KEY $USER@$HOST "sudo mkdir /usr/local/cds; sudo chown ubuntu:ubuntu /usr/local/cds; cd /usr/local/cds/; mkdir -p cache; mkdir -p edge/commands"
scp -i $KEY edge.sh $USER@$HOST:/usr/local/cds/edge/
scp -i $KEY ../manager/commands/get_apps.sh $USER@$HOST:/usr/local/cds/edge/commands/
| mihaisoloi/conpaas | conpaas-services/src/conpaas/services/cds/agent/setup_edge_location.sh | Shell | bsd-3-clause | 772 |
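An example invocation (the host is invented); the script expects cds.pem in the current directory and an ubuntu account with sudo on the target:

./setup_edge_location.sh edge1.example.com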
#!/usr/bin/env sh
cd ..
rm -rf bones-cov
mkdir bones-cov
jscoverage --no-instrument=test \
--no-instrument=node_modules \
--no-instrument=assets \
--no-instrument=client \
--no-instrument=server/command.prefix.js \
--no-instrument=server/command.suffix.js \
--no-instrument=server/router.prefix.js \
--no-instrument=server/router.suffix.js \
--no-instrument=server/model.prefix.js \
--no-instrument=server/model.suffix.js \
--no-instrument=server/server.prefix.js \
--no-instrument=server/server.suffix.js \
--no-instrument=server/view.prefix.js \
--no-instrument=server/view.suffix.js \
--exclude=examples \
bones \
bones-cov
cd bones-cov
mocha -R html-cov > coverage.html
VIEWER=$(which open);
if [ -x $VIEWER ]; then
$VIEWER coverage.html
fi
| developmentseed/bones | test/coverage.sh | Shell | bsd-3-clause | 928 |
#!/usr/bin/env bash
#===-- test-release.sh - Test the LLVM release candidates ------------------===#
#
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License.
#
#===------------------------------------------------------------------------===#
#
# Download, build, and test the release candidate for an LLVM release.
#
#===------------------------------------------------------------------------===#
if [ `uname -s` = "FreeBSD" ]; then
MAKE=gmake
else
MAKE=make
fi
projects="llvm cfe dragonegg compiler-rt libcxx test-suite clang-tools-extra"
# Base SVN URL for the sources.
Base_url="http://llvm.org/svn/llvm-project"
Release=""
Release_no_dot=""
RC=""
do_checkout="yes"
do_ada="no"
do_clang="yes"
do_dragonegg="no"
do_fortran="no"
do_objc="yes"
do_64bit="yes"
do_debug="no"
do_asserts="no"
do_compare="yes"
BuildDir="`pwd`"
function usage() {
echo "usage: `basename $0` -release X.Y -rc NUM [OPTIONS]"
echo ""
echo " -release X.Y The release number to test."
echo " -rc NUM The pre-release candidate number."
echo " -final The final release candidate."
echo " -j NUM Number of compile jobs to run. [default: 3]"
echo " -build-dir DIR Directory to perform testing in. [default: pwd]"
echo " -no-checkout Don't checkout the sources from SVN."
echo " -no-64bit Don't test the 64-bit version. [default: yes]"
echo " -enable-ada Build Ada. [default: disable]"
echo " -disable-clang Do not test clang. [default: enable]"
echo " -enable-dragonegg Test dragonegg. [default: disable]"
echo " -enable-fortran Enable Fortran build. [default: disable]"
echo " -disable-objc Disable ObjC build. [default: enable]"
echo " -test-debug Test the debug build. [default: no]"
echo " -test-asserts Test with asserts on. [default: no]"
echo " -no-compare-files Don't test that phase 2 and 3 files are identical."
}
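# Example invocation (hypothetical release/candidate numbers, shown only to
# illustrate the flags parsed below):
#   ./test-release.sh -release 3.3 -rc 1 -j 4 -build-dir /tmp/llvm-rc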
while [ $# -gt 0 ]; do
case $1 in
-release | --release )
shift
Release="$1"
Release_no_dot="`echo $1 | sed -e 's,\.,,'`"
;;
-rc | --rc | -RC | --RC )
shift
RC="rc$1"
;;
-final | --final )
RC=final
;;
-j* )
NumJobs="`echo $1 | sed -e 's,-j\([0-9]*\),\1,g'`"
if [ -z "$NumJobs" ]; then
shift
NumJobs="$1"
fi
;;
-build-dir | --build-dir | -builddir | --builddir )
shift
BuildDir="$1"
;;
-no-checkout | --no-checkout )
do_checkout="no"
;;
-no-64bit | --no-64bit )
do_64bit="no"
;;
-enable-ada | --enable-ada )
do_ada="yes"
;;
-disable-clang | --disable-clang )
do_clang="no"
;;
-enable-dragonegg | --enable-dragonegg )
do_dragonegg="yes"
;;
-enable-fortran | --enable-fortran )
do_fortran="yes"
;;
-disable-objc | --disable-objc )
do_objc="no"
;;
-test-debug | --test-debug )
do_debug="yes"
;;
-test-asserts | --test-asserts )
do_asserts="yes"
;;
-no-compare-files | --no-compare-files )
do_compare="no"
;;
-help | --help | -h | --h | -\? )
usage
exit 0
;;
* )
echo "unknown option: $1"
usage
exit 1
;;
esac
shift
done
# Check required arguments.
if [ -z "$Release" ]; then
echo "error: no release number specified"
exit 1
fi
if [ -z "$RC" ]; then
echo "error: no release candidate number specified"
exit 1
fi
# Figure out how many make processes to run.
if [ -z "$NumJobs" ]; then
NumJobs=`sysctl -n hw.activecpu 2> /dev/null || true`
fi
if [ -z "$NumJobs" ]; then
NumJobs=`sysctl -n hw.ncpu 2> /dev/null || true`
fi
if [ -z "$NumJobs" ]; then
NumJobs=`grep -c processor /proc/cpuinfo 2> /dev/null || true`
fi
if [ -z "$NumJobs" ]; then
NumJobs=3
fi
# Go to the build directory (may be different from CWD)
BuildDir=$BuildDir/$RC
mkdir -p $BuildDir
cd $BuildDir
# Location of log files.
LogDir=$BuildDir/logs
mkdir -p $LogDir
# Find compilers.
if [ "$do_dragonegg" = "yes" ]; then
gcc_compiler="$GCC"
if [ -z "$gcc_compiler" ]; then
gcc_compiler="`which gcc`"
if [ -z "$gcc_compiler" ]; then
echo "error: cannot find gcc to use with dragonegg"
exit 1
fi
fi
gxx_compiler="$GXX"
if [ -z "$gxx_compiler" ]; then
gxx_compiler="`which g++`"
if [ -z "$gxx_compiler" ]; then
echo "error: cannot find g++ to use with dragonegg"
exit 1
fi
fi
fi
# Make sure that the URLs are valid.
function check_valid_urls() {
for proj in $projects ; do
echo "# Validating $proj SVN URL"
if ! svn ls $Base_url/$proj/tags/RELEASE_$Release_no_dot/$RC > /dev/null 2>&1 ; then
echo "llvm $Release release candidate $RC doesn't exist!"
exit 1
fi
done
}
# Export sources to the build directory.
function export_sources() {
check_valid_urls
for proj in $projects ; do
echo "# Exporting $proj $Release-RC$RC sources"
if ! svn export -q $Base_url/$proj/tags/RELEASE_$Release_no_dot/$RC $proj.src ; then
echo "error: failed to export $proj project"
exit 1
fi
done
echo "# Creating symlinks"
cd $BuildDir/llvm.src/tools
if [ ! -h clang ]; then
ln -s ../../cfe.src clang
fi
cd $BuildDir/llvm.src/tools/clang/tools
if [ ! -h clang-tools-extra ]; then
ln -s ../../../../clang-tools-extra.src extra
fi
cd $BuildDir/llvm.src/projects
if [ ! -h test-suite ]; then
ln -s ../../test-suite.src test-suite
fi
if [ ! -h compiler-rt ]; then
ln -s ../../compiler-rt.src compiler-rt
fi
if [ ! -h libcxx ]; then
ln -s ../../libcxx.src libcxx
fi
cd $BuildDir
}
function configure_llvmCore() {
Phase="$1"
Flavor="$2"
ObjDir="$3"
InstallDir="$4"
case $Flavor in
Release | Release-64 )
Optimized="yes"
Assertions="no"
;;
Release+Asserts )
Optimized="yes"
Assertions="yes"
;;
Debug )
Optimized="no"
Assertions="yes"
;;
* )
echo "# Invalid flavor '$Flavor'"
echo ""
return
;;
esac
echo "# Using C compiler: $c_compiler"
echo "# Using C++ compiler: $cxx_compiler"
cd $ObjDir
echo "# Configuring llvm $Release-$RC $Flavor"
echo "# $BuildDir/llvm.src/configure --prefix=$InstallDir \
--enable-optimized=$Optimized \
--enable-assertions=$Assertions"
env CC="$c_compiler" CXX="$cxx_compiler" \
$BuildDir/llvm.src/configure --prefix=$InstallDir \
--enable-optimized=$Optimized \
--enable-assertions=$Assertions \
--disable-timestamps \
2>&1 | tee $LogDir/llvm.configure-Phase$Phase-$Flavor.log
cd $BuildDir
}
function build_llvmCore() {
Phase="$1"
Flavor="$2"
ObjDir="$3"
ExtraOpts=""
if [ "$Flavor" = "Release-64" ]; then
ExtraOpts="EXTRA_OPTIONS=-m64"
fi
cd $ObjDir
echo "# Compiling llvm $Release-$RC $Flavor"
echo "# ${MAKE} -j $NumJobs VERBOSE=1 $ExtraOpts"
${MAKE} -j $NumJobs VERBOSE=1 $ExtraOpts \
2>&1 | tee $LogDir/llvm.make-Phase$Phase-$Flavor.log
echo "# Installing llvm $Release-$RC $Flavor"
echo "# ${MAKE} install"
${MAKE} install \
2>&1 | tee $LogDir/llvm.install-Phase$Phase-$Flavor.log
cd $BuildDir
}
function build_dragonegg() {
Phase="$1"
Flavor="$2"
LLVMInstallDir="$3"
DragonEggObjDir="$4"
LLVM_CONFIG=$LLVMInstallDir/bin/llvm-config
TOP_DIR=$BuildDir/dragonegg.src
echo "# Targeted compiler: $gcc_compiler"
cd $DragonEggObjDir
echo "# Compiling phase $Phase dragonegg $Release-$RC $Flavor"
echo -n "# CXX=$cxx_compiler TOP_DIR=$TOP_DIR GCC=$gcc_compiler "
echo -n "LLVM_CONFIG=$LLVM_CONFIG ${MAKE} -f $TOP_DIR/Makefile "
echo "-j $NumJobs VERBOSE=1"
CXX="$cxx_compiler" TOP_DIR="$TOP_DIR" GCC="$gcc_compiler" \
LLVM_CONFIG="$LLVM_CONFIG" ${MAKE} -f $TOP_DIR/Makefile \
-j $NumJobs VERBOSE=1 \
2>&1 | tee $LogDir/dragonegg-Phase$Phase-$Flavor.log
cd $BuildDir
}
function test_llvmCore() {
Phase="$1"
Flavor="$2"
ObjDir="$3"
cd $ObjDir
${MAKE} -k check-all \
2>&1 | tee $LogDir/llvm.check-Phase$Phase-$Flavor.log
${MAKE} -k unittests \
2>&1 | tee $LogDir/llvm.unittests-Phase$Phase-$Flavor.log
cd $BuildDir
}
set -e # Exit if any command fails
if [ "$do_checkout" = "yes" ]; then
export_sources
fi
(
Flavors="Release"
if [ "$do_debug" = "yes" ]; then
Flavors="Debug $Flavors"
fi
if [ "$do_asserts" = "yes" ]; then
Flavors="$Flavors Release+Asserts"
fi
if [ "$do_64bit" = "yes" ]; then
Flavors="$Flavors Release-64"
fi
for Flavor in $Flavors ; do
echo ""
echo ""
echo "********************************************************************************"
echo " Release: $Release-$RC"
echo " Build: $Flavor"
echo " System Info: "
echo " `uname -a`"
echo "********************************************************************************"
echo ""
c_compiler="$CC"
cxx_compiler="$CXX"
llvmCore_phase1_objdir=$BuildDir/Phase1/$Flavor/llvmCore-$Release-$RC.obj
llvmCore_phase1_installdir=$BuildDir/Phase1/$Flavor/llvmCore-$Release-$RC.install
dragonegg_phase1_objdir=$BuildDir/Phase1/$Flavor/DragonEgg-$Release-$RC.obj
llvmCore_phase2_objdir=$BuildDir/Phase2/$Flavor/llvmCore-$Release-$RC.obj
llvmCore_phase2_installdir=$BuildDir/Phase2/$Flavor/llvmCore-$Release-$RC.install
llvmCore_de_phase2_objdir=$BuildDir/Phase2/$Flavor/llvmCore-DragonEgg-$Release-$RC.obj
llvmCore_de_phase2_installdir=$BuildDir/Phase2/$Flavor/llvmCore-DragonEgg-$Release-$RC.install
dragonegg_phase2_objdir=$BuildDir/Phase2/$Flavor/DragonEgg-$Release-$RC.obj
llvmCore_phase3_objdir=$BuildDir/Phase3/$Flavor/llvmCore-$Release-$RC.obj
llvmCore_phase3_installdir=$BuildDir/Phase3/$Flavor/llvmCore-$Release-$RC.install
llvmCore_de_phase3_objdir=$BuildDir/Phase3/$Flavor/llvmCore-DragonEgg-$Release-$RC.obj
llvmCore_de_phase3_installdir=$BuildDir/Phase3/$Flavor/llvmCore-DragonEgg-$Release-$RC.install
dragonegg_phase3_objdir=$BuildDir/Phase3/$Flavor/DragonEgg-$Release-$RC.obj
rm -rf $llvmCore_phase1_objdir
rm -rf $llvmCore_phase1_installdir
rm -rf $dragonegg_phase1_objdir
rm -rf $llvmCore_phase2_objdir
rm -rf $llvmCore_phase2_installdir
rm -rf $llvmCore_de_phase2_objdir
rm -rf $llvmCore_de_phase2_installdir
rm -rf $dragonegg_phase2_objdir
rm -rf $llvmCore_phase3_objdir
rm -rf $llvmCore_phase3_installdir
rm -rf $llvmCore_de_phase3_objdir
rm -rf $llvmCore_de_phase3_installdir
rm -rf $dragonegg_phase3_objdir
mkdir -p $llvmCore_phase1_objdir
mkdir -p $llvmCore_phase1_installdir
mkdir -p $dragonegg_phase1_objdir
mkdir -p $llvmCore_phase2_objdir
mkdir -p $llvmCore_phase2_installdir
mkdir -p $llvmCore_de_phase2_objdir
mkdir -p $llvmCore_de_phase2_installdir
mkdir -p $dragonegg_phase2_objdir
mkdir -p $llvmCore_phase3_objdir
mkdir -p $llvmCore_phase3_installdir
mkdir -p $llvmCore_de_phase3_objdir
mkdir -p $llvmCore_de_phase3_installdir
mkdir -p $dragonegg_phase3_objdir
############################################################################
# Phase 1: Build llvmCore and clang
echo "# Phase 1: Building llvmCore"
configure_llvmCore 1 $Flavor \
$llvmCore_phase1_objdir $llvmCore_phase1_installdir
build_llvmCore 1 $Flavor \
$llvmCore_phase1_objdir
# Test clang
if [ "$do_clang" = "yes" ]; then
########################################################################
# Phase 2: Build llvmCore with newly built clang from phase 1.
c_compiler=$llvmCore_phase1_installdir/bin/clang
cxx_compiler=$llvmCore_phase1_installdir/bin/clang++
echo "# Phase 2: Building llvmCore"
configure_llvmCore 2 $Flavor \
$llvmCore_phase2_objdir $llvmCore_phase2_installdir
build_llvmCore 2 $Flavor \
$llvmCore_phase2_objdir
########################################################################
# Phase 3: Build llvmCore with newly built clang from phase 2.
c_compiler=$llvmCore_phase2_installdir/bin/clang
cxx_compiler=$llvmCore_phase2_installdir/bin/clang++
echo "# Phase 3: Building llvmCore"
configure_llvmCore 3 $Flavor \
$llvmCore_phase3_objdir $llvmCore_phase3_installdir
build_llvmCore 3 $Flavor \
$llvmCore_phase3_objdir
########################################################################
# Testing: Test phase 3
echo "# Testing - built with clang"
test_llvmCore 3 $Flavor $llvmCore_phase3_objdir
########################################################################
# Compare .o files between Phase2 and Phase3 and report which ones
# differ.
if [ "$do_compare" = "yes" ]; then
echo
echo "# Comparing Phase 2 and Phase 3 files"
for o in `find $llvmCore_phase2_objdir -name '*.o'` ; do
p3=`echo $o | sed -e 's,Phase2,Phase3,'`
if ! cmp --ignore-initial=16 $o $p3 > /dev/null 2>&1 ; then
echo "file `basename $o` differs between phase 2 and phase 3"
fi
done
fi
fi
# Test dragonegg
if [ "$do_dragonegg" = "yes" ]; then
# Build dragonegg using the targeted gcc. This isn't necessary, but
# helps avoid using broken versions of gcc (which are legion), tests
# that the targeted gcc is basically sane and is consistent with the
# later phases in which the targeted gcc + dragonegg are used.
c_compiler="$gcc_compiler"
cxx_compiler="$gxx_compiler"
build_dragonegg 1 $Flavor $llvmCore_phase1_installdir $dragonegg_phase1_objdir
########################################################################
# Phase 2: Build llvmCore with newly built dragonegg from phase 1.
c_compiler="$gcc_compiler -fplugin=$dragonegg_phase1_objdir/dragonegg.so"
cxx_compiler="$gxx_compiler -fplugin=$dragonegg_phase1_objdir/dragonegg.so"
echo "# Phase 2: Building llvmCore with dragonegg"
configure_llvmCore 2 $Flavor \
$llvmCore_de_phase2_objdir $llvmCore_de_phase2_installdir
build_llvmCore 2 $Flavor \
$llvmCore_de_phase2_objdir
build_dragonegg 2 $Flavor $llvmCore_de_phase2_installdir $dragonegg_phase2_objdir
########################################################################
# Phase 3: Build llvmCore with newly built dragonegg from phase 2.
c_compiler="$gcc_compiler -fplugin=$dragonegg_phase2_objdir/dragonegg.so"
cxx_compiler="$gxx_compiler -fplugin=$dragonegg_phase2_objdir/dragonegg.so"
echo "# Phase 3: Building llvmCore with dragonegg"
configure_llvmCore 3 $Flavor \
$llvmCore_de_phase3_objdir $llvmCore_de_phase3_installdir
build_llvmCore 3 $Flavor \
$llvmCore_de_phase3_objdir
build_dragonegg 3 $Flavor $llvmCore_de_phase3_installdir $dragonegg_phase3_objdir
########################################################################
# Testing: Test phase 3
c_compiler="$gcc_compiler -fplugin=$dragonegg_phase3_objdir/dragonegg.so"
cxx_compiler="$gxx_compiler -fplugin=$dragonegg_phase3_objdir/dragonegg.so"
echo "# Testing - built with dragonegg"
test_llvmCore 3 $Flavor $llvmCore_de_phase3_objdir
########################################################################
# Compare .o files between Phase2 and Phase3 and report which ones differ.
echo
echo "# Comparing Phase 2 and Phase 3 files"
for o in `find $llvmCore_de_phase2_objdir -name '*.o'` \
`find $dragonegg_phase2_objdir -name '*.o'` ; do
p3=`echo $o | sed -e 's,Phase2,Phase3,'`
if ! cmp --ignore-initial=16 $o $p3 > /dev/null 2>&1 ; then
echo "file `basename $o` differs between dragonegg phase 2 and phase 3"
fi
done
fi
# Otherwise just test the core.
if [ "$do_clang" != "yes" -a "$do_dragonegg" != "yes" ]; then
echo "# Testing - built with system compiler"
test_llvmCore 1 $Flavor $llvmCore_phase1_objdir
fi
done
) 2>&1 | tee $LogDir/testing.$Release-$RC.log
set +e
# Woo hoo!
echo "### Testing Finished ###"
echo "### Logs: $LogDir"
exit 0
|
lodyagin/bare_cxx
|
tests/utils/release/test-release.sh
|
Shell
|
bsd-3-clause
| 17,363 |
#! /bin/sh
srcdir=`dirname $0`
test -z "$srcdir" && srcdir=.
ORIGDIR=`pwd`
cd $srcdir
autoreconf -v --install || exit 1
cd $ORIGDIR || exit $?
./configure $@ || exit $?
|
bentiss/libratbag-old
|
uLogitech/autogen.sh
|
Shell
|
mit
| 173 |
#!/bin/bash
###########################################################################################
## Copyright 2003, 2015 IBM Corp ##
## ##
## Redistribution and use in source and binary forms, with or without modification, ##
## are permitted provided that the following conditions are met: ##
## 1.Redistributions of source code must retain the above copyright notice, ##
## this list of conditions and the following disclaimer. ##
## 2.Redistributions in binary form must reproduce the above copyright notice, this ##
## list of conditions and the following disclaimer in the documentation and/or ##
## other materials provided with the distribution. ##
## ##
## THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS AND ANY EXPRESS ##
## OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF ##
## MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ##
## THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, ##
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ##
## SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) ##
## HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, ##
## OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ##
## SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ##
############################################################################################
## File : libxml2.sh
##
## Description: Tests the XML C libraries, xmlcatalog, and xmllint.
##
## Author: Andrew Pham, [email protected]
###########################################################################################
## source the utility functions
#cd `dirname $0`
#LTPBIN=${LTPBIN%/shared}/libxml2
source $LTPBIN/tc_utils.source
TESTDIR=${LTPBIN%/shared}/libxml2
cd $TESTDIR
# Initialize output messages
ErrMsg="Failed: Not available."
ErrMsg1="Failed: Unexpected output. Expected:"
################################################################################
# global variables
################################################################################
commands=" libxml2 xmlcatalog xmllint "
################################################################################
# the testcase functions
################################################################################
function TC_libxml2()
{
cd libxml2-tests
./runtest >$stdout 2>$stderr
# Filter out known-noisy test entries; fail only if anything else remains
# on stderr. The grep status must be captured explicitly: after an if with
# a false condition and no else branch, $? is reset to 0.
if grep -Ev "extparsedent|758588|759020|759573" $stderr; then
	rc=1
else
	rc=0
	cat /dev/null > $stderr
fi
tc_pass_or_fail $rc "libxml2 runtest failure"
}
function TC_xmlcatalog()
{
local id=id$$b
# Check if supporting utilities are available
tc_exec_or_break grep || return
xmlcatalog --create --noout $TCTMP/xcatlog >/dev/null 2>$stderr
tc_pass_or_fail $? "--create --nout does not work." || return
tc_register "xmlcatalog --add --noout "
let TST_TOTAL+=1
xmlcatalog --add public $id xadd123 --noout $TCTMP/xcatlog
tc_fail_if_bad $? "$ErrMsg" || return
cat $TCTMP/xcatlog | grep $id >&/dev/null
tc_pass_or_fail $? "$ErrMsg1 $id in catalog." || return
tc_register "xmlcatalog --del --noout "
let TST_TOTAL+=1
xmlcatalog --del xadd123 --noout $TCTMP/xcatlog
tc_fail_if_bad $? "$ErrMsg" || return
grep -q $id $TCTMP/xcatlog
[ $? -ne 0 ]
tc_pass_or_fail $? "$ErrMsg1 $id NOT found in catalog."
}
function TC_xmllint()
{
local id=id123
# Check if supporting utilities are available
tc_exec_or_break echo grep || return
# Creating a test xml file
cat > $TCTMP/test.xml<<-EOF
<?xml version="1.0" encoding="ISO-8859-1" ?>
<Msg>
<text1 title="Testing xmllint">
id123: My testing sample file.
</text1>
</Msg>
EOF
xmllint --htmlout --nowrap $TCTMP/test.xml >&/dev/null
tc_fail_if_bad $? "--htmlout does not work." || return
xmllint --htmlout --nowrap $TCTMP/test.xml | grep $id >&/dev/null
tc_pass_or_fail $? " --htmlout $ErrMsg1 $id" || return
tc_register "xmlcatalog --timing "
let TST_TOTAL+=1
xmllint --timing $TCTMP/test.xml >&$TCTMP/xmltiming.tst
tc_fail_if_bad $? "--timing does not work." || return
cat $TCTMP/xmltiming.tst | grep Freeing >&/dev/null
tc_pass_or_fail $? "--timing: Failed: Unexpected output."
}
################################################################################
# main
################################################################################
set $commands
TST_TOTAL=$#
tc_setup
[ "$TCTMP" ] && rm -rf $TCTMP/*
FRC=0
#
# run all the tests
#
for cmd in $commands ; do
tc_register $cmd
TC_$cmd || FRC=$?
done
exit $FRC
|
PoornimaNayak/autotest-client-tests
|
linux-tools/libxml2/libxml2.sh
|
Shell
|
gpl-2.0
| 5,055 |
#!/bin/sh
wget_timeout=`nvram get apps_wget_timeout`
#wget_options="-nv -t 2 -T $wget_timeout --dns-timeout=120"
wget_options="-q -t 2 -T $wget_timeout --no-check-certificate"
nvram set webs_state_update=0 # INITIALIZING
nvram set webs_state_flag=0 # 0: Don't do upgrade 1: Do upgrade
nvram set webs_state_error=0
nvram set webs_state_url=""
# current firmware information
current_firm=`nvram get firmver`
current_firm=`echo $current_firm | sed s/'\.'//g;`
current_buildno=`nvram get buildno`
current_extendno=`nvram get extendno`
current_extendno=`echo $current_extendno | sed s/-g.*//;`
# get firmware information
forsq=`nvram get apps_sq`
model=`nvram get productid`
if [ "$model" == "RT-AC68U" ] || [ "$model" == "RT-AC56U" ] || [ "$model" == "RT-N18U" ]; then
model_30="1"
else
model_30="0"
fi
tmo=`nvram show | grep rc_support | grep tmo`
if [ "$forsq" == "1" ]; then
if [ "$tmo" != "" ]; then
echo "---- update sq tmo----" > /tmp/webs_upgrade.log
wget $wget_options https://dlcdnets.asus.com/pub/ASUS/LiveUpdate/Release/Wireless_SQ/wlan_update_tmo.zip -O /tmp/wlan_update.txt
elif [ "$model_30" == "1" ]; then
echo "---- update sq normal for model_30 ----" > /tmp/webs_upgrade.log
wget $wget_options https://dlcdnets.asus.com/pub/ASUS/LiveUpdate/Release/Wireless_SQ/wlan_update_30.zip -O /tmp/wlan_update.txt
else
echo "---- update sq normal----" > /tmp/webs_upgrade.log
wget $wget_options https://dlcdnets.asus.com/pub/ASUS/LiveUpdate/Release/Wireless_SQ/wlan_update_v2.zip -O /tmp/wlan_update.txt
fi
else
if [ "$tmo" != "" ]; then
echo "---- update real tmo----" > /tmp/webs_upgrade.log
wget $wget_options https://dlcdnets.asus.com/pub/ASUS/LiveUpdate/Release/Wireless/wlan_update_tmo.zip -O /tmp/wlan_update.txt
elif [ "$model_30" == "1" ]; then
echo "---- update real normal for model_30 ----" > /tmp/webs_upgrade.log
wget $wget_options https://dlcdnets.asus.com/pub/ASUS/LiveUpdate/Release/Wireless/wlan_update_30.zip -O /tmp/wlan_update.txt
else
echo "---- update real normal----" > /tmp/webs_upgrade.log
wget $wget_options https://dlcdnets.asus.com/pub/ASUS/LiveUpdate/Release/Wireless/wlan_update_v2.zip -O /tmp/wlan_update.txt
fi
fi
if [ "$?" != "0" ]; then
nvram set webs_state_error=1
else
# TODO get and parse information
firmver=`grep $model /tmp/wlan_update.txt | sed s/.*#FW//;`
firmver=`echo $firmver | sed s/#.*//;`
buildno=`echo $firmver | sed s/....//;`
firmver=`echo $firmver | sed s/$buildno$//;`
extendno=`grep $model /tmp/wlan_update.txt | sed s/.*#EXT//;`
extendno=`echo $extendno | sed s/#.*//;`
lextendno=`echo $extendno | sed s/-g.*//;`
nvram set webs_state_info=${firmver}_${buildno}_${extendno}
urlpath=`grep $model /tmp/wlan_update.txt | sed s/.*#URL//;`
urlpath=`echo $urlpath | sed s/#.*//;`
nvram set webs_state_url=${urlpath}
rm -f /tmp/wlan_update.*
fi
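# The sed parsing above assumes one '#'-delimited record per model in
# wlan_update.txt, along the lines of (hypothetical values):
#   RT-AC68U#FW3004380#EXT7743-g123456#URLFW_RT_AC68U.zip#
# which would yield firmver=3004, buildno=380, extendno=7743-g123456.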
echo "---- $current_firm $current_buildno $current_extendno----" >> /tmp/webs_upgrade.log
if [ "$firmver" == "" ] || [ "$buildno" == "" ] || [ "$lextendno" == "" ]; then
nvram set webs_state_error=1 # exist no Info
else
if [ "$current_buildno" -lt "$buildno" ]; then
echo "---- buildno: $buildno ----" >> /tmp/webs_upgrade.log
nvram set webs_state_flag=1 # Do upgrade
elif [ "$current_buildno" -eq "$buildno" ]; then
if [ "$current_firm" -lt "$firmver"]; then
echo "---- firmver: $firmver ----" >> /tmp/webs_upgrade.log
nvram set webs_state_flag=1 # Do upgrade
elif [ "$current_firm" -eq "$firmver" ]; then
if [ "$current_extendno" -lt "$lextendno" ]; then
echo "---- lextendno: $lextendno ----" >> /tmp/webs_upgrade.log
nvram set webs_state_flag=1 # Do upgrade
fi
fi
fi
fi
nvram set webs_state_update=1
|
megraf/asuswrt-merlin
|
release/src/router/rom/webs_scripts/ssl_webs_update.sh
|
Shell
|
gpl-2.0
| 3,800 |
#!/bin/sh
RAM_ROOT=/tmp/root
[ -x /usr/bin/ldd ] || ldd() { LD_TRACE_LOADED_OBJECTS=1 $*; }
libs() { ldd $* 2>/dev/null | sed -r 's/(.* => )?(.*) .*/\2/'; }
install_file() { # <file> [ <file> ... ]
for file in "$@"; do
dest="$RAM_ROOT/$file"
[ -f $file -a ! -f $dest ] && {
dir="$(dirname $dest)"
mkdir -p "$dir"
cp $file $dest
}
done
}
install_bin() { # <file> [ <symlink> ... ]
src=$1
files=$1
[ -x "$src" ] && files="$src $(libs $src)"
install_file $files
shift
for link in "$@"; do {
dest="$RAM_ROOT/$link"
dir="$(dirname $dest)"
mkdir -p "$dir"
[ -f "$dest" ] || ln -s $src $dest
}; done
}
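# Example: stage busybox together with the libraries it links against and
# give it an additional /bin/ash name inside $RAM_ROOT:
#   install_bin /bin/busybox /bin/ash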
supivot() { # <new_root> <old_root>
/bin/mount | grep "on $1 type" 2>&- 1>&- || /bin/mount -o bind $1 $1
mkdir -p $1$2 $1/proc $1/sys $1/dev $1/tmp $1/overlay && \
/bin/mount -o noatime,move /proc $1/proc && \
pivot_root $1 $1$2 || {
/bin/umount -l $1 $1
return 1
}
/bin/mount -o noatime,move $2/sys /sys
/bin/mount -o noatime,move $2/dev /dev
/bin/mount -o noatime,move $2/tmp /tmp
/bin/mount -o noatime,move $2/overlay /overlay 2>&-
return 0
}
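# run_ramfs below calls this as `supivot $RAM_ROOT /mnt`, which makes the
# ramdisk the new root and leaves the old root reachable under /mnt.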
run_ramfs() { # <command> [...]
install_bin /bin/busybox /bin/ash /bin/sh /bin/mount /bin/umount \
/sbin/pivot_root /sbin/reboot /bin/sync /bin/dd /bin/grep \
/bin/cp /bin/mv /bin/tar /usr/bin/md5sum "/usr/bin/[" /bin/dd \
/bin/vi /bin/ls /bin/cat /usr/bin/awk /usr/bin/hexdump \
/bin/sleep /bin/zcat /usr/bin/bzcat /usr/bin/printf /usr/bin/wc \
/bin/cut /usr/bin/printf /bin/sync /bin/mkdir /bin/rmdir \
/bin/rm /usr/bin/basename /bin/kill /bin/chmod /usr/bin/find
install_bin /bin/uclient-fetch /bin/wget
install_bin /sbin/mtd
install_bin /sbin/mount_root
install_bin /sbin/snapshot
install_bin /sbin/snapshot_tool
install_bin /usr/sbin/ubiupdatevol
install_bin /usr/sbin/ubiattach
install_bin /usr/sbin/ubiblock
install_bin /usr/sbin/ubiformat
install_bin /usr/sbin/ubidetach
install_bin /usr/sbin/ubirsvol
install_bin /usr/sbin/ubirmvol
install_bin /usr/sbin/ubimkvol
install_bin /usr/sbin/partx
for file in $RAMFS_COPY_BIN; do
install_bin ${file//:/ }
done
install_file /etc/resolv.conf /lib/*.sh /lib/functions/*.sh /lib/upgrade/*.sh $RAMFS_COPY_DATA
[ -L "/lib64" ] && ln -s /lib $RAM_ROOT/lib64
supivot $RAM_ROOT /mnt || {
echo "Failed to switch over to ramfs. Please reboot."
exit 1
}
/bin/mount -o remount,ro /mnt
/bin/umount -l /mnt
grep /overlay /proc/mounts > /dev/null && {
/bin/mount -o noatime,remount,ro /overlay
/bin/umount -l /overlay
}
# spawn a new shell from ramdisk to reduce the probability of cache issues
exec /bin/busybox ash -c "$*"
}
kill_remaining() { # [ <signal> ]
local sig="${1:-TERM}"
echo -n "Sending $sig to remaining processes ... "
local my_pid=$$
local my_ppid=$(cut -d' ' -f4 /proc/$my_pid/stat)
local my_ppisupgraded=
grep -q upgraded /proc/$my_ppid/cmdline >/dev/null && {
local my_ppisupgraded=1
}
local stat
for stat in /proc/[0-9]*/stat; do
[ -f "$stat" ] || continue
local pid name state ppid rest
read pid name state ppid rest < $stat
name="${name#(}"; name="${name%)}"
local cmdline
read cmdline < /proc/$pid/cmdline
# Skip kernel threads
[ -n "$cmdline" ] || continue
if [ $$ -eq 1 ] || [ $my_ppid -eq 1 ] && [ -n "$my_ppisupgraded" ]; then
# Running as init process, kill everything except me
if [ $pid -ne $$ ] && [ $pid -ne $my_ppid ]; then
echo -n "$name "
kill -$sig $pid 2>/dev/null
fi
else
case "$name" in
# Skip essential services
*procd*|*ash*|*init*|*watchdog*|*ssh*|*dropbear*|*telnet*|*login*|*hostapd*|*wpa_supplicant*|*nas*|*relayd*) : ;;
# Killable process
*)
if [ $pid -ne $$ ] && [ $ppid -ne $$ ]; then
echo -n "$name "
kill -$sig $pid 2>/dev/null
fi
;;
esac
fi
done
echo ""
}
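# Illustrative call sequence during an upgrade (not part of this file):
#   kill_remaining TERM; sleep 3; kill_remaining KILL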
run_hooks() {
local arg="$1"; shift
for func in "$@"; do
eval "$func $arg"
done
}
ask_bool() {
local default="$1"; shift;
local answer="$default"
[ "$INTERACTIVE" -eq 1 ] && {
case "$default" in
0) echo -n "$* (y/N): ";;
*) echo -n "$* (Y/n): ";;
esac
read answer
case "$answer" in
y*) answer=1;;
n*) answer=0;;
*) answer="$default";;
esac
}
[ "$answer" -gt 0 ]
}
v() {
[ "$VERBOSE" -ge 1 ] && echo "$@"
}
rootfs_type() {
/bin/mount | awk '($3 ~ /^\/$/) && ($5 !~ /rootfs/) { print $5 }'
}
get_image() { # <source> [ <command> ]
local from="$1"
local conc="$2"
local cmd
case "$from" in
http://*|ftp://*) cmd="wget -O- -q";;
*) cmd="cat";;
esac
if [ -z "$conc" ]; then
local magic="$(eval $cmd \"$from\" 2>/dev/null | dd bs=2 count=1 2>/dev/null | hexdump -n 2 -e '1/1 "%02x"')"
case "$magic" in
1f8b) conc="zcat";;
425a) conc="bzcat";;
esac
fi
eval "$cmd \"$from\" 2>/dev/null ${conc:+| $conc}"
}
get_magic_word() {
(get_image "$@" | dd bs=2 count=1 | hexdump -v -n 2 -e '1/1 "%02x"') 2>/dev/null
}
get_magic_long() {
(get_image "$@" | dd bs=4 count=1 | hexdump -v -n 4 -e '1/1 "%02x"') 2>/dev/null
}
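# The magic words above are the leading image bytes in lowercase hex; the
# values matched in get_image are "1f8b" (gzip) and "425a" (bzip2).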
jffs2_copy_config() {
if grep rootfs_data /proc/mtd >/dev/null; then
# squashfs+jffs2
mtd -e rootfs_data jffs2write "$CONF_TAR" rootfs_data
else
# jffs2
mtd jffs2write "$CONF_TAR" rootfs
fi
}
# Flash firmware to MTD partition
#
# $(1): path to image
# $(2): (optional) pipe command to extract firmware, e.g. dd bs=n skip=m
default_do_upgrade() {
sync
if [ "$SAVE_CONFIG" -eq 1 ]; then
get_image "$1" "$2" | mtd $MTD_CONFIG_ARGS -j "$CONF_TAR" write - "${PART_NAME:-image}"
else
get_image "$1" "$2" | mtd write - "${PART_NAME:-image}"
fi
}
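# Example (hypothetical image path; the optional second argument is a pipe
# command used to unwrap the image before writing, per the comment above):
#   default_do_upgrade /tmp/sysupgrade.bin "dd bs=512 skip=1"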
do_upgrade() {
v "Performing system upgrade..."
if type 'platform_do_upgrade' >/dev/null 2>/dev/null; then
platform_do_upgrade "$ARGV"
else
default_do_upgrade "$ARGV"
fi
if [ "$SAVE_CONFIG" -eq 1 ] && type 'platform_copy_config' >/dev/null 2>/dev/null; then
platform_copy_config
fi
v "Upgrade completed"
[ -n "$DELAY" ] && sleep "$DELAY"
ask_bool 1 "Reboot" && {
v "Rebooting system..."
umount -a
reboot -f
sleep 5
echo b 2>/dev/null >/proc/sysrq-trigger
}
}
|
Artox/lede-project
|
package/base-files/files/lib/upgrade/common.sh
|
Shell
|
gpl-2.0
| 6,034 |
#!/bin/sh
# business-process.sh
# ----------------------------------------------------------------------------
# Copyright 2016 WSO2, Inc. http://www.wso2.org
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
cygwin=false;
darwin=false;
os400=false;
mingw=false;
case "`uname`" in
CYGWIN*) cygwin=true;;
MINGW*) mingw=true;;
OS400*) os400=true;;
Darwin*) darwin=true
if [ -z "$JAVA_VERSION" ] ; then
JAVA_VERSION="CurrentJDK"
else
echo "Using Java version: $JAVA_VERSION"
fi
if [ -z "$JAVA_HOME" ] ; then
JAVA_HOME=/System/Library/Frameworks/JavaVM.framework/Versions/${JAVA_VERSION}/Home
fi
;;
esac
# resolve links - $0 may be a softlink
PRG="$0"
while [ -h "$PRG" ]; do
ls=`ls -ld "$PRG"`
link=`expr "$ls" : '.*-> \(.*\)$'`
if expr "$link" : '.*/.*' > /dev/null; then
PRG="$link"
else
PRG=`dirname "$PRG"`/"$link"
fi
done
# Get standard environment variables
PRGDIR=`dirname "$PRG"`
# Only set CARBON_HOME if not already set
[ -z "$CARBON_HOME" ] && CARBON_HOME=`cd "$PRGDIR/.." ; pwd`
###########################################################################
NAME=start-bps
# Path to the actual daemon startup script
BPS_INIT_SCRIPT="$CARBON_HOME/wso2/business-process/bin/wso2server.sh"
# If the daemon is not there, then exit.
[ -f "$BPS_INIT_SCRIPT" ] || { echo "error: $BPS_INIT_SCRIPT not found"; exit 1; }
sh $BPS_INIT_SCRIPT "$@" &
trap "sh $BPS_INIT_SCRIPT stop; exit;" INT TERM
while :
do
sleep 60
done
|
wso2/product-ei
|
distribution/src/scripts/business-process.sh
|
Shell
|
apache-2.0
| 1,978 |
#!/bin/bash
#######################################################################
#
# Linux on Hyper-V and Azure Test Code, ver. 1.0.0
# Copyright (c) Microsoft Corporation
#
# All rights reserved.
# Licensed under the Apache License, Version 2.0 (the ""License"");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION
# ANY IMPLIED WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR
# PURPOSE, MERCHANTABLITY OR NON-INFRINGEMENT.
#
# See the Apache Version 2.0 License for specific language governing
# permissions and limitations under the License.
#
#######################################################################
#######################################################################
#
# perf_redis.sh
# Author : SHITAL SAVEKAR <[email protected]>
#
# Description:
# Download and run redis benchmark tests.
# This script needs to be run on client VM.
#
# Supported Distros:
# Ubuntu 16.04
#######################################################################
CONSTANTS_FILE="./constants.sh"
ICA_TESTRUNNING="TestRunning" # The test is running
ICA_TESTCOMPLETED="TestCompleted" # The test completed successfully
ICA_TESTABORTED="TestAborted" # Error during the setup of the test
ICA_TESTFAILED="TestFailed" # Error occurred during the test
touch ./redisTest.log
ConfigureRedisUbuntu()
{
LogMsg "Configuring ${1} for redis test..."
ssh ${1} "apt-get update"
ssh ${1} "apt-get -y install libaio1 sysstat gcc"
ssh ${1} "wget http://download.redis.io/releases/redis-${redisVersion}.tar.gz"
ssh ${1} "tar -xvf redis-${redisVersion}.tar.gz && cd redis-${redisVersion}/ && make && make install"
ssh ${1} "cp -ar redis-${redisVersion}/src/* ."
LogMsg "${1} configured for Redis."
}
LogMsg()
{
echo `date "+%b %d %Y %T"` : "${1}" # Add the time stamp to the log message
echo `date "+%b %d %Y %T"` : "${1}" >> ./redisTest.log
}
UpdateTestState()
{
LogMsg "${1}" > ./state.txt
}
if [ -e ${CONSTANTS_FILE} ]; then
source ${CONSTANTS_FILE}
else
errMsg="Error: missing ${CONSTANTS_FILE} file"
LogMsg "${errMsg}"
UpdateTestState $ICA_TESTABORTED
exit 10
fi
if [ ! ${server} ]; then
errMsg="Please add/provide value for server in constants.sh. server=<server ip>"
LogMsg "${errMsg}"
LogMsg "${errMsg}" >> ./summary.log
UpdateTestState $ICA_TESTABORTED
exit 1
fi
LogMsg "Server=${server}"
if [ ! ${client} ]; then
errMsg="Please add/provide value for client in constants.sh. client=<client ip>"
LogMsg "${errMsg}"
LogMsg "${errMsg}" >> ./summary.log
UpdateTestState $ICA_TESTABORTED
exit 1
fi
LogMsg "Client=${client}"
if [ ! ${test_pipeline_collection} ]; then
errMsg="Please add/provide value for test_pipeline_collection in constants.sh. test_pipeline_collection=(1 2 4 8 16)"
LogMsg "${errMsg}"
LogMsg "${errMsg}" >> ./summary.log
UpdateTestState $ICA_TESTABORTED
exit 1
fi
LogMsg "test_pipeline_collection=${test_pipeline_collection}"
if [ ! ${redisVersion} ]; then
errMsg="Please add/provide value for redisVersion in constants.sh. redisVersion=2.8.17"
LogMsg "${errMsg}"
LogMsg "${errMsg}" >> ./summary.log
UpdateTestState $ICA_TESTABORTED
exit 1
fi
LogMsg "redisVersion=${redisVersion}"
if [ ! ${redis_test_suites} ]; then
errMsg="Please add/provide value for redis_test_suites in constants.sh. redis_test_suites=get,set"
LogMsg "${errMsg}"
LogMsg "${errMsg}" >> ./summary.log
UpdateTestState $ICA_TESTABORTED
exit 1
fi
#Make & build Redis on client and server Machine
LogMsg "Configuring client ${client}..."
ConfigureRedisUbuntu ${client}
LogMsg "Configuring server ${server}..."
ConfigureRedisUbuntu ${server}
pkill -f redis-benchmark
ssh root@${server} pkill -f redis-server > /dev/null
t=0
while [ "x${test_pipeline_collection[$t]}" != "x" ]
do
pipelines=${test_pipeline_collection[$t]}
LogMsg "NEXT TEST: $pipelines pipelines"
# prepare running redis-server
LogMsg "Starting redis-server..."
ssh root@${server} "sar -n DEV 1 900" 2>&1 > redis-server-pipelines-${pipelines}.sar.netio.log &
ssh root@${server} "iostat -x -d 1 900" 2>&1 > redis-server-pipelines-${pipelines}.iostat.diskio.log &
ssh root@${server} "vmstat 1 900" 2>&1 > redis-server-pipelines-${pipelines}.vmstat.memory.cpu.log &
#start running the redis-server on server
ssh root@${server} "./redis-server > /dev/null &"
LogMsg "Server started successfully. Sleeping 10 Secondss.."
sleep 10
# prepare running redis-benchmark
sar -n DEV 1 900 > redis-client-pipelines-${pipelines}.sar.netio.log 2>&1 &
iostat -x -d 1 900 > redis-client-pipelines-${pipelines}.iostat.diskio.log 2>&1 &
vmstat 1 900 > redis-client-pipelines-${pipelines}.vmstat.memory.cpu.log 2>&1 &
#start running the redis-benchmark on client
LogMsg "Starting redis-benchmark on client..."
LogMsg "-> Test running with ${pipelines} pipelines."
./redis-benchmark -h $server -c 1000 -P $pipelines -t $redis_test_suites -d 4000 -n 10000000 > redis-client-pipelines-${pipelines}.set.get.log
LogMsg "-> done"
#cleanup redis-server
LogMsg "Cleaning Server..."
ssh root@${server} pkill -f sar > /dev/null 2>&1
ssh root@${server} pkill -f iostat > /dev/null 2>&1
ssh root@${server} pkill -f vmstat > /dev/null 2>&1
ssh root@${server} pkill -f redis-server > /dev/null 2>&1
#cleanup redis-benchmark
LogMsg "Cleaning Client..."
pkill -f sar > /dev/null 2>&1
pkill -f iostat > /dev/null 2>&1
pkill -f vmstat > /dev/null 2>&1
pkill -f redis-benchmark > /dev/null 2>&1
t=$(($t + 1))
LogMsg "Sleeping 30 Seconds..."
sleep 30
done
UpdateTestState $ICA_TESTCOMPLETED
|
Azure/azure-linux-automation
|
remote-scripts/perf_redis.sh
|
Shell
|
apache-2.0
| 6,283 |
#Aqueduct - Compliance Remediation Content
#Copyright (C) 2011,2012 Vincent C. Passaro ([email protected])
#
#This program is free software; you can redistribute it and/or
#modify it under the terms of the GNU General Public License
#as published by the Free Software Foundation; either version 2
#of the License, or (at your option) any later version.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 51 Franklin Street, Fifth Floor,
#Boston, MA 02110-1301, USA.
#!/bin/bash
######################################################################
#By Tummy a.k.a Vincent C. Passaro #
#Vincent[.]Passaro[@]gmail[.]com #
#www.vincentpassaro.com #
######################################################################
#_____________________________________________________________________
#| Version | Change Information | Author | Date |
#|__________|_______________________|____________________|____________|
#| 1.0 | Initial Script | Vincent C. Passaro | 20-oct-2011|
#| | Creation | | |
#|__________|_______________________|____________________|____________|
#
# 1 May 2012 / Lee Kinser / [email protected]: corrected erroneous
# null redirect
#######################DISA INFORMATION###############################
#Group ID (Vulid): V-22362
#Group Title: GEN001890
#Rule ID: SV-26482r1_rule
#Severity: CAT II
#Rule Version (STIG-ID): GEN001890
#Rule Title: Local initialization files must not have extended ACLs.
#
#Vulnerability Discussion: Local initialization files are used to
#configure the user's shell environment upon login. Malicious
#modification of these files could compromise accounts upon logon.
#
#Responsibility: System Administrator
#IAControls: ECLP-1
#
#Check Content:
#Check user home directories for local initialization files that have
#extended ACLs.
# cut -d : -f 6 /etc/passwd | xargs -n1 -IDIR ls -alL DIR/.login
#DIR/.cschrc DIR/.logout DIR/.profile DIR/.bash_profile DIR/.bashrc
#DIR/.bash_logout DIR/.env DIR/.dtprofile DIR/.dispatch DIR/.emacs DIR/.exrc
#If the permissions include a '+', the file has an extended ACL,
#this is a finding.
#
#Fix Text: Remove the extended ACL from the file.
# setfacl --remove-all <local initialization file with extended ACL>
#######################DISA INFORMATION###############################
#Global Variables#
PDI=GEN001890
HOMEDIR=$( cut -d : -f 6 /etc/passwd | grep -v "^/$" )
BADFACL=$( for line in $HOMEDIR; do getfacl --absolute-names --skip-base $line/.login $line/.cschrc $line/.logout $line/.profile $line/.bash_profile $line/.bashrc $line/.bash_logout $line/.env $line/.dtprofile $line/.dispatch $line/.emacs $line/.exrc 2>/dev/null | grep "# file:" | cut -d ":" -f 2; done )
#Start-Lockdown
for file in $BADFACL
do
setfacl --remove-all $file
done
|
quark-pat/CLIP
|
packages/aqueduct/aqueduct/compliance/Bash/STIG/rhel-5-beta/prod/GEN001890.sh
|
Shell
|
apache-2.0
| 3,216 |
#Aqueduct - Compliance Remediation Content
#Copyright (C) 2011,2012 Vincent C. Passaro ([email protected])
#
#This program is free software; you can redistribute it and/or
#modify it under the terms of the GNU General Public License
#as published by the Free Software Foundation; either version 2
#of the License, or (at your option) any later version.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 51 Franklin Street, Fifth Floor,
#Boston, MA 02110-1301, USA.
#!/bin/bash
######################################################################
#By Tummy a.k.a Vincent C. Passaro #
#Vincent[.]Passaro[@]gmail[.]com #
#www.vincentpassaro.com #
######################################################################
#_____________________________________________________________________
#| Version | Change Information | Author | Date |
#|__________|_______________________|____________________|____________|
#| 1.0 | Initial Script | Vincent C. Passaro | 20-oct-2011|
#| | Creation | | |
#|__________|_______________________|____________________|____________|
#
#
# - updated by Shannon Mitchell([email protected])
# on 23-jan-2012 to add a check to only lock non-root system accounts if
# they are not currently locked.
#######################DISA INFORMATION###############################
#Group ID (Vulid): V-810
#Group Title: Disabled default system accounts
#Rule ID: SV-810r7_rule
#Severity: CAT II
#Rule Version (STIG-ID): GEN002640
#Rule Title: Default system accounts must be disabled or removed.
#
#Vulnerability Discussion: Vendor accounts and software may contain
#backdoors that will allow unauthorized access to the system. These
#backdoors are common knowledge and present a threat to system security
#if the account is not disabled.
#
#Responsibility: System Administrator
#IAControls: IAAC-1
#
#Check Content:
#Determine if default system accounts (such as those for sys, bin, uucp,
#nuucp, daemon, smtp) have been disabled.
# cat /etc/shadow
#If an account's password field is "*", "*LK*", or is prefixed with a '!',
#the account is locked or disabled.
#If there are any default system accounts that are not locked this is a
#finding.
#
#Fix Text: Lock the default system account(s).
# passwd -l <user>
#######################DISA INFORMATION###############################
#Global Variables#
PDI=GEN002640
#Start-Lockdown
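# Lock every non-root account with a UID below 500 (the system-account range
# on RHEL 5) whose shadow entry is not already locked or disabled.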
for NAME in `awk -F':' '!/^root/{if($3 < 500)print $1}' /etc/passwd`
do
egrep "^${NAME}:(\!|\*|\*LK\*)" /etc/shadow > /dev/null
if [ $? -ne 0 ]
then
usermod -L $NAME
fi
done
|
quark-pat/CLIP
|
packages/aqueduct/aqueduct/compliance/Bash/STIG/rhel-5-beta/prod/GEN002640.sh
|
Shell
|
apache-2.0
| 3,031 |
#!/bin/sh
# $FreeBSD$
. `dirname $0`/conf.sh
echo "1..5"
balance="split"
ddbs=8192
nblocks1=1024
nblocks2=`expr $nblocks1 / \( $ddbs / 512 \)`
src=`mktemp $base.XXXXXX` || exit 1
dst=`mktemp $base.XXXXXX` || exit 1
dd if=/dev/random of=${src} bs=$ddbs count=$nblocks2 >/dev/null 2>&1
us0=$(attach_md -t malloc -s `expr $nblocks1 + 1`) || exit 1
us1=$(attach_md -t malloc -s `expr $nblocks1 + 1`) || exit 1
us2=$(attach_md -t malloc -s `expr $nblocks1 + 1`) || exit 1
gmirror label -b $balance -s `expr $ddbs / 2` $name /dev/${us0} /dev/${us1} /dev/${us2} || exit 1
devwait
dd if=${src} of=/dev/mirror/${name} bs=$ddbs count=$nblocks2 >/dev/null 2>&1
dd if=/dev/mirror/${name} of=${dst} bs=$ddbs count=$nblocks2 >/dev/null 2>&1
if [ `md5 -q ${src}` != `md5 -q ${dst}` ]; then
echo "not ok 1"
else
echo "ok 1"
fi
gmirror remove $name ${us0}
dd if=/dev/mirror/${name} of=${dst} bs=$ddbs count=$nblocks2 >/dev/null 2>&1
if [ `md5 -q ${src}` != `md5 -q ${dst}` ]; then
echo "not ok 2"
else
echo "ok 2"
fi
gmirror remove $name ${us1}
dd if=/dev/mirror/${name} of=${dst} bs=$ddbs count=$nblocks2 >/dev/null 2>&1
if [ `md5 -q ${src}` != `md5 -q ${dst}` ]; then
echo "not ok 3"
else
echo "ok 3"
fi
gmirror remove $name ${us2}
dd if=/dev/mirror/${name} of=${dst} bs=$ddbs count=$nblocks2 >/dev/null 2>&1
if [ `md5 -q ${src}` != `md5 -q ${dst}` ]; then
echo "not ok 4"
else
echo "ok 4"
fi
# mirror/${name} should be removed.
if [ -c /dev/${name} ]; then
echo "not ok 5"
else
echo "ok 5"
fi
rm -f ${src} ${dst}
|
TigerBSD/TigerBSD
|
FreeBSD/tests/sys/geom/class/mirror/5_test.sh
|
Shell
|
isc
| 1,522 |
#!/bin/zsh
# WARP DIRECTORY
# ==============
# Jump to custom directories in terminal
# because `cd` takes too long...
#
# @github.com/mfaerevaag/wd
# version
readonly WD_VERSION=0.5.0
# colors
readonly WD_BLUE="\033[96m"
readonly WD_GREEN="\033[92m"
readonly WD_YELLOW="\033[93m"
readonly WD_RED="\033[91m"
readonly WD_NOC="\033[m"
## functions
# helpers
wd_yesorno()
{
# variables
local question="${1}"
local prompt="${question} "
local yes_RETVAL="0"
local no_RETVAL="3"
local RETVAL=""
local answer=""
# read-eval loop
while true ; do
printf $prompt
read -r answer
case ${answer:=${default}} in
"Y"|"y"|"YES"|"yes"|"Yes" )
RETVAL=${yes_RETVAL} && \
break
;;
"N"|"n"|"NO"|"no"|"No" )
RETVAL=${no_RETVAL} && \
break
;;
* )
echo "Please provide a valid answer (y or n)"
;;
esac
done
return ${RETVAL}
}
wd_print_msg()
{
if [[ -z $wd_quiet_mode ]]
then
local color=$1
local msg=$2
if [[ $color == "" || $msg == "" ]]
then
print " ${WD_RED}*${WD_NOC} Could not print message. Sorry!"
else
print " ${color}*${WD_NOC} ${msg}"
fi
fi
}
wd_print_usage()
{
cat <<- EOF
Usage: wd [command] [point]
Commands:
<point> Warps to the directory specified by the warp point
<point> <path> Warps to the directory specified by the warp point with path appended
add <point> Adds the current working directory to your warp points
add Adds the current working directory to your warp points with current directory's name
rm <point> Removes the given warp point
rm Removes the given warp point with current directory's name
show <point> Print path to given warp point
show Print warp points to current directory
list Print all stored warp points
ls <point> Show files from given warp point (ls)
path <point> Show the path to given warp point (pwd)
clean Remove points warping to nonexistent directories (will prompt unless --force is used)
-v | --version Print version
-d | --debug Exit after execution with exit codes (for testing)
-c | --config Specify config file (default ~/.warprc)
-q | --quiet Suppress all output
-f | --force Allows overwriting without warning (for add & clean)
help Show this extremely helpful text
EOF
}
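# Example session (hypothetical warp point name):
#   wd add work      # store the current directory as 'work'
#   wd work          # later: warp back to it
#   wd work src/lib  # warp to a subdirectory beneath it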
wd_exit_fail()
{
local msg=$1
wd_print_msg "$WD_RED" "$msg"
WD_EXIT_CODE=1
}
wd_exit_warn()
{
local msg=$1
wd_print_msg "$WD_YELLOW" "$msg"
WD_EXIT_CODE=1
}
wd_getdir()
{
local name_arg=$1
point=$(wd_show "$name_arg")
dir=${point:28+$#name_arg+7}
if [[ -z $name_arg ]]; then
wd_exit_fail "You must enter a warp point"
break
elif [[ -z $dir ]]; then
wd_exit_fail "Unknown warp point '${name_arg}'"
break
fi
}
# core
wd_warp()
{
local point=$1
local sub=$2
if [[ $point =~ "^\.+$" ]]
then
if [[ $#1 < 2 ]]
then
wd_exit_warn "Warping to current directory?"
else
(( n = $#1 - 1 ))
cd -$n > /dev/null
fi
elif [[ ${points[$point]} != "" ]]
then
if [[ $sub != "" ]]
then
cd ${points[$point]/#\~/$HOME}/$sub
else
cd ${points[$point]/#\~/$HOME}
fi
else
wd_exit_fail "Unknown warp point '${point}'"
fi
}
wd_add()
{
local point=$1
local force=$2
if [[ $point == "" ]]
then
point=$(basename "$PWD")
fi
if [[ $point =~ "^[\.]+$" ]]
then
wd_exit_fail "Warp point cannot be just dots"
elif [[ $point =~ "[[:space:]]+" ]]
then
wd_exit_fail "Warp point should not contain whitespace"
elif [[ $point == *:* ]]
then
wd_exit_fail "Warp point cannot contain colons"
elif [[ ${points[$point]} == "" ]] || [ ! -z "$force" ]
then
wd_remove "$point" > /dev/null
printf "%q:%s\n" "${point}" "${PWD/#$HOME/~}" >> "$WD_CONFIG"
if (whence sort >/dev/null); then
local config_tmp=$(mktemp "${TMPDIR:-/tmp}/wd.XXXXXXXXXX")
# use 'cat' below to ensure we respect $WD_CONFIG as a symlink
sort -o "${config_tmp}" "$WD_CONFIG" && cat "${config_tmp}" > "$WD_CONFIG" && rm "${config_tmp}"
fi
wd_export_static_named_directories
wd_print_msg "$WD_GREEN" "Warp point added"
# override exit code in case wd_remove did not remove any points
# TODO: we should handle this kind of logic better
WD_EXIT_CODE=0
else
wd_exit_warn "Warp point '${point}' already exists. Use 'add --force' to overwrite."
fi
}
wd_remove()
{
local point_list=$1
if [[ "$point_list" == "" ]]
then
point_list=$(basename "$PWD")
fi
for point_name in $point_list ; do
if [[ ${points[$point_name]} != "" ]]
then
local config_tmp=$(mktemp "${TMPDIR:-/tmp}/wd.XXXXXXXXXX")
# Copy and delete in two steps in order to preserve symlinks
if sed -n "/^${point_name}:.*$/!p" "$WD_CONFIG" > "$config_tmp" && command cp "$config_tmp" "$WD_CONFIG" && command rm "$config_tmp"
then
wd_print_msg "$WD_GREEN" "Warp point removed"
else
wd_exit_fail "Something bad happened! Sorry."
fi
else
wd_exit_fail "Warp point was not found"
fi
done
}
wd_list_all()
{
wd_print_msg "$WD_BLUE" "All warp points:"
entries=$(sed "s:${HOME}:~:g" "$WD_CONFIG")
max_warp_point_length=0
while IFS= read -r line
do
arr=(${(s,:,)line})
key=${arr[1]}
length=${#key}
if [[ length -gt max_warp_point_length ]]
then
max_warp_point_length=$length
fi
done <<< "$entries"
while IFS= read -r line
do
if [[ $line != "" ]]
then
arr=(${(s,:,)line})
key=${arr[1]}
val=${arr[2]}
if [[ -z $wd_quiet_mode ]]
then
printf "%${max_warp_point_length}s -> %s\n" "$key" "$val"
fi
fi
done <<< "$entries"
}
wd_ls()
{
wd_getdir "$1"
ls "${dir/#\~/$HOME}"
}
wd_path()
{
wd_getdir "$1"
echo "$(echo "$dir" | sed "s:${HOME}:~:g")"
}
wd_show()
{
local name_arg=$1
# if there's an argument we look up the value
if [[ -n $name_arg ]]
then
if [[ -z $points[$name_arg] ]]
then
wd_print_msg "$WD_BLUE" "No warp point named $name_arg"
else
wd_print_msg "$WD_GREEN" "Warp point: ${WD_GREEN}$name_arg${WD_NOC} -> $points[$name_arg]"
fi
else
# hax to create a local empty array
local wd_matches
wd_matches=()
# do a reverse lookup to check whether PWD is in $points
PWD="${PWD/$HOME/~}"
if [[ ${points[(r)$PWD]} == "$PWD" ]]
then
for name in ${(k)points}
do
if [[ $points[$name] == "$PWD" ]]
then
wd_matches[$(($#wd_matches+1))]=$name
fi
done
wd_print_msg "$WD_BLUE" "$#wd_matches warp point(s) to current directory: ${WD_GREEN}$wd_matches${WD_NOC}"
else
wd_print_msg "$WD_YELLOW" "No warp point to $(echo "$PWD" | sed "s:$HOME:~:")"
fi
fi
}
wd_clean() {
local force=$1
local count=0
local wd_tmp=""
while read -r line
do
if [[ $line != "" ]]
then
arr=(${(s,:,)line})
key=${arr[1]}
val=${arr[2]}
if [ -d "${val/#\~/$HOME}" ]
then
wd_tmp=$wd_tmp"\n"`echo "$line"`
else
wd_print_msg "$WD_YELLOW" "Nonexistent directory: ${key} -> ${val}"
count=$((count+1))
fi
fi
done < "$WD_CONFIG"
if [[ $count -eq 0 ]]
then
wd_print_msg "$WD_BLUE" "No warp points to clean, carry on!"
else
if [ ! -z "$force" ] || wd_yesorno "Removing ${count} warp points. Continue? (y/n)"
then
echo "$wd_tmp" >! "$WD_CONFIG"
wd_print_msg "$WD_GREEN" "Cleanup complete. ${count} warp point(s) removed"
else
wd_print_msg "$WD_BLUE" "Cleanup aborted"
fi
fi
}
wd_export_static_named_directories() {
if [[ ! -z $WD_EXPORT ]]
then
command grep '^[0-9a-zA-Z_-]\+:' "$WD_CONFIG" | sed -e "s,~,$HOME," -e 's/:/=/' | while read -r warpdir ; do
hash -d "$warpdir"
done
fi
}
local WD_CONFIG=${WD_CONFIG:-$HOME/.warprc}
local WD_QUIET=0
local WD_EXIT_CODE=0
local WD_DEBUG=0
# Parse 'meta' options first to avoid the need to have them before
# other commands. The `-D` flag consumes recognized options so that
# the actual command parsing won't be affected.
zparseopts -D -E \
c:=wd_alt_config -config:=wd_alt_config \
q=wd_quiet_mode -quiet=wd_quiet_mode \
v=wd_print_version -version=wd_print_version \
d=wd_debug_mode -debug=wd_debug_mode \
f=wd_force_mode -force=wd_force_mode
if [[ ! -z $wd_print_version ]]
then
echo "wd version $WD_VERSION"
fi
if [[ ! -z $wd_alt_config ]]
then
WD_CONFIG=$wd_alt_config[2]
fi
# check if config file exists
if [ ! -e "$WD_CONFIG" ]
then
# if not, create config file
touch "$WD_CONFIG"
else
wd_export_static_named_directories
fi
# load warp points
typeset -A points
while read -r line
do
arr=(${(s,:,)line})
key=${arr[1]}
# join the rest, in case the path contains colons
val=${(j,:,)arr[2,-1]}
points[$key]=$val
done < "$WD_CONFIG"
# get opts
args=$(getopt -o a:r:c:lhs -l add:,rm:,clean,list,ls:,path:,help,show -- $*)
# check if no arguments were given, and that version is not set
if [[ ($? -ne 0 || $#* -eq 0) && -z $wd_print_version ]]
then
wd_print_usage
# check if config file is writeable
elif [ ! -w "$WD_CONFIG" ]
then
# do nothing
# can't run `exit`, as this would exit the executing shell
wd_exit_fail "\'$WD_CONFIG\' is not writeable."
else
# parse rest of options
local wd_o
for wd_o
do
case "$wd_o"
in
"-a"|"--add"|"add")
wd_add "$2" "$wd_force_mode"
break
;;
"-e"|"export")
wd_export_static_named_directories
break
;;
"-r"|"--remove"|"rm")
# Passes all the arguments as a single string separated by whitespace to wd_remove
wd_remove "${@:2}"
break
;;
"-l"|"list")
wd_list_all
break
;;
"-ls"|"ls")
wd_ls "$2"
break
;;
"-p"|"--path"|"path")
wd_path "$2"
break
;;
"-h"|"--help"|"help")
wd_print_usage
break
;;
"-s"|"--show"|"show")
wd_show "$2"
break
;;
"-c"|"--clean"|"clean")
wd_clean "$wd_force_mode"
break
;;
        --)
            # must precede the catch-all below, or it can never match
            break
            ;;
        *)
            wd_warp "$wd_o" "$2"
            break
            ;;
esac
done
fi
## garbage collection
# if not, next time warp will pick up variables from this run
# remember, there's no sub shell
unset wd_warp
unset wd_add
unset wd_remove
unset wd_show
unset wd_list_all
unset wd_print_msg
unset wd_yesorno
unset wd_print_usage
unset wd_alt_config
unset wd_quiet_mode
unset wd_print_version
unset wd_export_static_named_directories
unset wd_o
unset args
unset points
unset val &> /dev/null # fixes issue #1
if [[ -n $wd_debug_mode ]]
then
exit $WD_EXIT_CODE
else
unset wd_debug_mode
fi
|
mbologna/oh-my-zsh
|
plugins/wd/wd.sh
|
Shell
|
mit
| 12,253 |
#!/bin/sh
# Copyright (C) 2010-2013 Free Software Foundation, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Removal recovery rules for AC_CONFIG_HEADERS should not remove files
# with 'make -n'.
. test-init.sh
cat >>configure.ac <<'EOF'
AC_CONFIG_HEADERS([config.h])
AC_OUTPUT
EOF
: >Makefile.am
$ACLOCAL
$AUTOCONF
$AUTOHEADER
$AUTOMAKE
./configure
$MAKE
rm -f config.h
$MAKE -n
test -f stamp-h1
test ! -e config.h
:
|
DDTChen/CookieVLC
|
vlc/extras/tools/automake/t/autohdrdry.sh
|
Shell
|
gpl-2.0
| 1,007 |
#!/bin/bash
/usr/bin/env php ../composer.phar global require "fxp/composer-asset-plugin:~1.0.0"
/usr/bin/env php ../composer.phar install --prefer-dist --optimize-autoloader
|
rinodung/yii2-shop-cms
|
install.sh
|
Shell
|
gpl-3.0
| 174 |
#!/bin/sh
#
# Copyright 2004-2006 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Simple installation script for the dtn reference implementation
#
echo "***"
echo "*** Installing dtn..."
echo "***"
echo ""
if [ ! -f daemon/dtnd ] ; then
echo "This script must be run from the top level DTN2 directory"
exit 1
fi
#
# Select and create a user account
#
echo -n "user account to use for the dtn daemon [dtn]: "
read DTN_USER
[ "$DTN_USER" = "" ] && DTN_USER=dtn
grep $DTN_USER /etc/passwd >/dev/null 2> /dev/null
if [ ! $? = 0 ]; then
echo -n "create account for $DTN_USER? [y]: "
read y
if [ "$y" = "" -o "$y" = "y" -o "$y" = "yes" ]; then
echo "creating account for $DTN_USER..."
adduser $DTN_USER
else
echo "can't find account for $DTN_USER... please create and re-run install"
exit 1
fi
fi
#
# Now run the makefile rule to do the real installation
#
echo "installing files"
make install
echo "running dtn daemon to create initial database..."
/usr/bin/dtnd --init-db
echo "installation complete."
|
LeoIannacone/dtn
|
tools/install.sh
|
Shell
|
apache-2.0
| 1,614 |
#!/usr/bin/env bash
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
set -e
set -x
N_JOBS=$(sysctl -n hw.ncpu)
N_JOBS=$((N_JOBS+1))
echo ""
echo "Bazel will use ${N_JOBS} concurrent job(s)."
echo ""
# Run configure.
export TF_NEED_CUDA=0
export CC_OPT_FLAGS='-mavx'
export PYTHON_BIN_PATH=$(which python2)
yes "" | $PYTHON_BIN_PATH configure.py
which bazel
bazel test --test_tag_filters=-no_oss,-gpu,-benchmark-test,-nomac \
--test_timeout 300,450,1200,3600 \
--test_size_filters=small,medium --config=opt \
--jobs=${N_JOBS} --build_tests_only --test_output=errors -k -- \
//tensorflow/contrib/... -//tensorflow/contrib/lite/...
|
rabipanda/tensorflow
|
tensorflow/tools/ci_build/osx/cpu/run_contrib.sh
|
Shell
|
apache-2.0
| 1,291 |
#! /bin/sh
# $Id: nasm_test.sh 1137 2004-09-04 01:24:57Z peter $
${srcdir}/out_test.sh nasm_test modules/parsers/nasm/tests "nasm-compat parser" "-f bin" ""
exit $?
|
roisagiv/webrtc-ios
|
third_party/yasm/source/patched-yasm/modules/parsers/nasm/tests/nasm_test.sh
|
Shell
|
bsd-3-clause
| 165 |
#!/bin/sh
git shortlog --format="%s@@@%H@@@%h@@@" --no-merges $1 | perl release_notes_filter.pl
|
phillipross/pgjdbc
|
release_notes.sh
|
Shell
|
bsd-3-clause
| 97 |
#! /bin/sh
#$PREPARETIPS > tips.cpp
$EXTRACTRC *.rc `find . -name \*.ui` *.kcfg >> ./rc.cpp || exit 11
$XGETTEXT `find . -name \*.h -o -name \*.cpp` -o $podir/umbrello.pot
rm -f tips.cpp rc.cpp
|
behlingc/umbrello-behlingc
|
umbrello/Messages.sh
|
Shell
|
gpl-2.0
| 194 |
#!/bin/sh
#
# Copyright (c) 2008 Christian Couder
#
test_description='test git rev-parse --verify'
exec </dev/null
GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
. ./test-lib.sh
add_line_into_file()
{
_line=$1
_file=$2
if [ -f "$_file" ]; then
echo "$_line" >> $_file || return $?
MSG="Add <$_line> into <$_file>."
else
echo "$_line" > $_file || return $?
git add $_file || return $?
MSG="Create file <$_file> with <$_line> inside."
fi
test_tick
git commit --quiet -m "$MSG" $_file
}
HASH1=
HASH2=
HASH3=
HASH4=
test_expect_success 'set up basic repo with 1 file (hello) and 4 commits' '
add_line_into_file "1: Hello World" hello &&
HASH1=$(git rev-parse --verify HEAD) &&
add_line_into_file "2: A new day for git" hello &&
HASH2=$(git rev-parse --verify HEAD) &&
add_line_into_file "3: Another new day for git" hello &&
HASH3=$(git rev-parse --verify HEAD) &&
add_line_into_file "4: Ciao for now" hello &&
HASH4=$(git rev-parse --verify HEAD)
'
test_expect_success 'works with one good rev' '
rev_hash1=$(git rev-parse --verify $HASH1) &&
test "$rev_hash1" = "$HASH1" &&
rev_hash2=$(git rev-parse --verify $HASH2) &&
test "$rev_hash2" = "$HASH2" &&
rev_hash3=$(git rev-parse --verify $HASH3) &&
test "$rev_hash3" = "$HASH3" &&
rev_hash4=$(git rev-parse --verify $HASH4) &&
test "$rev_hash4" = "$HASH4" &&
rev_main=$(git rev-parse --verify main) &&
test "$rev_main" = "$HASH4" &&
rev_head=$(git rev-parse --verify HEAD) &&
test "$rev_head" = "$HASH4"
'
test_expect_success 'fails with any bad rev or many good revs' '
test_must_fail git rev-parse --verify 2>error &&
grep "single revision" error &&
test_must_fail git rev-parse --verify foo 2>error &&
grep "single revision" error &&
test_must_fail git rev-parse --verify HEAD bar 2>error &&
grep "single revision" error &&
test_must_fail git rev-parse --verify baz HEAD 2>error &&
grep "single revision" error &&
test_must_fail git rev-parse --verify $HASH2 HEAD 2>error &&
grep "single revision" error
'
test_expect_success 'fails silently when using -q' '
test_must_fail git rev-parse --verify --quiet 2>error &&
test_must_be_empty error &&
test_must_fail git rev-parse -q --verify foo 2>error &&
test_must_be_empty error &&
test_must_fail git rev-parse --verify -q HEAD bar 2>error &&
test_must_be_empty error &&
test_must_fail git rev-parse --quiet --verify baz HEAD 2>error &&
test_must_be_empty error &&
test_must_fail git rev-parse -q --verify $HASH2 HEAD 2>error &&
test_must_be_empty error
'
test_expect_success 'fails silently when using -q with deleted reflogs' '
ref=$(git rev-parse HEAD) &&
git update-ref --create-reflog -m "message for refs/test" refs/test "$ref" &&
git reflog delete --updateref --rewrite refs/test@{1} &&
test_must_fail git rev-parse -q --verify refs/test@{1} >error 2>&1 &&
test_must_be_empty error
'
test_expect_success 'fails silently when using -q with not enough reflogs' '
ref=$(git rev-parse HEAD) &&
git update-ref --create-reflog -m "message for refs/test2" refs/test2 "$ref" &&
test_must_fail git rev-parse -q --verify refs/test2@{999} >error 2>&1 &&
test_must_be_empty error
'
test_expect_success 'succeeds silently with -q and reflogs that do not go far back enough in time' '
ref=$(git rev-parse HEAD) &&
git update-ref --create-reflog -m "message for refs/test3" refs/test3 "$ref" &&
git rev-parse -q --verify refs/test3@{1.year.ago} >actual 2>error &&
test_must_be_empty error &&
echo "$ref" >expect &&
test_cmp expect actual
'
test_expect_success 'no stdout output on error' '
test -z "$(git rev-parse --verify)" &&
test -z "$(git rev-parse --verify foo)" &&
test -z "$(git rev-parse --verify baz HEAD)" &&
test -z "$(git rev-parse --verify HEAD bar)" &&
test -z "$(git rev-parse --verify $HASH2 HEAD)"
'
test_expect_success 'use --default' '
git rev-parse --verify --default main &&
git rev-parse --verify --default main HEAD &&
git rev-parse --default main --verify &&
git rev-parse --default main --verify HEAD &&
git rev-parse --verify HEAD --default main &&
test_must_fail git rev-parse --verify foo --default main &&
test_must_fail git rev-parse --default HEAD --verify bar &&
test_must_fail git rev-parse --verify --default HEAD baz &&
test_must_fail git rev-parse --default foo --verify &&
test_must_fail git rev-parse --verify --default bar
'
test_expect_success 'main@{n} for various n' '
N=$(git reflog | wc -l) &&
Nm1=$(($N-1)) &&
Np1=$(($N+1)) &&
git rev-parse --verify main@{0} &&
git rev-parse --verify main@{1} &&
git rev-parse --verify main@{$Nm1} &&
test_must_fail git rev-parse --verify main@{$N} &&
test_must_fail git rev-parse --verify main@{$Np1}
'
test_expect_success SYMLINKS 'ref resolution not confused by broken symlinks' '
ln -s does-not-exist .git/refs/heads/broken &&
test_must_fail git rev-parse --verify broken
'
test_expect_success 'options can appear after --verify' '
git rev-parse --verify HEAD >expect &&
git rev-parse --verify -q HEAD >actual &&
test_cmp expect actual
'
test_expect_success 'verify respects --end-of-options' '
git update-ref refs/heads/-tricky HEAD &&
git rev-parse --verify HEAD >expect &&
git rev-parse --verify --end-of-options -tricky >actual &&
test_cmp expect actual
'
test_done
|
felipec/git
|
t/t1503-rev-parse-verify.sh
|
Shell
|
gpl-2.0
| 5,357 |
#! /bin/bash
CL_PATH=`find ../ -name '*.jar' | tr "\n" :`
echo "CLASSPATH=$CL_PATH"
java -classpath $CL_PATH com.navercorp.pinpoint.tools.NetworkAvailabilityChecker ../pinpoint.config
|
Allive1/pinpoint
|
tools/src/main/script/networktest.sh
|
Shell
|
apache-2.0
| 183 |
#!/bin/bash
#
# Copyright 2014 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
dir=$(dirname $0)/..
set -e
"$dir"/build/gendeps.sh
"$dir"/build/build.sh
"$dir"/build/lint.sh
|
treejames/shaka-player
|
build/all.sh
|
Shell
|
apache-2.0
| 687 |
#!/bin/bash
# This script is meant to be called by the "script" step defined in
# .travis.yml. See http://docs.travis-ci.com/ for more details.
# The behavior of the script is controlled by environment variables defined
# in the .travis.yml in the top level folder of the project.
# License: 3-clause BSD
set -e
python --version
python -c "import sklearn; print('sklearn %s' % sklearn.__version__)"
make test
|
dhuppenkothen/hmmlearn
|
continuous_integration/test_script.sh
|
Shell
|
bsd-3-clause
| 413 |
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#set -x
#Check first ROOT volume
iteration=$(mysql -h $1 --user=root --skip-column-names -U cloud_usage -e "select count(*) from account;")
echo "Created/removed for the first ROOT volume:\n"
for ((i=3; i<$iteration+1; i++))
do
volume_name=$(mysql -h $1 --user=root --skip-column-names -U cloud -e "select name from volumes where account_id=$i and volume_type='ROOT' and name like 'i-%' limit 0,1;")
volume_id=$(mysql -h $1 --user=root --skip-column-names -U cloud -e "select id from volumes where account_id=$i and volume_type='ROOT' and name like 'i-%'limit 0,1;")
created_time=$(mysql -h $1 --user=root --skip-column-names -U cloud_usage -e "select TIME_TO_SEC(created) from event where description like '%$volume_name%' and type='VOLUME.CREATE' and level='INFO';")
destroyed_time=$(mysql -h $1 --user=root --skip-column-names -U cloud_usage -e "select TIME_TO_SEC(created) from event where description like '%$volume_name%' and type='VOLUME.DELETE' and level='INFO';")
if [ "$volume_name" != "" ] && [ "$destroyed_time" != "" ]
then
event_time=`expr $destroyed_time - $created_time`
cloud_usage_time=$(mysql -h $1 --user=root --skip-column-names -U cloud_usage -e "select ROUND(SUM(raw_usage*3600)) from cloud_usage where usage_type=6 and description like '%Volume Id: $volume_id%';")
if [ "$cloud_usage_time" = "NULL" ]
then
echo "Allocated time is missing in cloud_usage table for volume $volume_name belonging to account $i"
else
temp=`expr $event_time - $cloud_usage_time`
if [ $temp -ne 0 ] && [ $temp != "-86400" ]
then
echo "For account $i difference in time for volume $volume_name is $temp"
else
echo "Test passed for the ROOT volume $volume_name belonging to account $i"
fi
fi
else
echo "Skipping verification for account $i (the account either a) misses root volume $volume_name b) volume wasn't deleted 3) Delete volume failed "
fi
done
#Check second ROOT volume
iteration=$(mysql -h $1 --user=root --skip-column-names -U cloud_usage -e "select count(*) from account;")
echo "Created/removed for the second ROOT volume:\n"
for ((i=3; i<$iteration+1; i++))
do
volume_name=$(mysql -h $1 --user=root --skip-column-names -U cloud -e "select name from volumes where account_id=$i and volume_type='ROOT' and name like 'i-%' limit 1,1;")
volume_id=$(mysql -h $1 --user=root --skip-column-names -U cloud -e "select id from volumes where account_id=$i and volume_type='ROOT' and name like 'i-%'limit 1,1;")
created_time=$(mysql -h $1 --user=root --skip-column-names -U cloud_usage -e "select TIME_TO_SEC(created) from event where description like '%$volume_name%' and type='VOLUME.CREATE' and level='INFO';")
destroyed_time=$(mysql -h $1 --user=root --skip-column-names -U cloud_usage -e "select TIME_TO_SEC(created) from event where description like '%$volume_name%' and type='VOLUME.DELETE' and level='INFO';")
if [ "$volume_name" != "" ] && [ "$destroyed_time" != "" ]
then
event_time=`expr $destroyed_time - $created_time`
cloud_usage_time=$(mysql -h $1 --user=root --skip-column-names -U cloud_usage -e "select ROUND(SUM(raw_usage*3600)) from cloud_usage where usage_type=6 and description like '%Volume Id: $volume_id%';")
if [ "$cloud_usage_time" = "NULL" ]
then
echo "Allocated time is missing in cloud_usage table for volume $volume_name belonging to account $i"
else
temp=`expr $event_time - $cloud_usage_time`
if [ $temp -ne 0 ] && [ $temp != "-86400" ]
then
echo "For account $i difference in time for volume $volume_name is $temp"
else
echo "Test passed for the ROOT volume $volume_name belonging to account $i"
fi
fi
else
echo "Skipping verification for account $i (the account either a) misses root volume $volume_name b) volume wasn't deleted 3) Delete volume failed "
fi
done
#Check first DATADISK volume
iteration=$(mysql -h $1 --user=root --skip-column-names -U cloud_usage -e "select count(*) from account;")
echo "Created/removed for the first DATADISK volume:\n"
for ((i=3; i<$iteration+1; i++))
do
volume_name=$(mysql -h $1 --user=root --skip-column-names -U cloud -e "select name from volumes where account_id=$i and volume_type='DATADISK' and name like 'i-%' limit 0,1;")
volume_id=$(mysql -h $1 --user=root --skip-column-names -U cloud -e "select id from volumes where account_id=$i and volume_type='DATADISK' and name like 'i-%'limit 0,1;")
created_time=$(mysql -h $1 --user=root --skip-column-names -U cloud_usage -e "select TIME_TO_SEC(created) from event where description like '%$volume_name%' and type='VOLUME.CREATE' and level='INFO';")
destroyed_time=$(mysql -h $1 --user=root --skip-column-names -U cloud_usage -e "select TIME_TO_SEC(created) from event where description like '%$volume_name%' and type='VOLUME.DELETE' and level='INFO';")
if [ "$volume_name" != "" ] && [ "$destroyed_time" != "" ]
then
event_time=`expr $destroyed_time - $created_time`
cloud_usage_time=$(mysql -h $1 --user=root --skip-column-names -U cloud_usage -e "select ROUND(SUM(raw_usage*3600)) from cloud_usage where usage_type=6 and description like '%Volume Id: $volume_id%';")
if [ "$cloud_usage_time" = "NULL" ]
then
echo "Allocated time is missing in cloud_usage table for volume $volume_name belonging to account $i"
else
temp=`expr $event_time - $cloud_usage_time`
if [ $temp -ne 0 ] && [ $temp != "-86400" ]
then
echo "For account $i difference in time for volume $volume_name is $temp"
else
echo "Test passed for the DATADISK volume $volume_name belonging to account $i"
fi
fi
else
echo "Skipping verification for account $i (the account either a) misses root volume $volume_name b) volume wasn't deleted 3) Delete volume failed "
fi
done
#Check second DATADISK volume
iteration=$(mysql -h $1 --user=root --skip-column-names -U cloud_usage -e "select count(*) from account;")
echo "Created/removed for the second DATADISK volume:\n"
for ((i=3; i<$iteration+1; i++))
do
volume_name=$(mysql -h $1 --user=root --skip-column-names -U cloud -e "select name from volumes where account_id=$i and volume_type='DATADISK' and name like 'i-%' limit 1,1;")
volume_id=$(mysql -h $1 --user=root --skip-column-names -U cloud -e "select id from volumes where account_id=$i and volume_type='DATADISK' and name like 'i-%'limit 1,1;")
created_time=$(mysql -h $1 --user=root --skip-column-names -U cloud_usage -e "select TIME_TO_SEC(created) from event where description like '%$volume_name%' and type='VOLUME.CREATE' and level='INFO';")
destroyed_time=$(mysql -h $1 --user=root --skip-column-names -U cloud_usage -e "select TIME_TO_SEC(created) from event where description like '%$volume_name%' and type='VOLUME.DELETE' and level='INFO';")
if [ "$volume_name" != "" ] && [ "$destroyed_time" != "" ]
then
event_time=`expr $destroyed_time - $created_time`
cloud_usage_time=$(mysql -h $1 --user=root --skip-column-names -U cloud_usage -e "select ROUND(SUM(raw_usage*3600)) from cloud_usage where usage_type=6 and description like '%Volume Id: $volume_id%';")
if [ "$cloud_usage_time" = "NULL" ]
then
echo "Allocated time is missing in cloud_usage table for volume $volume_name belonging to account $i"
else
temp=`expr $event_time - $cloud_usage_time`
if [ $temp -ne 0 ] && [ $temp != "-86400" ]
then
echo "For account $i difference in time for volume $volume_name is $temp"
else
echo "Test passed for the DATADISK volume $volume_name belonging to account $i"
fi
fi
else
echo "Skipping verification for account $i (the account either a) misses root volume $volume_name b) volume wasn't deleted 3) Delete volume failed "
fi
done
|
argv0/cloudstack
|
test/scripts/usage/volume_usage.sh
|
Shell
|
apache-2.0
| 8,259 |
#!/bin/bash
set -e
MEME_ETC_DIR=${PREFIX}/etc
cpanm HTML::PullParser
cpanm HTML::Parse
cpanm CGI::Application
cpanm XML::Parser::Expat --configure-args "EXPATLIBPATH=$PREFIX/lib" --configure-args "EXPATHINCPATH=$PREFIX/include"
perl scripts/dependencies.pl
./configure --prefix="$PREFIX"
make clean
make AM_CFLAGS='-DNAN="(0.0/0.0)"'
# tests will only work inside the build dir, but
# https://github.com/conda/conda-build/issues/1453
# so you need `conda build --prefix-length 1`
# for it to work properly
# make test
make install
|
JenCabral/bioconda-recipes
|
recipes/meme/build.sh
|
Shell
|
mit
| 535 |
#!/usr/bin/env bash
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
set -ex
TEST_ROOT_DIR=$(git rev-parse --show-toplevel)
UNPACKED_SPARK_TGZ="$TEST_ROOT_DIR/target/spark-dist-unpacked"
IMAGE_TAG_OUTPUT_FILE="$TEST_ROOT_DIR/target/image-tag.txt"
DEPLOY_MODE="minikube"
IMAGE_REPO="docker.io/kubespark"
IMAGE_TAG="N/A"
JAVA_IMAGE_TAG="N/A"
SPARK_TGZ="N/A"
MVN="$TEST_ROOT_DIR/build/mvn"
DOCKER_FILE="N/A"
EXCLUDE_TAGS=""
# Parse arguments
while (( "$#" )); do
case $1 in
--unpacked-spark-tgz)
UNPACKED_SPARK_TGZ="$2"
shift
;;
--image-repo)
IMAGE_REPO="$2"
shift
;;
--image-tag)
IMAGE_TAG="$2"
shift
;;
--java-image-tag)
JAVA_IMAGE_TAG="$2"
shift
;;
--image-tag-output-file)
IMAGE_TAG_OUTPUT_FILE="$2"
shift
;;
--deploy-mode)
DEPLOY_MODE="$2"
shift
;;
--spark-tgz)
SPARK_TGZ="$2"
shift
;;
--docker-file)
DOCKER_FILE="$2"
shift
;;
--test-exclude-tags)
EXCLUDE_TAGS="$2"
shift
;;
*)
break
;;
esac
shift
done
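# Example invocation (values are hypothetical; flags match the parser above):
#   ./setup-integration-test-env.sh --deploy-mode minikube \
#       --image-repo docker.io/kubespark --spark-tgz /tmp/spark-dist.tgz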
rm -rf "$UNPACKED_SPARK_TGZ"
if [[ $SPARK_TGZ == "N/A" && $IMAGE_TAG == "N/A" ]];
then
  # If there is no Spark image tag to test with and no source tarball, build from the current checkout
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
SPARK_INPUT_DIR="$(cd "$SCRIPT_DIR/"../../../../ >/dev/null 2>&1 && pwd )"
DOCKER_FILE_BASE_PATH="$SPARK_INPUT_DIR/resource-managers/kubernetes/docker/src/main/dockerfiles/spark"
elif [[ $IMAGE_TAG == "N/A" ]];
then
  # If there is a test source tarball and no image tag, build from that
mkdir -p $UNPACKED_SPARK_TGZ
tar -xzvf $SPARK_TGZ --strip-components=1 -C $UNPACKED_SPARK_TGZ;
SPARK_INPUT_DIR="$UNPACKED_SPARK_TGZ"
DOCKER_FILE_BASE_PATH="$SPARK_INPUT_DIR/kubernetes/dockerfiles/spark"
fi
# If a specific Spark image tag was given, skip building and extraction/copy
if [[ $IMAGE_TAG == "N/A" ]];
then
VERSION=$("$MVN" help:evaluate -Dexpression=project.version \
| grep -v "INFO"\
| grep -v "WARNING"\
| tail -n 1)
IMAGE_TAG=${VERSION}_$(uuidgen)
cd $SPARK_INPUT_DIR
if [[ $DOCKER_FILE == "N/A" ]]; then
# OpenJDK base-image tag (e.g. 8-jre-slim, 11-jre-slim)
JAVA_IMAGE_TAG_BUILD_ARG="-b java_image_tag=$JAVA_IMAGE_TAG"
else
if [[ $DOCKER_FILE = /* ]]; then
JAVA_IMAGE_TAG_BUILD_ARG="-f $DOCKER_FILE"
else
JAVA_IMAGE_TAG_BUILD_ARG="-f $DOCKER_FILE_BASE_PATH/$DOCKER_FILE"
fi
fi
# Build PySpark image
LANGUAGE_BINDING_BUILD_ARGS="-p $DOCKER_FILE_BASE_PATH/bindings/python/Dockerfile"
# Build SparkR image
tags=(${EXCLUDE_TAGS//,/ })
if [[ ! ${tags[@]} =~ "r" ]]; then
LANGUAGE_BINDING_BUILD_ARGS="$LANGUAGE_BINDING_BUILD_ARGS -R $DOCKER_FILE_BASE_PATH/bindings/R/Dockerfile"
fi
# Unset SPARK_HOME to let the docker-image-tool script detect SPARK_HOME. Otherwise, it cannot
# indicate the unpacked directory as its home. See SPARK-28550.
unset SPARK_HOME
case $DEPLOY_MODE in
cloud)
# Build images
$SPARK_INPUT_DIR/bin/docker-image-tool.sh -r $IMAGE_REPO -t $IMAGE_TAG $JAVA_IMAGE_TAG_BUILD_ARG $LANGUAGE_BINDING_BUILD_ARGS build
# Push images appropriately
if [[ $IMAGE_REPO == gcr.io* ]] ;
then
gcloud docker -- push $IMAGE_REPO/spark:$IMAGE_TAG
else
$SPARK_INPUT_DIR/bin/docker-image-tool.sh -r $IMAGE_REPO -t $IMAGE_TAG push
fi
;;
docker-desktop | docker-for-desktop)
    # Only need to build; this places the image in the local Docker repo,
    # which is all Docker for Desktop needs, so there is no push step
$SPARK_INPUT_DIR/bin/docker-image-tool.sh -r $IMAGE_REPO -t $IMAGE_TAG $JAVA_IMAGE_TAG_BUILD_ARG $LANGUAGE_BINDING_BUILD_ARGS build
;;
minikube)
    # Only need to build; with the -m option the images are built directly
    # against the minikube Docker daemon, so there is no push step
$SPARK_INPUT_DIR/bin/docker-image-tool.sh -m -r $IMAGE_REPO -t $IMAGE_TAG $JAVA_IMAGE_TAG_BUILD_ARG $LANGUAGE_BINDING_BUILD_ARGS build
;;
*)
echo "Unrecognized deploy mode $DEPLOY_MODE" && exit 1
;;
esac
cd -
fi
rm -f $IMAGE_TAG_OUTPUT_FILE
echo -n $IMAGE_TAG > $IMAGE_TAG_OUTPUT_FILE
|
apache/spark
|
resource-managers/kubernetes/integration-tests/scripts/setup-integration-test-env.sh
|
Shell
|
apache-2.0
| 5,089 |
#!/bin/sh -ex
# The MIT License
#
# Copyright (c) 2004-2009, Sun Microsystems, Inc., Kohsuke Kawaguchi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# build a flashing animation image from a still picture
# Usage <src gif> <dst gif>
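# Example call (file names are hypothetical):
#   ./makeFlash.sh icon.gif icon-flashing.gif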
t=/tmp/flash$$
src=$1
dst=$2
for p in 20 40 60 80 100
do
convert $src -fill white -colorize ${p}% $t.$p.gif
done
convert -delay 10 $src $t.20.gif $t.40.gif $t.60.gif $t.80.gif $t.100.gif $t.80.gif $t.60.gif $t.40.gif $t.20.gif -loop 0 $dst
rm $t.*.gif
|
Vlatombe/jenkins
|
war/images/makeFlash.sh
|
Shell
|
mit
| 1,506 |
#! /bin/sh
# Test suite for exclude.
# Copyright (C) 2009-2011 Free Software Foundation, Inc.
# This file is part of the GNUlib Library.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
TMP=excltmp.$$
LIST=flist.$$
ERR=0
# Test exclude precedence
cat > $LIST <<EOT
foo*
bar
Baz
EOT
cat > $TMP <<EOT
bar: 1
bar: 0
EOT
./test-exclude$EXEEXT $LIST -include $LIST -- bar |
tr -d '\015' >$TMP.1
./test-exclude$EXEEXT -include $LIST -no-include $LIST -- bar |
tr -d '\015' >>$TMP.1
diff -c $TMP $TMP.1 || ERR=1
rm -f $TMP $TMP.1 $LIST
exit $ERR
|
ghmajx/asuswrt-merlin
|
release/src/router/coreutils-8.13/gnulib-tests/test-exclude7.sh
|
Shell
|
gpl-2.0
| 1,135 |
#!/bin/sh
#
# We also need the containerd client and its transitive dependencies
# and we conveniently have a checkout already. We actually prefer to
# reuse containerd's vendoring for consistency anyway.
set -eu
ctrd=$1
cp -r $ctrd/vendor/* vendor/
# We need containerd itself of course
mkdir -p vendor/github.com/containerd
cp -r $ctrd vendor/github.com/containerd/containerd
# Stop go finding nested vendorings
rm -rf vendor/github.com/containerd/containerd/vendor
|
radu-matei/linuxkit
|
pkg/init/cmd/service/skanky-vendor.sh
|
Shell
|
apache-2.0
| 469 |
# see http://benlimmer.com/2013/12/26/automatically-publish-javadoc-to-gh-pages-with-travis-ci/ for details
if [ "$TRAVIS_REPO_SLUG" == "google/dagger" ] && \
[ "$TRAVIS_JDK_VERSION" == "oraclejdk7" ] && \
[ "$TRAVIS_PULL_REQUEST" == "false" ] && \
[ "$TRAVIS_BRANCH" == "master" ]; then
echo -e "Publishing javadoc...\n"
mvn javadoc:aggregate -P!examples
TARGET="$(pwd)/target"
cd $HOME
git clone --quiet --branch=gh-pages https://${GH_TOKEN}@github.com/google/dagger gh-pages > /dev/null
cd gh-pages
git config --global user.email "[email protected]"
git config --global user.name "travis-ci"
git rm -rf api/latest
mkdir -p api
mv ${TARGET}/site/apidocs api/latest
git add -f api/latest
git commit -m "Lastest javadoc on successful travis build $TRAVIS_BUILD_NUMBER auto-pushed to gh-pages"
git push -fq origin gh-pages > /dev/null
echo -e "Published Javadoc to gh-pages.\n"
fi
|
DavidMihola/dagger
|
util/generate-latest-docs.sh
|
Shell
|
apache-2.0
| 929 |
#!/bin/bash
# This script likely won't work as is for you, but is a good template for
# iterating over all rubies and regenerating HTML fixtures.
set -e
source /usr/local/share/chruby/chruby.sh
function switch_ruby() {
chruby $1
}
function regen() {
bundle check || bundle install
GENERATE=1 bundle exec rspec ./spec/rspec/core/formatters/ || true
}
for ruby in \
jruby-1.7.9 \
1.9.3-p392 \
2.0.0-p247 \
2.1.0-p0 \
rbx-2.2.3 \
ree-1.8.7-2012.02;
do
switch_ruby $ruby
ruby -v
if [ $(echo $ruby | grep jruby) ]
then
export JRUBY_OPTS=--1.8
regen
export JRUBY_OPTS=--1.9
regen
else
regen
fi
done
|
a-suenami/rspec-core
|
script/regen_fixtures.sh
|
Shell
|
mit
| 650 |
python setup.py install --single-version-externally-managed --record=record.txt
|
gvlproject/bioconda-recipes
|
recipes/menetools/1.0.4_1/build.sh
|
Shell
|
mit
| 80 |
#!/bin/bash
# cache directory cleanup script example
#
# removes all old cached files with mtime older than index mtime
#
# there MUST be your ACTUAL index names and FULL PATHS to indexfiles
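# How it works (editorial note): for each index, every file under
# $cachedir/<indexname> whose mtime is not newer than the index file
# (find's "! -newer" test) is deleted, so the cache is purged each time
# the index is rebuilt.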
indexnames=( test1 test2 )
indexfiles=( /usr/local/sphinx/test1.spd /benchmarks/work/test/test2.spd )
cachedir=/tmp/cache
for element in $(seq 0 $((${#indexnames[@]} - 1)))
do
echo "processing index ${indexnames[$element]}"
find "$cachedir/${indexnames[$element]}" \( ! -newer "${indexfiles[$element]}" \) -type f -print0 | xargs -0 -r rm -f
done
|
ArcherCraftStore/ArcherVMPeridot
|
sphinx-2.1.8-release-win32/contrib/scripts/cachecleanup.sh
|
Shell
|
apache-2.0
| 559 |
#!/bin/sh
#
SELF=$0
SELFDIR=`dirname ${SELF}`
BUILDDIR=$1
DESTDIR=$2
CMD=$3
FILE=$4
NAME=$5
shift 5
OPTIONS="$@"
test -z "${BUILDDIR}" && {
echo "${SELF}:BUILDDIR: '${BUILDDIR}'" >&2;
exit 1;
}
test -z "${DESTDIR}" && {
echo "${SELF}:DESTDIR: '${DESTDIR}'" >&2;
exit 1;
}
test -z "${CMD}" && {
echo "${SELF}:CMD: '${CMD}'" >&2;
exit 1;
}
test -z "${FILE}" && {
echo "${SELF}:FILE: '${FILE}'" >&2;
exit 1;
}
test -z "${NAME}" && {
echo "${SELF}:NAME: '${NAME}'" >&2;
exit 1;
}
CURDIR=`pwd`
cd ${BUILDDIR} && {
ABS_BUILDDIR=`pwd`
cd ${CURDIR}
} || {
echo "${SELF}:cannot cd into '${BUILDDIR}'" >&2;
exit 1;
}
cd ${DESTDIR} && {
${ABS_BUILDDIR}/${CMD} ${OPTIONS} ${FILE} ${NAME} >&2 || exit 1;
cd ${CURDIR}
} || {
echo "${SELF}:cannot cd into '${BUILDDIR}'" >&2;
exit 1;
}
exit 0;
|
zarboz/XBMC-PVR-mac
|
tools/darwin/depends/samba/samba-3.6.6/source4/heimdal_build/asn1_compile_wrapper.sh
|
Shell
|
gpl-2.0
| 808 |
error() {
echo " ! $*" >&2
exit 1
}
status() {
echo "-----> $*"
}
protip() {
echo
echo "PRO TIP: $*" | indent
echo "See https://devcenter.heroku.com/articles/nodejs-support" | indent
echo
}
# sed -l basically makes sed replace and buffer through stdin to stdout
# so you get updates while the command runs and don't wait for the end
# e.g. npm install | indent
indent() {
c='s/^/ /'
case $(uname) in
Darwin) sed -l "$c";; # mac/bsd sed: -l buffers on line boundaries
*) sed -u "$c";; # unix/gnu sed: -u unbuffered (arbitrary) chunks of data
esac
}
cat_npm_debug_log() {
test -f $build_dir/npm-debug.log && cat $build_dir/npm-debug.log
}
export_env_dir() {
env_dir=$1
whitelist_regex=${2:-''}
blacklist_regex=${3:-'^(PATH|GIT_DIR|CPATH|CPPATH|LD_PRELOAD|LIBRARY_PATH)$'}
if [ -d "$env_dir" ]; then
for e in $(ls $env_dir); do
echo "$e" | grep -E "$whitelist_regex" | grep -qvE "$blacklist_regex" &&
export "$e=$(cat $env_dir/$e)"
:
done
fi
}
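# Usage sketch (argument is hypothetical): export every variable stored as a
# file in an env dir, keeping the default blacklist of dangerous names.
#   export_env_dir "$BUILD_ENV_DIR"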
|
ajgon/heroku-hook
|
spec/fixtures/buildpacks/heroku-buildpack-nodejs/bin/common.sh
|
Shell
|
mit
| 1,026 |
#!/bin/bash
PYTHON="coverage run -p -a"
URL=http://127.0.0.1/file
mkdir -p tmp
$PYTHON shadowsocks/local.py -c tests/aes.json &
LOCAL=$!
$PYTHON shadowsocks/server.py -c tests/aes.json --forbidden-ip "" &
SERVER=$!
sleep 3
time curl -o tmp/expected $URL
time curl -o tmp/result --socks5-hostname 127.0.0.1:1081 $URL
kill -s SIGINT $LOCAL
kill -s SIGINT $SERVER
sleep 2
diff tmp/expected tmp/result || exit 1
|
yaoliyc/shadowsocks
|
tests/test_large_file.sh
|
Shell
|
apache-2.0
| 417 |
#!/bin/sh
ENV_NAME="test-environment"
set -e
conda_create ()
{
hash -r
conda config --set always_yes yes --set changeps1 no
conda update -q conda
conda config --add channels pypi
conda info -a
deps='pip numpy scipy nose coverage scikit-learn!=0.19.0 matplotlib numba'
conda create -q -n $ENV_NAME "python=$TRAVIS_PYTHON_VERSION" $deps
conda update --all
}
src="$HOME/env/miniconda$TRAVIS_PYTHON_VERSION"
if [ ! -d "$src" ]; then
mkdir -p $HOME/env
pushd $HOME/env
# Download miniconda packages
wget http://repo.continuum.io/miniconda/Miniconda-3.16.0-Linux-x86_64.sh -O miniconda.sh;
    # Install miniconda
bash miniconda.sh -b -p $src
export PATH="$src/bin:$PATH"
conda_create
source activate $ENV_NAME
conda install -c conda-forge ffmpeg
pip install python-coveralls
source deactivate
popd
else
echo "Using cached dependencies"
fi
|
r9y9/librosa
|
.travis_dependencies.sh
|
Shell
|
isc
| 978 |
set -ex
cd ..
mkdir build-${TYPE}
cd build-${TYPE}
echo "Running qmake"
qmake "CONFIG+=${TYPE}" ../tears
echo "Running make"
make
echo "Running tests"
make -s check
|
truenull/tears
|
scripts/run-travis-build.sh
|
Shell
|
isc
| 168 |
#!/bin/sh
cd "`dirname "$0"`"
mkdir -p "./data/etc"
cp ../testrepo.txt data/etc/tve.list
if [ -z "$GNUPGHOME" ]; then
GNUPGHOME="$HOME/.gnupg"
fi
if ! TVEROOT="./data" TVEDB="./data/tve.db" GNUPGHOME="$GNUPGHOME" HOME="./data/home" ../run-update.sh; then
echo "Update failed." 1>&2
exit 1
fi
if ! T="`TVEROOT="./data" TVEDB="./data/tve.db" HOME="./data/home" ../search/search`"; then
echo "Database not updated properly." 1>&2
exit 1
fi
echo "Updates successfully run."
|
singpolyma/theveeb-ecosystem
|
test/test-update.sh
|
Shell
|
isc
| 477 |
DIR="$( cd "$( dirname "$0" )" && pwd )"
cd $DIR/../
export NODE_PATH=.
export NODE_ENV=development
npm run start
|
Yuliang-Lee/nodejs-express-mongodb-gulp-start-kit
|
bin/dev-start.sh
|
Shell
|
mit
| 116 |
#!/bin/sh
# Base16 Tomorrow - Shell color setup script
# Chris Kempson (http://chriskempson.com)
if [ "${TERM%%-*}" = 'linux' ]; then
# This script doesn't support linux console (use 'vconsole' template instead)
return 2>/dev/null || exit 0
fi
color00="1d/1f/21" # Base 00 - Black
color01="cc/66/66" # Base 08 - Red
color02="b5/bd/68" # Base 0B - Green
color03="f0/c6/74" # Base 0A - Yellow
color04="81/a2/be" # Base 0D - Blue
color05="b2/94/bb" # Base 0E - Magenta
color06="8a/be/b7" # Base 0C - Cyan
color07="c5/c8/c6" # Base 05 - White
color08="96/98/96" # Base 03 - Bright Black
color09=$color01 # Base 08 - Bright Red
color10=$color02 # Base 0B - Bright Green
color11=$color03 # Base 0A - Bright Yellow
color12=$color04 # Base 0D - Bright Blue
color13=$color05 # Base 0E - Bright Magenta
color14=$color06 # Base 0C - Bright Cyan
color15="ff/ff/ff" # Base 07 - Bright White
color16="de/93/5f" # Base 09
color17="a3/68/5a" # Base 0F
color18="28/2a/2e" # Base 01
color19="37/3b/41" # Base 02
color20="b4/b7/b4" # Base 04
color21="e0/e0/e0" # Base 06
color_foreground="37/3b/41" # Base 02
color_background="ff/ff/ff" # Base 07
color_cursor="37/3b/41" # Base 02
if [ -n "$TMUX" ]; then
# tell tmux to pass the escape sequences through
# (Source: http://permalink.gmane.org/gmane.comp.terminal-emulators.tmux.user/1324)
printf_template="\033Ptmux;\033\033]4;%d;rgb:%s\007\033\\"
printf_template_var="\033Ptmux;\033\033]%d;rgb:%s\007\033\\"
printf_template_custom="\033Ptmux;\033\033]%s%s\007\033\\"
elif [ "${TERM%%-*}" = "screen" ]; then
# GNU screen (screen, screen-256color, screen-256color-bce)
printf_template="\033P\033]4;%d;rgb:%s\007\033\\"
printf_template_var="\033P\033]%d;rgb:%s\007\033\\"
printf_template_custom="\033P\033]%s%s\007\033\\"
else
printf_template="\033]4;%d;rgb:%s\033\\"
printf_template_var="\033]%d;rgb:%s\033\\"
printf_template_custom="\033]%s%s\033\\"
fi
# 16 color space
printf $printf_template 0 $color00
printf $printf_template 1 $color01
printf $printf_template 2 $color02
printf $printf_template 3 $color03
printf $printf_template 4 $color04
printf $printf_template 5 $color05
printf $printf_template 6 $color06
printf $printf_template 7 $color07
printf $printf_template 8 $color08
printf $printf_template 9 $color09
printf $printf_template 10 $color10
printf $printf_template 11 $color11
printf $printf_template 12 $color12
printf $printf_template 13 $color13
printf $printf_template 14 $color14
printf $printf_template 15 $color15
# 256 color space
printf $printf_template 16 $color16
printf $printf_template 17 $color17
printf $printf_template 18 $color18
printf $printf_template 19 $color19
printf $printf_template 20 $color20
printf $printf_template 21 $color21
# foreground / background / cursor color
if [ -n "$ITERM_SESSION_ID" ]; then
# iTerm2 proprietary escape codes
  printf $printf_template_custom Pg 373b41 # foreground
printf $printf_template_custom Ph ffffff # background
printf $printf_template_custom Pi 373b41 # bold color
printf $printf_template_custom Pj c5c8c6 # selection color
printf $printf_template_custom Pk 373b41 # selected text color
printf $printf_template_custom Pl 373b41 # cursor
printf $printf_template_custom Pm ffffff # cursor text
else
printf $printf_template_var 10 $color_foreground
printf $printf_template_var 11 $color_background
printf $printf_template_custom 12 ";7" # cursor (reverse video)
fi
# clean up
unset printf_template
unset printf_template_var
unset color00
unset color01
unset color02
unset color03
unset color04
unset color05
unset color06
unset color07
unset color08
unset color09
unset color10
unset color11
unset color12
unset color13
unset color14
unset color15
unset color16
unset color17
unset color18
unset color19
unset color20
unset color21
unset color_foreground
unset color_background
unset color_cursor
|
ArjanFrans/dotfiles
|
colorschemes/base16-builder/output/shell/base16-tomorrow.light.sh
|
Shell
|
mit
| 3,872 |
#!/usr/bin/env bash
set -e
set -o pipefail
echo
NAME=pyramid-scheme
DATE=$(date +%y%m%d.%H%M)
VERSION=$(cat ./VERSION)
if command -v git &> /dev/null && git rev-parse &> /dev/null; then
GITCOMMIT=$(git rev-parse --short HEAD)
if [ -n "$(git status --porcelain --untracked-files=no)" ]; then
GITCOMMIT="$GITCOMMIT-dirty"
fi
elif [ "$HASH" ]; then
GITCOMMIT="$HASH"
else
echo >&2 'error: .git directory missing and HASH not specified'
echo >&2 ' Please either build with the .git directory accessible, or specify the'
echo >&2 ' exact (--short) commit hash you are building using HASH for'
echo >&2 ' future accountability in diagnosing build issues. Thanks!'
exit 1
fi
if [ ! "$GOPATH" ]; then
echo >&2 'error: missing GOPATH; please see http://golang.org/doc/code.html#GOPATH'
exit 1
fi
# Use these flags when compiling the tests and final binary
LDFLAGS='
-w
-X github.com/masahide/'$NAME'/version.GITCOMMIT "'$GITCOMMIT'"
-X github.com/masahide/'$NAME'/version.VERSION "'$VERSION.$DATE'"
'
LDFLAGS_STATIC='-linkmode external'
EXTLDFLAGS_STATIC='-static'
EXTLDFLAGS_STATIC_CUSTOM="$EXTLDFLAGS_STATIC -lpthread -Wl,--unresolved-symbols=ignore-in-object-files"
LDFLAGS_STATIC_CUSTOM="
$LDFLAGS_STATIC
-extldflags \"$EXTLDFLAGS_STATIC_CUSTOM\"
"
HAVE_GO_TEST_COVER=
if \
go help testflag | grep -- -cover > /dev/null \
&& go tool -n cover > /dev/null 2>&1 \
; then
HAVE_GO_TEST_COVER=1
fi
bundle() {
dir=bin
echo "---> Making binary: (in bin/ )"
mkdir -p $dir
binary $(pwd)/$dir
}
binary() {
DEST=$1
echo go build -o $DEST/$NAME -ldflags "$LDFLAGS"
go build -o $DEST/$NAME -ldflags "$LDFLAGS" &&
echo $VERSION.$DATE-$GITCOMMIT >$DEST/VERSION &&
cp $DEST/VERSION $DEST/version-$VERSION.$DATE-$GITCOMMIT.txt
echo "Created binary: $DEST/$NAME"
}
main() {
# We want this to fail if bin already exists and cannot be removed.
# This is to avoid mixing bin from different versions of the code.
mkdir -p bin
if [ -e "bin" ]; then
echo "bin already exists. Removing."
rm -fr bin && mkdir bin || exit 1
echo
fi
bundle
echo
}
main "$@"
|
masahide/pyramid-scheme
|
hack/make.sh
|
Shell
|
mit
| 2,096 |
alias d="docker"
alias dst="docker stop"
alias drm="docker rm"
alias dps="docker ps -a"
alias dmi="docker images"
alias drun="docker run --rm -it"
alias doco="docker-compose"
alias dcr="docker-compose run --rm"
dclean() {
docker rm $(docker ps --filter "status=exited" -q)
}
dcleani() {
docker rm $(docker ps -aq)
docker rmi $(docker images | grep none | awk '{ print $3; }')
}
dbash() {
docker exec -it $1 /bin/bash
}
dstoprm() {
docker stop $1 ; docker rm $1
}
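# Usage sketches (container name "my-app" is hypothetical):
#   dbash my-app     # open an interactive bash shell inside a running container
#   dstoprm my-app   # stop the container, then remove it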
|
bernardeli/dotfiles
|
shell/docker.sh
|
Shell
|
mit
| 477 |
#!/usr/bin/env bash
sudo chmod +rx /sepalUsers
if [[ "${DEPLOY_ENVIRONMENT}" == "DEV" ]]
then
echo "Starting nodemon"
[[ -d node_modules ]] || npm install
NODE_TLS_REJECT_UNAUTHORIZED=0 exec nodemon \
--watch "${MODULE}"/src \
--watch "${SHARED}" \
--inspect=0.0.0.0:9236 \
src/main.js \
--amqp-uri amqp://${RABBITMQ_HOST}:${RABBITMQ_PORT} \
--redis-uri redis://user-storage-redis \
--home-dir /sepalUsers \
--min-delay-seconds ${MIN_DELAY_SECONDS} \
--max-delay-seconds ${MAX_DELAY_SECONDS} \
--delay-increase-factor ${DELAY_INCREASE_FACTOR} \
--concurrency ${CONCURRENCY} \
--max-retries ${MAX_RETRIES} \
--initial-retry-delay-seconds ${INITIAL_RETRY_DELAY_SECONDS}
else
echo "Starting node"
exec node \
src/main.js \
--amqp-uri amqp://${RABBITMQ_HOST}:${RABBITMQ_PORT} \
--redis-uri redis://user-storage-redis \
--home-dir /sepalUsers \
--min-delay-seconds ${MIN_DELAY_SECONDS} \
--max-delay-seconds ${MAX_DELAY_SECONDS} \
--delay-increase-factor ${DELAY_INCREASE_FACTOR} \
--concurrency ${CONCURRENCY} \
--max-retries ${MAX_RETRIES} \
--initial-retry-delay-seconds ${INITIAL_RETRY_DELAY_SECONDS}
fi
|
openforis/sepal
|
modules/user-storage/start.sh
|
Shell
|
mit
| 1,206 |
#!/usr/bin/env bash
VERSION="1.2.0"
rm -rf dist
rm ai.so
python ./setup.py build_ext --inplace
cxfreeze --include-modules=encodings.ascii,encodings.utf_8 -O -c --target-name=mysticmine monorail.py
cp -R ../data/800x600 dist/data
cp error_mm.log dist
cp quest.stat dist
cp ../LICENSE.txt dist
cp ../assets/graphics/icon48x48.png dist
mv dist mysticmine_${VERSION}
mkdir -p ../installer/linux/
tar zcvf ../installer/linux/mysticmine.tar.gz mysticmine_${VERSION}
mv mysticmine_${VERSION} dist
|
koonsolo/MysticMine
|
monorail/create_freeze.sh
|
Shell
|
mit
| 499 |
#!/bin/bash
#
# Extract arguments from an SPF record recursively.
#
# VERSION :0.1.1
# DATE :2018-04-16
# URL :https://github.com/szepeviktor/debian-server-tools
# AUTHOR :Viktor Szépe <[email protected]>
# LICENSE :The MIT License (MIT)
# BASH-VERSION :4.2+
# DOCS :http://www.openspf.org/SPF_Record_Syntax
# LOCATION :/usr/local/bin/spf-survey.sh
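# Usage example (domain is hypothetical):
#   spf-survey.sh example.com
# Prints each ip4: address on its own line, each ip6: address prefixed
# with ":", and recurses into include: mechanisms.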
Do_spf()
{
local DOMAIN="$1"
local SPF_RECORD
local MECHANISM
SPF_RECORD="$(host -t TXT "$DOMAIN" | sed -n -e 's|.* descriptive text "\(v=spf1 .*\)"$|\1|p')" #'
while read -r -d " " MECHANISM; do
case "$MECHANISM" in
"v=spf1")
continue
;;
"ip4:"*)
echo "${MECHANISM#ip4:}"
;;
"ip6:"*)
echo ":${MECHANISM#ip6:}"
;;
"include:"*)
# Recurse into include
Do_spf "${MECHANISM#include:}"
;;
#"a"|"mx"|"ptr")
# # TODO
# # Get records
# # Resolve IP addresses, handle CNAMEs
# Do_spf IPs
# ;;
"?all"|"~all"|"-all")
# "?" Neutral, "~" SoftFail, "-" Fail
continue
;;
*)
echo "Unknown mechanism in SPF: ${MECHANISM}" 1>&2
exit 100
;;
esac
done <<<"$SPF_RECORD"
}
set -e
Do_spf "$1"
|
szepeviktor/debian-server-tools
|
mail/spf-survey.sh
|
Shell
|
mit
| 1,516 |
#!/bin/sh
# https://github.com/lokori/findbugs-security-docker
docker pull lokori/findbugs-sec
docker run --rm -v `pwd`:/workdir/src lokori/findbugs-sec -html:fancy-hist.xsl -output /workdir/src/reports/findsec-report.html src
|
lokori/docker-devsec-demo
|
run-findsecbugs.sh
|
Shell
|
mit
| 232 |
sudo apt-get -y install ghost-phisher
|
sslavov93/kali-linux-tools
|
scripts/Ghost_Phisher.sh
|
Shell
|
mit
| 38 |
alias reload='source ~/.zshrc && echo "sourced ~/.zshrc"'
# if grep --color "a" <<<"a" &>/dev/null; then
# export GREP_OPTIONS='--color=always'
# fi
# Directory
CLICOLOR=1
LS_COLORS='di=1:fi=0:ln=31:pi=5:so=5:bd=5:cd=5:or=31:mi=0:ex=35:*.rpm=90'
export LS_COLORS
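# Key (editorial note): di=directory, fi=file, ln=symlink, ex=executable;
# values are ANSI attributes (1=bold, 31=red, 35=magenta, 90=bright black).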
alias ls='ls -hBGlah -F'
alias ll='ls'
alias lsd='ls -l ${colorflag} | grep "^d"'
alias lsl='ls -l ${colorflag} | grep "^l"'
#alias pause="watch -n 10 'netstat -a'"
alias pause="watch -n 10 'date'"
alias md='mkdir -p'
alias rd='rmdir'
alias ..='cd ..' # Go up one directory
alias ...='cd ../..' # Go up two directories
alias ....='cd ../../..' # Go up three directories
alias ..-='cd -' # Go back
# Shortcuts
alias d="cd ~/Dropbox"
alias dl="cd ~/Downloads"
alias dt="cd ~/Desktop"
alias p="cd ~/Projects"
alias h="history"
alias j="jobs"
alias v="vim"
alias vi="vim"
alias o="open"
alias oo="open ."
alias q='exit'
alias tf='tail -F -n200'
# Misc
# Get week number
alias week='date +%V'
# URL-encode strings
alias urlencode='python -c "import sys, urllib as ul; print ul.quote_plus(sys.argv[1]);"'
# Disk usage with human sizes and minimal depth
alias du1='du -h --max-depth=1'
alias fn='find . -name'
alias hi='history | tail -20'
|
amsross/dotfiles
|
zsh/custom/general.aliases.zsh
|
Shell
|
mit
| 1,224 |
#!/bin/bash
#Updates local code from git by pulling, and fab kickrestarts
git pull
fab kickrestart:debug=True
|
sisirkoppaka/articur8
|
update_local_from_git.sh
|
Shell
|
mit
| 110 |
echo "Initializing test..."
case $1 in
"ct")
echo "Performing Create Test"
./tests pct 1
./tests pct 2
./tests pct 4
./tests pct 8
./tests pct 16
./tests pct 32
./tests pct 64
python graph_test.py
;;
"dtt")
echo "Performing Drop Table Test"
./tests pdt 1
./tests pdt 2
./tests pdt 4
./tests pdt 8
./tests pdt 16
./tests pdt 32
./tests pdt 64
python graph_test.py
;;
"irt")
echo "Performing Insert Row Test"
./tests pirt 1
./tests pirt 2
./tests pirt 4
./tests pirt 8
./tests pirt 16
./tests pirt 32
./tests pirt 64
python graph_test.py
;;
"st")
echo "Performing Select Test"
./tests pst 1
./tests pst 2
./tests pst 4
./tests pst 8
./tests pst 16
./tests pst 32
./tests pst 64
python graph_test.py
;;
"st")
echo "Performing Mixed Test"
./tests pmt 1
./tests pmt 2
./tests pmt 4
./tests pmt 8
./tests pmt 16
./tests pmt 32
./tests pmt 64
python graph_test.py
;;
*)
echo "No test selected options are:"
echo "ct: Create Test"
echo "dtt: Drop Table Test"
echo "irt: Insert Row Test"
echo "st: Select Test"
esac
|
Dar13/OpenMemDB
|
database/tests/performance_test.sh
|
Shell
|
mit
| 1,155 |
#!/bin/sh
set -e
echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
SWIFT_STDLIB_PATH="${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}"
install_framework()
{
if [ -r "${BUILT_PRODUCTS_DIR}/$1" ]; then
local source="${BUILT_PRODUCTS_DIR}/$1"
else
local source="${BUILT_PRODUCTS_DIR}/$(basename "$1")"
fi
local destination="${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
if [ -L "${source}" ]; then
echo "Symlinked..."
source="$(readlink "${source}")"
fi
  # use filter instead of exclude so missing patterns don't throw errors
echo "rsync -av --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${destination}\""
rsync -av --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${destination}"
# Resign the code if required by the build settings to avoid unstable apps
code_sign_if_enabled "${destination}/$(basename "$1")"
# Embed linked Swift runtime libraries
local basename
basename="$(basename "$1" | sed -E s/\\..+// && exit ${PIPESTATUS[0]})"
local swift_runtime_libs
swift_runtime_libs=$(xcrun otool -LX "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}/${basename}.framework/${basename}" | grep --color=never @rpath/libswift | sed -E s/@rpath\\/\(.+dylib\).*/\\1/g | uniq -u && exit ${PIPESTATUS[0]})
for lib in $swift_runtime_libs; do
echo "rsync -auv \"${SWIFT_STDLIB_PATH}/${lib}\" \"${destination}\""
rsync -auv "${SWIFT_STDLIB_PATH}/${lib}" "${destination}"
code_sign_if_enabled "${destination}/${lib}"
done
}
# Signs a framework with the provided identity
code_sign_if_enabled() {
if [ -n "${EXPANDED_CODE_SIGN_IDENTITY}" -a "${CODE_SIGNING_REQUIRED}" != "NO" -a "${CODE_SIGNING_ALLOWED}" != "NO" ]; then
    # Use the current code_sign_identity
echo "Code Signing $1 with Identity ${EXPANDED_CODE_SIGN_IDENTITY_NAME}"
echo "/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} --preserve-metadata=identifier,entitlements \"$1\""
/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} --preserve-metadata=identifier,entitlements "$1"
fi
}
if [[ "$CONFIGURATION" == "Debug" ]]; then
install_framework 'Pods-ECUIControls_Tests/ECExtension.framework'
install_framework 'Pods-ECUIControls_Tests/ECUIControls.framework'
fi
if [[ "$CONFIGURATION" == "Release" ]]; then
install_framework 'Pods-ECUIControls_Tests/ECExtension.framework'
install_framework 'Pods-ECUIControls_Tests/ECUIControls.framework'
fi
|
skyhacker2/ECUIControls
|
Example/Pods/Target Support Files/Pods-ECUIControls_Tests/Pods-ECUIControls_Tests-frameworks.sh
|
Shell
|
mit
| 2,756 |
#!/bin/bash
echo "Type the name of your site without the .com This will rename the folder you're in and be used in the first git commit:"
read sitename
echo "Cool hang tight. This might take a few seconds."
(cd ..; mv jade-bower-h5bp-scss-linter-gulp-starter $sitename)
echo "Folder renamed."
echo -ne '\n' 'build' '\n' 'y' '\n' '404.html' '\n' 'y' '\n' | divshot init
echo "divshot site initialized"
rm -rf .git
echo "Old git repo removed."
git init
echo "New git repo initialized."
git add -A
git commit -m "Initial commit for $sitename"
rm README.md
mv new-repo-readme.md README.md
git add -A
git commit -m "Removed readme from original static project's readme and moved new one to be default."
git remote add origin [email protected]:brettwise/$sitename.git
git push -u origin master
npm install
echo "npm packages installed."
(cd src/sass;bower install)
echo "Sass related Bower packages installed."
(cd src/js;bower install)
echo "JS related Bower packages installed."
subl .
gulp
|
brettwise/prototype-waypaverlabs
|
gogo-static-site.sh
|
Shell
|
mit
| 985 |
_qbot() {
COMPREPLY=()
local word="${COMP_WORDS[COMP_CWORD]}"
if [ "$COMP_CWORD" -eq 1 ]; then
COMPREPLY=( $(compgen -W "$(qbot commands)" -- "$word") )
else
local command="${COMP_WORDS[1]}"
local completions="$(qbot completions "$command")"
COMPREPLY=( $(compgen -W "$completions" -- "$word") )
fi
}
complete -F _qbot qbot
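# To enable these completions, source this file from your shell startup file
# (path is hypothetical):
#   source /path/to/completions/qbot.bash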
|
raisebook/qbot
|
bin/sub/completions/qbot.bash
|
Shell
|
mit
| 352 |
#! /bin/bash
docker-compose -f docker-compose.test.yml kill db-test cache-test
|
kkemple/awesome-enterprise-web-service
|
bin/docker-stop-test-dependencies.sh
|
Shell
|
mit
| 80 |
#!/bin/bash
pushd secure/
shopt -s extglob
for file in !(*.gpg)
do
rm $file
done
shopt -u extglob
popd
|
beardandcode/infrastructure
|
tools/secure/clean.sh
|
Shell
|
mit
| 110 |
#!/bin/sh
# CYBERWATCH SAS - 2016
#
# Security fix for RHSA-2012:0720
#
# Security announcement date: 2012-06-12 14:16:49 UTC
# Script generation date: 2016-10-31 21:18:34 UTC
#
# Operating System: Red Hat 5
# Architecture: x86_64
#
# Vulnerable packages fix on version:
# - kernel-doc.noarch:2.6.18-238.39.1.el5
# - kernel.x86_64:2.6.18-238.39.1.el5
# - kernel-debug.x86_64:2.6.18-238.39.1.el5
# - kernel-debug-debuginfo.x86_64:2.6.18-238.39.1.el5
# - kernel-debug-devel.x86_64:2.6.18-238.39.1.el5
# - kernel-debuginfo.x86_64:2.6.18-238.39.1.el5
# - kernel-debuginfo-common.x86_64:2.6.18-238.39.1.el5
# - kernel-devel.x86_64:2.6.18-238.39.1.el5
# - kernel-headers.x86_64:2.6.18-238.39.1.el5
# - kernel-xen.x86_64:2.6.18-238.39.1.el5
# - kernel-xen-debuginfo.x86_64:2.6.18-238.39.1.el5
# - kernel-xen-devel.x86_64:2.6.18-238.39.1.el5
#
# Last versions recommended by security team:
# - kernel-doc.noarch:2.6.18-408.el5
# - kernel.x86_64:2.6.18-238.57.1.el5
# - kernel-debug.x86_64:2.6.18-238.57.1.el5
# - kernel-debug-debuginfo.x86_64:2.6.18-238.57.1.el5
# - kernel-debug-devel.x86_64:2.6.18-238.57.1.el5
# - kernel-debuginfo.x86_64:2.6.18-238.57.1.el5
# - kernel-debuginfo-common.x86_64:2.6.18-238.57.1.el5
# - kernel-devel.x86_64:2.6.18-238.57.1.el5
# - kernel-headers.x86_64:2.6.18-238.57.1.el5
# - kernel-xen.x86_64:2.6.18-238.57.1.el5
# - kernel-xen-debuginfo.x86_64:2.6.18-238.57.1.el5
# - kernel-xen-devel.x86_64:2.6.18-238.57.1.el5
#
# CVE List:
# - CVE-2012-0217
# - CVE-2012-1583
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo yum install kernel-doc.noarch-2.6.18 -y
sudo yum install kernel.x86_64-2.6.18 -y
sudo yum install kernel-debug.x86_64-2.6.18 -y
sudo yum install kernel-debug-debuginfo.x86_64-2.6.18 -y
sudo yum install kernel-debug-devel.x86_64-2.6.18 -y
sudo yum install kernel-debuginfo.x86_64-2.6.18 -y
sudo yum install kernel-debuginfo-common.x86_64-2.6.18 -y
sudo yum install kernel-devel.x86_64-2.6.18 -y
sudo yum install kernel-headers.x86_64-2.6.18 -y
sudo yum install kernel-xen.x86_64-2.6.18 -y
sudo yum install kernel-xen-debuginfo.x86_64-2.6.18 -y
sudo yum install kernel-xen-devel.x86_64-2.6.18 -y
|
Cyberwatch/cbw-security-fixes
|
Red_Hat_5/x86_64/2012/RHSA-2012:0720.sh
|
Shell
|
mit
| 2,290 |
alias tempo='curl -4 "http://wttr.in/Jo%C3%A3o%20Pessoa?lang=pt"'
purge_space() {
docker_purge
  sudo rm -rf ~/Downloads/*
  sudo rm -rf /Users/sergio/Library/Caches/*
  sudo rm -rf "/Users/sergio/Library/Application Support/Steam/"*
}
|
sergiovilar/dotfiles
|
misc/aliases.zsh
|
Shell
|
mit
| 246 |
#!/usr/bin/env bash
MAPFILE=${HOME}/.arlobot/rosmaps/${1}
if [ $# -eq 0 ]
then
echo "You must provide a map file name."
echo "Run listMaps.sh for a list of your maps."
exit
fi
if [ ! -f ${MAPFILE} ]
then
MAPFILE=${MAPFILE}.yaml
fi
echo ${MAPFILE}
if [ ! -f ${MAPFILE} ]
then
echo "File does not exist!"
exit 1
fi
pgrep -f robot.launch
if [ $? -eq 0 ]
then
if [ $(jq '.use_xv11' ${HOME}/.arlobot/personalDataForBehavior.json) == true ]
then
roslaunch arlobot_launchers load_map_xv11.launch map_file:=${MAPFILE}
else
roslaunch arlobot_launchers load_map.launch map_file:=${MAPFILE}
fi
else
echo "Robot must be running to start this."
exit 1
fi
|
chrisl8/Metatron
|
scripts/load-map.sh
|
Shell
|
mit
| 710 |
#!/bin/bash
if [ "$1" == "start" ]; then
cd server
echo "starting server..."
node index.js >> log.txt&
if [ "$(pidof node)" != "" ]; then
echo "Great success!!"
else
echo "Oops, looks like an error occured. Probably maybe."
fi
elif [ "$1" == "stop" ]; then
echo "stopping server..."
kill $(pidof node)
if [ "$(pidof node)" == "" ]; then
echo "Great success!!"
else
echo "Oops, looks like an error occured. Probably maybe."
fi
elif [ "$1" == "check" ]; then
if [ "$(pidof node)" != "" ]; then
echo "Server is running"
else
echo "Server is not running"
fi
else
echo "Possible arguments are 'start', 'stop' and 'check'"
fi
|
YRSNorwich/Datamon
|
server.sh
|
Shell
|
mit
| 686 |
#!/bin/bash -ex
sudo groupadd hubert || true
sudo useradd -d /home/hubert -m -s /bin/bash -g hubert -G admin,passwordlesssudo,sudo,wheel hubert
sudo passwd hubert
|
hwong/provisioning
|
legacy/scripts/add_users.sh
|
Shell
|
mit
| 165 |
# wget ftp://ftp.ncbi.nlm.nih.gov/pub/clinvar/vcf/clinvar_20130118.vcf.gz
# wget ftp://ftp.ncbi.nlm.nih.gov/pub/clinvar/vcf/clinvar_20130118.vcf.gz.tbi
# wget ftp://ftp.ncbi.nlm.nih.gov/pub/clinvar/vcf/clinvar_20131230.vcf.gz
# wget ftp://ftp.ncbi.nlm.nih.gov/pub/clinvar/vcf/clinvar_20131230.vcf.gz.tbi
# wget ftp://ftp.ncbi.nlm.nih.gov/pub/clinvar/vcf/clinvar_20140303.vcf.gz
# wget ftp://ftp.ncbi.nlm.nih.gov/pub/clinvar/vcf/clinvar_20140303.vcf.gz.tbi
wget ftp://ftp.ncbi.nlm.nih.gov/pub/clinvar/vcf_GRCh37/clinvar_20140807.vcf.gz
wget ftp://ftp.ncbi.nlm.nih.gov/pub/clinvar/vcf_GRCh37/clinvar_20140807.vcf.gz.tbi
|
bgossele/geminicassandra
|
geminicassandra/annotation_provenance/make-clinvar.sh
|
Shell
|
mit
| 621 |
#!/bin/sh
set -e
echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
SWIFT_STDLIB_PATH="${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}"
install_framework()
{
if [ -r "${BUILT_PRODUCTS_DIR}/$1" ]; then
local source="${BUILT_PRODUCTS_DIR}/$1"
elif [ -r "${BUILT_PRODUCTS_DIR}/$(basename "$1")" ]; then
local source="${BUILT_PRODUCTS_DIR}/$(basename "$1")"
elif [ -r "$1" ]; then
local source="$1"
fi
local destination="${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
if [ -L "${source}" ]; then
echo "Symlinked..."
source="$(readlink "${source}")"
fi
  # use filter instead of exclude so missing patterns don't throw errors
echo "rsync -av --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${destination}\""
rsync -av --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${destination}"
local basename
basename="$(basename -s .framework "$1")"
binary="${destination}/${basename}.framework/${basename}"
if ! [ -r "$binary" ]; then
binary="${destination}/${basename}"
fi
# Strip invalid architectures so "fat" simulator / device frameworks work on device
if [[ "$(file "$binary")" == *"dynamically linked shared library"* ]]; then
strip_invalid_archs "$binary"
fi
# Resign the code if required by the build settings to avoid unstable apps
code_sign_if_enabled "${destination}/$(basename "$1")"
# Embed linked Swift runtime libraries. No longer necessary as of Xcode 7.
if [ "${XCODE_VERSION_MAJOR}" -lt 7 ]; then
local swift_runtime_libs
swift_runtime_libs=$(xcrun otool -LX "$binary" | grep --color=never @rpath/libswift | sed -E s/@rpath\\/\(.+dylib\).*/\\1/g | uniq -u && exit ${PIPESTATUS[0]})
for lib in $swift_runtime_libs; do
echo "rsync -auv \"${SWIFT_STDLIB_PATH}/${lib}\" \"${destination}\""
rsync -auv "${SWIFT_STDLIB_PATH}/${lib}" "${destination}"
code_sign_if_enabled "${destination}/${lib}"
done
fi
}
# Signs a framework with the provided identity
code_sign_if_enabled() {
if [ -n "${EXPANDED_CODE_SIGN_IDENTITY}" -a "${CODE_SIGNING_REQUIRED}" != "NO" -a "${CODE_SIGNING_ALLOWED}" != "NO" ]; then
    # Use the current code_sign_identity
echo "Code Signing $1 with Identity ${EXPANDED_CODE_SIGN_IDENTITY_NAME}"
echo "/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} ${OTHER_CODE_SIGN_FLAGS} --preserve-metadata=identifier,entitlements \"$1\""
/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} ${OTHER_CODE_SIGN_FLAGS} --preserve-metadata=identifier,entitlements "$1"
fi
}
# Strip invalid architectures
strip_invalid_archs() {
binary="$1"
# Get architectures for current file
archs="$(lipo -info "$binary" | rev | cut -d ':' -f1 | rev)"
stripped=""
for arch in $archs; do
if ! [[ "${VALID_ARCHS}" == *"$arch"* ]]; then
# Strip non-valid architectures in-place
lipo -remove "$arch" -output "$binary" "$binary" || exit 1
stripped="$stripped $arch"
fi
done
if [[ "$stripped" ]]; then
echo "Stripped $binary of architectures:$stripped"
fi
}
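# Embed every pod framework required by the active build configuration.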
if [[ "$CONFIGURATION" == "Debug" ]]; then
install_framework "$BUILT_PRODUCTS_DIR/AFNetworking/AFNetworking.framework"
install_framework "$BUILT_PRODUCTS_DIR/AnimatedGIFImageSerialization/AnimatedGIFImageSerialization.framework"
install_framework "$BUILT_PRODUCTS_DIR/BlocksKit/BlocksKit.framework"
install_framework "$BUILT_PRODUCTS_DIR/CocoaLumberjack/CocoaLumberjack.framework"
install_framework "$BUILT_PRODUCTS_DIR/KVOController/KVOController.framework"
install_framework "$BUILT_PRODUCTS_DIR/Mantle/Mantle.framework"
install_framework "$BUILT_PRODUCTS_DIR/Masonry/Masonry.framework"
install_framework "$BUILT_PRODUCTS_DIR/NSDate-Extensions/NSDate_Extensions.framework"
install_framework "$BUILT_PRODUCTS_DIR/OMGHTTPURLRQ/OMGHTTPURLRQ.framework"
install_framework "$BUILT_PRODUCTS_DIR/PiwikTracker/PiwikTracker.framework"
install_framework "$BUILT_PRODUCTS_DIR/PromiseKit/PromiseKit.framework"
install_framework "$BUILT_PRODUCTS_DIR/Reachability/Reachability.framework"
install_framework "$BUILT_PRODUCTS_DIR/SDWebImage/SDWebImage.framework"
install_framework "$BUILT_PRODUCTS_DIR/SSDataSources/SSDataSources.framework"
install_framework "$BUILT_PRODUCTS_DIR/YapDatabase/YapDatabase.framework"
install_framework "$BUILT_PRODUCTS_DIR/hpple/hpple.framework"
install_framework "$BUILT_PRODUCTS_DIR/libextobjc/libextobjc.framework"
install_framework "$BUILT_PRODUCTS_DIR/FLAnimatedImage/FLAnimatedImage.framework"
install_framework "$BUILT_PRODUCTS_DIR/GCDWebServer/GCDWebServer.framework"
install_framework "$BUILT_PRODUCTS_DIR/HexColors/HexColors.framework"
install_framework "$BUILT_PRODUCTS_DIR/NYTPhotoViewer/NYTPhotoViewer.framework"
install_framework "$BUILT_PRODUCTS_DIR/SWStepSlider/SWStepSlider.framework"
install_framework "$BUILT_PRODUCTS_DIR/TSMessages/TSMessages.framework"
install_framework "$BUILT_PRODUCTS_DIR/TUSafariActivity/TUSafariActivity.framework"
install_framework "$BUILT_PRODUCTS_DIR/Tweaks/Tweaks.framework"
install_framework "$BUILT_PRODUCTS_DIR/VTAcknowledgementsViewController/VTAcknowledgementsViewController.framework"
fi
if [[ "$CONFIGURATION" == "AlphaDebug" ]]; then
install_framework "$BUILT_PRODUCTS_DIR/AFNetworking/AFNetworking.framework"
install_framework "$BUILT_PRODUCTS_DIR/AnimatedGIFImageSerialization/AnimatedGIFImageSerialization.framework"
install_framework "$BUILT_PRODUCTS_DIR/BlocksKit/BlocksKit.framework"
install_framework "$BUILT_PRODUCTS_DIR/CocoaLumberjack/CocoaLumberjack.framework"
install_framework "$BUILT_PRODUCTS_DIR/KVOController/KVOController.framework"
install_framework "$BUILT_PRODUCTS_DIR/Mantle/Mantle.framework"
install_framework "$BUILT_PRODUCTS_DIR/Masonry/Masonry.framework"
install_framework "$BUILT_PRODUCTS_DIR/NSDate-Extensions/NSDate_Extensions.framework"
install_framework "$BUILT_PRODUCTS_DIR/OMGHTTPURLRQ/OMGHTTPURLRQ.framework"
install_framework "$BUILT_PRODUCTS_DIR/PiwikTracker/PiwikTracker.framework"
install_framework "$BUILT_PRODUCTS_DIR/PromiseKit/PromiseKit.framework"
install_framework "$BUILT_PRODUCTS_DIR/Reachability/Reachability.framework"
install_framework "$BUILT_PRODUCTS_DIR/SDWebImage/SDWebImage.framework"
install_framework "$BUILT_PRODUCTS_DIR/SSDataSources/SSDataSources.framework"
install_framework "$BUILT_PRODUCTS_DIR/YapDatabase/YapDatabase.framework"
install_framework "$BUILT_PRODUCTS_DIR/hpple/hpple.framework"
install_framework "$BUILT_PRODUCTS_DIR/libextobjc/libextobjc.framework"
install_framework "$BUILT_PRODUCTS_DIR/FLAnimatedImage/FLAnimatedImage.framework"
install_framework "$BUILT_PRODUCTS_DIR/GCDWebServer/GCDWebServer.framework"
install_framework "$BUILT_PRODUCTS_DIR/HexColors/HexColors.framework"
install_framework "$BUILT_PRODUCTS_DIR/NYTPhotoViewer/NYTPhotoViewer.framework"
install_framework "$BUILT_PRODUCTS_DIR/SWStepSlider/SWStepSlider.framework"
install_framework "$BUILT_PRODUCTS_DIR/TSMessages/TSMessages.framework"
install_framework "$BUILT_PRODUCTS_DIR/TUSafariActivity/TUSafariActivity.framework"
install_framework "$BUILT_PRODUCTS_DIR/Tweaks/Tweaks.framework"
install_framework "$BUILT_PRODUCTS_DIR/VTAcknowledgementsViewController/VTAcknowledgementsViewController.framework"
fi
if [[ "$CONFIGURATION" == "Debug Test" ]]; then
install_framework "$BUILT_PRODUCTS_DIR/AFNetworking/AFNetworking.framework"
install_framework "$BUILT_PRODUCTS_DIR/AnimatedGIFImageSerialization/AnimatedGIFImageSerialization.framework"
install_framework "$BUILT_PRODUCTS_DIR/BlocksKit/BlocksKit.framework"
install_framework "$BUILT_PRODUCTS_DIR/CocoaLumberjack/CocoaLumberjack.framework"
install_framework "$BUILT_PRODUCTS_DIR/KVOController/KVOController.framework"
install_framework "$BUILT_PRODUCTS_DIR/Mantle/Mantle.framework"
install_framework "$BUILT_PRODUCTS_DIR/Masonry/Masonry.framework"
install_framework "$BUILT_PRODUCTS_DIR/NSDate-Extensions/NSDate_Extensions.framework"
install_framework "$BUILT_PRODUCTS_DIR/OMGHTTPURLRQ/OMGHTTPURLRQ.framework"
install_framework "$BUILT_PRODUCTS_DIR/PiwikTracker/PiwikTracker.framework"
install_framework "$BUILT_PRODUCTS_DIR/PromiseKit/PromiseKit.framework"
install_framework "$BUILT_PRODUCTS_DIR/Reachability/Reachability.framework"
install_framework "$BUILT_PRODUCTS_DIR/SDWebImage/SDWebImage.framework"
install_framework "$BUILT_PRODUCTS_DIR/SSDataSources/SSDataSources.framework"
install_framework "$BUILT_PRODUCTS_DIR/YapDatabase/YapDatabase.framework"
install_framework "$BUILT_PRODUCTS_DIR/hpple/hpple.framework"
install_framework "$BUILT_PRODUCTS_DIR/libextobjc/libextobjc.framework"
install_framework "$BUILT_PRODUCTS_DIR/FLAnimatedImage/FLAnimatedImage.framework"
install_framework "$BUILT_PRODUCTS_DIR/GCDWebServer/GCDWebServer.framework"
install_framework "$BUILT_PRODUCTS_DIR/HexColors/HexColors.framework"
install_framework "$BUILT_PRODUCTS_DIR/NYTPhotoViewer/NYTPhotoViewer.framework"
install_framework "$BUILT_PRODUCTS_DIR/SWStepSlider/SWStepSlider.framework"
install_framework "$BUILT_PRODUCTS_DIR/TSMessages/TSMessages.framework"
install_framework "$BUILT_PRODUCTS_DIR/TUSafariActivity/TUSafariActivity.framework"
install_framework "$BUILT_PRODUCTS_DIR/Tweaks/Tweaks.framework"
install_framework "$BUILT_PRODUCTS_DIR/VTAcknowledgementsViewController/VTAcknowledgementsViewController.framework"
fi
if [[ "$CONFIGURATION" == "AdHoc" ]]; then
install_framework "$BUILT_PRODUCTS_DIR/AFNetworking/AFNetworking.framework"
install_framework "$BUILT_PRODUCTS_DIR/AnimatedGIFImageSerialization/AnimatedGIFImageSerialization.framework"
install_framework "$BUILT_PRODUCTS_DIR/BlocksKit/BlocksKit.framework"
install_framework "$BUILT_PRODUCTS_DIR/CocoaLumberjack/CocoaLumberjack.framework"
install_framework "$BUILT_PRODUCTS_DIR/KVOController/KVOController.framework"
install_framework "$BUILT_PRODUCTS_DIR/Mantle/Mantle.framework"
install_framework "$BUILT_PRODUCTS_DIR/Masonry/Masonry.framework"
install_framework "$BUILT_PRODUCTS_DIR/NSDate-Extensions/NSDate_Extensions.framework"
install_framework "$BUILT_PRODUCTS_DIR/OMGHTTPURLRQ/OMGHTTPURLRQ.framework"
install_framework "$BUILT_PRODUCTS_DIR/PiwikTracker/PiwikTracker.framework"
install_framework "$BUILT_PRODUCTS_DIR/PromiseKit/PromiseKit.framework"
install_framework "$BUILT_PRODUCTS_DIR/Reachability/Reachability.framework"
install_framework "$BUILT_PRODUCTS_DIR/SDWebImage/SDWebImage.framework"
install_framework "$BUILT_PRODUCTS_DIR/SSDataSources/SSDataSources.framework"
install_framework "$BUILT_PRODUCTS_DIR/YapDatabase/YapDatabase.framework"
install_framework "$BUILT_PRODUCTS_DIR/hpple/hpple.framework"
install_framework "$BUILT_PRODUCTS_DIR/libextobjc/libextobjc.framework"
install_framework "$BUILT_PRODUCTS_DIR/FLAnimatedImage/FLAnimatedImage.framework"
install_framework "$BUILT_PRODUCTS_DIR/GCDWebServer/GCDWebServer.framework"
install_framework "$BUILT_PRODUCTS_DIR/HexColors/HexColors.framework"
install_framework "$BUILT_PRODUCTS_DIR/NYTPhotoViewer/NYTPhotoViewer.framework"
install_framework "$BUILT_PRODUCTS_DIR/SWStepSlider/SWStepSlider.framework"
install_framework "$BUILT_PRODUCTS_DIR/TSMessages/TSMessages.framework"
install_framework "$BUILT_PRODUCTS_DIR/TUSafariActivity/TUSafariActivity.framework"
install_framework "$BUILT_PRODUCTS_DIR/Tweaks/Tweaks.framework"
install_framework "$BUILT_PRODUCTS_DIR/VTAcknowledgementsViewController/VTAcknowledgementsViewController.framework"
fi
if [[ "$CONFIGURATION" == "Release" ]]; then
install_framework "$BUILT_PRODUCTS_DIR/AFNetworking/AFNetworking.framework"
install_framework "$BUILT_PRODUCTS_DIR/AnimatedGIFImageSerialization/AnimatedGIFImageSerialization.framework"
install_framework "$BUILT_PRODUCTS_DIR/BlocksKit/BlocksKit.framework"
install_framework "$BUILT_PRODUCTS_DIR/CocoaLumberjack/CocoaLumberjack.framework"
install_framework "$BUILT_PRODUCTS_DIR/KVOController/KVOController.framework"
install_framework "$BUILT_PRODUCTS_DIR/Mantle/Mantle.framework"
install_framework "$BUILT_PRODUCTS_DIR/Masonry/Masonry.framework"
install_framework "$BUILT_PRODUCTS_DIR/NSDate-Extensions/NSDate_Extensions.framework"
install_framework "$BUILT_PRODUCTS_DIR/OMGHTTPURLRQ/OMGHTTPURLRQ.framework"
install_framework "$BUILT_PRODUCTS_DIR/PiwikTracker/PiwikTracker.framework"
install_framework "$BUILT_PRODUCTS_DIR/PromiseKit/PromiseKit.framework"
install_framework "$BUILT_PRODUCTS_DIR/Reachability/Reachability.framework"
install_framework "$BUILT_PRODUCTS_DIR/SDWebImage/SDWebImage.framework"
install_framework "$BUILT_PRODUCTS_DIR/SSDataSources/SSDataSources.framework"
install_framework "$BUILT_PRODUCTS_DIR/YapDatabase/YapDatabase.framework"
install_framework "$BUILT_PRODUCTS_DIR/hpple/hpple.framework"
install_framework "$BUILT_PRODUCTS_DIR/libextobjc/libextobjc.framework"
install_framework "$BUILT_PRODUCTS_DIR/FLAnimatedImage/FLAnimatedImage.framework"
install_framework "$BUILT_PRODUCTS_DIR/GCDWebServer/GCDWebServer.framework"
install_framework "$BUILT_PRODUCTS_DIR/HexColors/HexColors.framework"
install_framework "$BUILT_PRODUCTS_DIR/NYTPhotoViewer/NYTPhotoViewer.framework"
install_framework "$BUILT_PRODUCTS_DIR/SWStepSlider/SWStepSlider.framework"
install_framework "$BUILT_PRODUCTS_DIR/TSMessages/TSMessages.framework"
install_framework "$BUILT_PRODUCTS_DIR/TUSafariActivity/TUSafariActivity.framework"
install_framework "$BUILT_PRODUCTS_DIR/Tweaks/Tweaks.framework"
install_framework "$BUILT_PRODUCTS_DIR/VTAcknowledgementsViewController/VTAcknowledgementsViewController.framework"
fi
if [[ "$CONFIGURATION" == "Test" ]]; then
install_framework "$BUILT_PRODUCTS_DIR/AFNetworking/AFNetworking.framework"
install_framework "$BUILT_PRODUCTS_DIR/AnimatedGIFImageSerialization/AnimatedGIFImageSerialization.framework"
install_framework "$BUILT_PRODUCTS_DIR/BlocksKit/BlocksKit.framework"
install_framework "$BUILT_PRODUCTS_DIR/CocoaLumberjack/CocoaLumberjack.framework"
install_framework "$BUILT_PRODUCTS_DIR/KVOController/KVOController.framework"
install_framework "$BUILT_PRODUCTS_DIR/Mantle/Mantle.framework"
install_framework "$BUILT_PRODUCTS_DIR/Masonry/Masonry.framework"
install_framework "$BUILT_PRODUCTS_DIR/NSDate-Extensions/NSDate_Extensions.framework"
install_framework "$BUILT_PRODUCTS_DIR/OMGHTTPURLRQ/OMGHTTPURLRQ.framework"
install_framework "$BUILT_PRODUCTS_DIR/PiwikTracker/PiwikTracker.framework"
install_framework "$BUILT_PRODUCTS_DIR/PromiseKit/PromiseKit.framework"
install_framework "$BUILT_PRODUCTS_DIR/Reachability/Reachability.framework"
install_framework "$BUILT_PRODUCTS_DIR/SDWebImage/SDWebImage.framework"
install_framework "$BUILT_PRODUCTS_DIR/SSDataSources/SSDataSources.framework"
install_framework "$BUILT_PRODUCTS_DIR/YapDatabase/YapDatabase.framework"
install_framework "$BUILT_PRODUCTS_DIR/hpple/hpple.framework"
install_framework "$BUILT_PRODUCTS_DIR/libextobjc/libextobjc.framework"
install_framework "$BUILT_PRODUCTS_DIR/FLAnimatedImage/FLAnimatedImage.framework"
install_framework "$BUILT_PRODUCTS_DIR/GCDWebServer/GCDWebServer.framework"
install_framework "$BUILT_PRODUCTS_DIR/HexColors/HexColors.framework"
install_framework "$BUILT_PRODUCTS_DIR/NYTPhotoViewer/NYTPhotoViewer.framework"
install_framework "$BUILT_PRODUCTS_DIR/SWStepSlider/SWStepSlider.framework"
install_framework "$BUILT_PRODUCTS_DIR/TSMessages/TSMessages.framework"
install_framework "$BUILT_PRODUCTS_DIR/TUSafariActivity/TUSafariActivity.framework"
install_framework "$BUILT_PRODUCTS_DIR/Tweaks/Tweaks.framework"
install_framework "$BUILT_PRODUCTS_DIR/VTAcknowledgementsViewController/VTAcknowledgementsViewController.framework"
fi
if [[ "$CONFIGURATION" == "Beta" ]]; then
install_framework "$BUILT_PRODUCTS_DIR/AFNetworking/AFNetworking.framework"
install_framework "$BUILT_PRODUCTS_DIR/AnimatedGIFImageSerialization/AnimatedGIFImageSerialization.framework"
install_framework "$BUILT_PRODUCTS_DIR/BlocksKit/BlocksKit.framework"
install_framework "$BUILT_PRODUCTS_DIR/CocoaLumberjack/CocoaLumberjack.framework"
install_framework "$BUILT_PRODUCTS_DIR/KVOController/KVOController.framework"
install_framework "$BUILT_PRODUCTS_DIR/Mantle/Mantle.framework"
install_framework "$BUILT_PRODUCTS_DIR/Masonry/Masonry.framework"
install_framework "$BUILT_PRODUCTS_DIR/NSDate-Extensions/NSDate_Extensions.framework"
install_framework "$BUILT_PRODUCTS_DIR/OMGHTTPURLRQ/OMGHTTPURLRQ.framework"
install_framework "$BUILT_PRODUCTS_DIR/PiwikTracker/PiwikTracker.framework"
install_framework "$BUILT_PRODUCTS_DIR/PromiseKit/PromiseKit.framework"
install_framework "$BUILT_PRODUCTS_DIR/Reachability/Reachability.framework"
install_framework "$BUILT_PRODUCTS_DIR/SDWebImage/SDWebImage.framework"
install_framework "$BUILT_PRODUCTS_DIR/SSDataSources/SSDataSources.framework"
install_framework "$BUILT_PRODUCTS_DIR/YapDatabase/YapDatabase.framework"
install_framework "$BUILT_PRODUCTS_DIR/hpple/hpple.framework"
install_framework "$BUILT_PRODUCTS_DIR/libextobjc/libextobjc.framework"
install_framework "$BUILT_PRODUCTS_DIR/FLAnimatedImage/FLAnimatedImage.framework"
install_framework "$BUILT_PRODUCTS_DIR/GCDWebServer/GCDWebServer.framework"
install_framework "$BUILT_PRODUCTS_DIR/HexColors/HexColors.framework"
install_framework "$BUILT_PRODUCTS_DIR/NYTPhotoViewer/NYTPhotoViewer.framework"
install_framework "$BUILT_PRODUCTS_DIR/SWStepSlider/SWStepSlider.framework"
install_framework "$BUILT_PRODUCTS_DIR/TSMessages/TSMessages.framework"
install_framework "$BUILT_PRODUCTS_DIR/TUSafariActivity/TUSafariActivity.framework"
install_framework "$BUILT_PRODUCTS_DIR/Tweaks/Tweaks.framework"
install_framework "$BUILT_PRODUCTS_DIR/VTAcknowledgementsViewController/VTAcknowledgementsViewController.framework"
fi
if [[ "$CONFIGURATION" == "Alpha" ]]; then
install_framework "$BUILT_PRODUCTS_DIR/AFNetworking/AFNetworking.framework"
install_framework "$BUILT_PRODUCTS_DIR/AnimatedGIFImageSerialization/AnimatedGIFImageSerialization.framework"
install_framework "$BUILT_PRODUCTS_DIR/BlocksKit/BlocksKit.framework"
install_framework "$BUILT_PRODUCTS_DIR/CocoaLumberjack/CocoaLumberjack.framework"
install_framework "$BUILT_PRODUCTS_DIR/KVOController/KVOController.framework"
install_framework "$BUILT_PRODUCTS_DIR/Mantle/Mantle.framework"
install_framework "$BUILT_PRODUCTS_DIR/Masonry/Masonry.framework"
install_framework "$BUILT_PRODUCTS_DIR/NSDate-Extensions/NSDate_Extensions.framework"
install_framework "$BUILT_PRODUCTS_DIR/OMGHTTPURLRQ/OMGHTTPURLRQ.framework"
install_framework "$BUILT_PRODUCTS_DIR/PiwikTracker/PiwikTracker.framework"
install_framework "$BUILT_PRODUCTS_DIR/PromiseKit/PromiseKit.framework"
install_framework "$BUILT_PRODUCTS_DIR/Reachability/Reachability.framework"
install_framework "$BUILT_PRODUCTS_DIR/SDWebImage/SDWebImage.framework"
install_framework "$BUILT_PRODUCTS_DIR/SSDataSources/SSDataSources.framework"
install_framework "$BUILT_PRODUCTS_DIR/YapDatabase/YapDatabase.framework"
install_framework "$BUILT_PRODUCTS_DIR/hpple/hpple.framework"
install_framework "$BUILT_PRODUCTS_DIR/libextobjc/libextobjc.framework"
install_framework "$BUILT_PRODUCTS_DIR/FLAnimatedImage/FLAnimatedImage.framework"
install_framework "$BUILT_PRODUCTS_DIR/GCDWebServer/GCDWebServer.framework"
install_framework "$BUILT_PRODUCTS_DIR/HexColors/HexColors.framework"
install_framework "$BUILT_PRODUCTS_DIR/NYTPhotoViewer/NYTPhotoViewer.framework"
install_framework "$BUILT_PRODUCTS_DIR/SWStepSlider/SWStepSlider.framework"
install_framework "$BUILT_PRODUCTS_DIR/TSMessages/TSMessages.framework"
install_framework "$BUILT_PRODUCTS_DIR/TUSafariActivity/TUSafariActivity.framework"
install_framework "$BUILT_PRODUCTS_DIR/Tweaks/Tweaks.framework"
install_framework "$BUILT_PRODUCTS_DIR/VTAcknowledgementsViewController/VTAcknowledgementsViewController.framework"
fi
|
anirudh24seven/wikipedia-ios
|
Pods/Target Support Files/Pods-Foundation-Wikipedia/Pods-Foundation-Wikipedia-frameworks.sh
|
Shell
|
mit
| 20,249 |
# OSX-only stuff. Abort if not OSX.
is_osx || return 1
# APPLE, Y U PUT /usr/bin B4 /usr/local/bin?!
PATH="/usr/local/bin:$(path_remove /usr/local/bin)"
export PATH
# Trim new lines and copy to clipboard
alias c="tr -d '\n' | pbcopy"
# Make 'less' more.
[[ "$(type -P lesspipe.sh)" ]] && eval "$(lesspipe.sh)"
# Start ScreenSaver. This will lock the screen if locking is enabled.
alias ss="open /System/Library/Frameworks/ScreenSaver.framework/Versions/A/Resources/ScreenSaverEngine.app"
# Create a new Parallels VM from template, replacing the existing one.
#function vm_template() {
# local name="$@"
# local basename="$(basename "$name" ".zip")"
# local dest_dir="$HOME/Documents/Parallels"
# local dest="$dest_dir/$basename"
# local src_dir="$dest_dir/Templates"
# local src="$src_dir/$name"
# if [[ ! "$name" || ! -e "$src" ]]; then
# echo "You must specify a valid VM template from this list:";
# shopt -s nullglob
# for f in "$src_dir"/*.pvm "$src_dir"/*.pvm.zip; do
# echo " * $(basename "$f")"
# done
# shopt -u nullglob
# return 1
# fi
# if [[ -e "$dest" ]]; then
# echo "Deleting old VM"
# rm -rf "$dest"
# fi
# echo "Restoring VM template"
# if [[ "$name" == "$basename" ]]; then
# cp -R "$src" "$dest"
# else
# unzip -q "$src" -d "$dest_dir" && rm -rf "$dest_dir/__MACOSX"
# fi && \
# echo "Starting VM" && \
# open -g "$dest"
#}
# Export Localization.prefPane text substitution rules.
#function txt_sub_backup() {
# local prefs=~/Library/Preferences/.GlobalPreferences.plist
# local backup=$DOTFILES/conf/osx/NSUserReplacementItems.plist
# /usr/libexec/PlistBuddy -x -c "Print NSUserReplacementItems" "$prefs" > "$backup" &&
# echo "File ~${backup#$HOME} written."
#}
# Import Localization.prefPane text substitution rules.
#function txt_sub_restore() {
# local prefs=~/Library/Preferences/.GlobalPreferences.plist
# local backup=$DOTFILES/conf/osx/NSUserReplacementItems.plist
# if [[ ! -e "$backup" ]]; then
# echo "Error: file ~${backup#$HOME} does not exist!"
# return 1
# fi
# cmds=(
# "Delete NSUserReplacementItems"
# "Add NSUserReplacementItems array"
# "Merge '$backup' NSUserReplacementItems"
# )
# for cmd in "${cmds[@]}"; do /usr/libexec/PlistBuddy -c "$cmd" "$prefs"; done
#}
|
asharpe/dotfiles
|
source/50_osx.sh
|
Shell
|
mit
| 2,287 |
#!/bin/sh
# Container
ACCOUNT=""
CONTAINER="gameserver"
VERSION="latest"
# Shell Variables
OPT=""
OPT1=""
OPT2=""
DEBUG="TRUE"
AUTH="FALSE"
XMENU="N"
## Set Echo Command Flavor
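# On Darwin, sh's builtin echo interprets escapes like \c by default (and
# would print "-e" literally), hence the per-OS split below.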
PROMPT=""
OS=`uname -s`
if [ "$OS" = "Darwin" ] ; then
PROMPT="echo"
else
PROMPT="echo -e"
fi ;
#
# Shell Functions
# for Menu Operations
#
docker_auth () {
$PROMPT "Docker Userid: \c" ; read user ;
$PROMPT "Docker Password: \c" ; read -s pass ;
echo "" ;
docker login -u "$user" -p "$pass"
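# Note: on newer Docker clients, `docker login -u "$user" --password-stdin`
# avoids exposing the password in process listings; -p is kept here for
# compatibility with older clients.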
TMP=`cat ~/.docker/config.json | grep \"auth\": | wc -l | sed -e 's/^[ \t]*//'`
#echo ".${TMP}."
if [ "$TMP" == "1" ] ;
then
AUTH="TRUE" ;
ACCOUNT=$user ;
else
AUTH="FALSE";
fi ;
}
docker_pull() {
if [ "$AUTH" != "TRUE" ] ;
then echo "Login Required!" ;
else
docker pull -a $ACCOUNT/$CONTAINER ;
#docker pull $ACCOUNT/$CONTAINER:$VERSION ;
fi ;
}
docker_build() {
if [ "$AUTH" != "TRUE" ] ;
then echo "Login Required!" ;
else
docker build -t $ACCOUNT/$CONTAINER:$VERSION .
fi ;
}
docker_release() {
if [ "$AUTH" != "TRUE" ] ;
then echo "Login Required!" ;
else
echo "Building Versions: latest and $VERSION"
docker build -t $ACCOUNT/$CONTAINER:latest -t $ACCOUNT/$CONTAINER:$VERSION .
echo "Pushing Builds to Docker Hub"
docker push $ACCOUNT/$CONTAINER:latest ;
docker push $ACCOUNT/$CONTAINER:$VERSION ;
fi ;
}
docker_push() {
if [ "$AUTH" != "TRUE" ] ;
then echo "Login Required!" ;
else
docker push $ACCOUNT/$CONTAINER:$VERSION ;
fi ;
}
docker_run() {
if [ "$AUTH" != "TRUE" ] ;
then echo "Login Required!" ;
else
docker run -dt --name $CONTAINER $ACCOUNT/$CONTAINER:$VERSION ;
fi ;
}
docker_restart () {
docker restart $CONTAINER
}
docker_images() {
docker images
}
docker_rmi() {
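# Resolve and force-remove the first image ID whose repository matches
# $CONTAINER, looping until no match remains. (A roughly equivalent
# one-liner on modern Docker: docker rmi -f $(docker images -q "$ACCOUNT/$CONTAINER"))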
IMG_ID=`docker images --format "table {{.ID}}\t{{.Repository}}\t{{.Tag}}" | grep $CONTAINER | tr -s ' ' | tr ' ' '|' | cut -f 1 -d '|' | head -1`
while [ "$IMG_ID" != "" ]
do
echo "Removing Image: $IMG_ID"
docker rmi -f $IMG_ID
IMG_ID=`docker images --format "table {{.ID}}\t{{.Repository}}\t{{.Tag}}" | grep $CONTAINER | tr -s ' ' | tr ' ' '|' | cut -f 1 -d '|' | head -1`
done
}
docker_rmi_all() {
IMG_ID=`docker images --format "table {{.ID}}\t{{.Repository}}\t{{.Tag}}" | tr -s ' ' | tr ' ' '|' | cut -f 1 -d '|' | tail -n +2 | head -1`
while [ "$IMG_ID" != "" ]
do
echo "Removing Image: $IMG_ID"
docker rmi -f $IMG_ID
IMG_ID=`docker images --format "table {{.ID}}\t{{.Repository}}\t{{.Tag}}" | tr -s ' ' | tr ' ' '|' | cut -f 1 -d '|' | tail -n +2 | head -1`
done
}
docker_ps() {
echo "Running Containers:"
echo " "
docker ps --format "table {{.ID}}\t{{.Names}}\t{{.Image}}\t{{.Status}}\t"
}
docker_stop() {
docker stop $CONTAINER
docker rm $CONTAINER
}
docker_stop_all () {
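# Stop and remove the first listed running container, looping until
# `docker ps` reports none.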
INST_ID=`docker ps --format "table {{.ID}}\t{{.Names}}\t{{.Image}}\t{{.Status}}\t" | tr -s ' ' | tr ' ' '|' | cut -f 2 -d '|' | tail -n +2 | head -1`
while [ "$INST_ID" != "" ]
do
echo "Stopping Instance: $INST_ID"
docker stop $INST_ID > /dev/null 2>&1
docker rm $INST_ID > /dev/null 2>&1
INST_ID=`docker ps --format "table {{.ID}}\t{{.Names}}\t{{.Image}}\t{{.Status}}\t" | tr -s ' ' | tr ' ' '|' | cut -f 2 -d '|' | tail -n +2 | head -1`
done
}
docker_cmd () {
$PROMPT "CMD: \c" ; read cmd ;
echo $cmd
docker exec -it $CONTAINER $cmd
}
docker_install() {
if [ "$AUTH" != "TRUE" ] ;
then echo "Login Required!" ;
else
docker_uninstall
docker_rmi
docker_pull
docker_run
fi ;
}
docker_uninstall() {
if [ "$AUTH" != "TRUE" ] ;
then echo "Login Required!" ;
else
docker stop $CONTAINER
docker rm $CONTAINER
docker_rmi
fi ;
}
set_version() {
$PROMPT "Set Container Version: \c" ; read VERSION ;
}
set_account() {
$PROMPT "Set Container Account: \c" ; read ACCOUNT ;
}
okay_pause() {
$PROMPT "\n[Okay] \c";
read ans ;
}
##
## MAIN MENU LOOP
##
while [ "$OPT" != "X" ]
do
clear
echo ""
echo "============================================" ;
echo " D O C K E R M E N U " ;
echo "============================================" ;
echo "> $CONTAINER - $ACCOUNT/$CONTAINER:$VERSION " ;
echo " "
echo "[1] login - Login to Docker " ;
echo "[2] images - Show Docker Images " ;
echo "[3] build - Build Container Image " ;
echo "[4] run - Run Container " ;
echo "[5] pull - Pull Container Image " ;
echo "[6] push - Push Build to Docker Hub " ;
echo "[7] ps - Show Running Containers " ;
echo "[8] rmi - Remove Container Image " ;
echo "[9] release - Release to Docker Hub " ;
if [ "$XMENU" = "N" ] ; then
echo " "
echo "[+] More Options " ;
else
echo " "
echo "[i] install - Install Container " ;
echo "[u] uninstall - Uninstall Container " ;
echo "[r] restart - Restart Container " ;
echo "[s] stop - Stop Running Container " ;
echo "[b] bash - Enter Container Shell " ;
echo "[c] cleanup - Remove Local Images " ;
echo "[v] version - Set Container Version " ;
echo "[a] account - Set Container Account " ;
echo " "
echo "[-] Fewer Options " ;
fi ;
echo "[X] Exit Menu " ;
echo " "
$PROMPT "Selection: \c"
read OPT OPT1 OPT2
case $OPT in
1|login) echo " " ; docker_auth ; okay_pause ;;
2|images) echo " " ; docker_images ; okay_pause ;;
3|build) echo " " ; docker_build ; okay_pause ;;
4|run) echo " " ; docker_run ; okay_pause ;;
5|pull) echo " " ; docker_pull ; okay_pause ;;
6|push) echo " " ; docker_push ; okay_pause ;;
7|ps) echo " " ; docker_ps ; okay_pause ;;
8|rmi) echo " " ; docker_stop ; docker_rmi ; okay_pause ;;
9|release) echo " " ; docker_release ; okay_pause ;;
i|I|install) echo " " ; docker_install ; okay_pause ;;
u|U|uninstall) echo " " ; docker_uninstall ; okay_pause ;;
r|R|restart) echo " " ; docker_restart ; echo "Container Restarted!" ; okay_pause ;;
s|S|stop) echo " " ; docker_stop ; echo "Container Stopped!" ; okay_pause ;;
c|C|cleanup) echo " " ; docker_stop_all; docker_rmi_all ; okay_pause ;;
v|V|version) echo " " ; set_version ; okay_pause ;;
a|A|account) echo " " ; set_account ; okay_pause ;;
cmd) echo " " ; docker_cmd ; okay_pause ;;
debug) echo " " ; if [ "$OPT1" = "" -o "$OPT1" = "on" ] ; then DEBUG="TRUE" ; echo "Debug ON" ;
else DEBUG="FALSE" ; echo "Debug OFF" ; fi ; okay_pause ;;
b|B|bash) clear ; docker exec -it $CONTAINER bash ; ;;
+) XMENU="Y" ;;
-) XMENU="N" ;;
x|X) clear ; OPT="X" ; echo "Exiting " ;;
esac
done
|
srohilla/CodeSpanners
|
CodeSpartansRestlet/docker.sh
|
Shell
|
mit
| 6,964 |