#!/bin/bash
# Precompile Rails assets and package a timestamped release tarball.
rm -rf public/assets
rake assets:precompile --trace RAILS_ENV=production
DATE=$(date +%Y%m%d%H%M)
tar jcvf ../release/ozjapanese_com_au_$DATE.tar.bz2 \
    app \
    config.ru \
    doc \
    lib \
    public \
    config \
    db \
    Rakefile \
    script \
    vendor
rm -rf public/assets
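# Deployment sketch (not part of the original script; the target path is
# hypothetical): the archive lands in ../release/, so a release could be
# unpacked on the server with something like:
#   tar jxvf ozjapanese_com_au_201501011200.tar.bz2 -C /var/www/ozjapanese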
repo_name: KenichiTakemura/ozjapanese_com_au | path: init_release.sh | language: Shell | license: lgpl-3.0 | size: 273
#!/bin/bash
# zsudo - sudo with a zenity password dialog
# by jedral.one.pl
# usage: zsudo command
# handy for [Alt]+[F2], something like consolehelper for everyone :P
zenity --title="zenity sudo" --width=250 --text="You are trying to run the application:
\" $@ \"
with administrator privileges!
Enter your sudo password:" \
--entry --entry-text "****" --hide-text \
| sudo -S "$@"
# notes:
# comment out the "requiretty" line with visudo:
# Defaults requiretty
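# Usage sketch (assumes zsudo is executable and on $PATH): zenity prints the
# entered password on stdout and `sudo -S` reads it from stdin, e.g.:
#   zsudo gparted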
repo_name: borzole/borzole | path: bin/zsudo.sh | language: Shell | license: lgpl-3.0 | size: 471
#!/usr/bin/env bash
# Run Apache httpd with the local TLS and basic-auth configuration mounted read-only.
docker run --rm -p 80:80 -p 443:443 \
 -v "$(pwd)"/httpd.conf:/usr/local/apache2/conf/httpd.conf:ro \
 -v "$(pwd)"/httpd-vhosts.conf:/usr/local/apache2/conf/extra/httpd-vhosts.conf:ro \
 -v "$(pwd)"/localhost.crt:/usr/local/apache2/conf/extra/localhost.crt:ro \
 -v "$(pwd)"/localhost.key:/usr/local/apache2/conf/extra/localhost.key:ro \
 -v "$(pwd)"/.localhost.htpasswd:/usr/local/apache2/conf/extra/.localhost.htpasswd:ro \
 httpd:alpine
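# Smoke-test sketch (not in the original file; credentials are whatever was
# stored in .localhost.htpasswd, -k accepts the self-signed certificate):
#   curl -k -u user:password https://localhost/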
repo_name: picodotdev/blog-ejemplos | path: WebBasicAuth/docker-apache.sh | language: Shell | license: unlicense | size: 451
#!/bin/bash
# ssh
ufw allow from 198.150.93.0/24 to any port 22
ufw allow from 198.150.94.0/24 to any port 22
ufw allow from 198.150.95.0/24 to any port 22
ufw allow from 192.251.16.0/24 to any port 22
ufw allow from 198.51.130.0/24 to any port 22
ufw allow from 192.237.253.134 to any port 22
ufw allow from 198.61.224.217 to any port 22
ufw allow from 192.237.169.56 to any port 22
ufw allow from 192.251.163.0/24 to any port 22
# mysql
ufw allow from 10.208.2.123 to any port 3306
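# Verification sketch (not in the original file): list the rules just added
# and activate the firewall if it is not already enabled.
#   ufw status numbered
#   ufw enable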
repo_name: carthagecollege/archer | path: security/ufw.external.sh | language: Shell | license: unlicense | size: 485
#!/bin/bash
# -------------------------------------------------------------------------- #
# Copyright 2002-2015, OpenNebula Project (OpenNebula.org), C12G Labs #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#--------------------------------------------------------------------------- #
#-------------------------------------------------------------------------------
# Install program for OpenNebula. It will install it relative to
# $ONE_LOCATION if defined with the -d option, otherwise it'll be installed
# under /. In this case you may specify the oneadmin user/group, so you do
# not need to run the OpenNebula daemon with root privileges
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
# COMMAND LINE PARSING
#-------------------------------------------------------------------------------
usage() {
 echo
 echo "Usage: install.sh [-u install_user] [-g install_group] [-k] [-d ONE_LOCATION]"
 echo "                  [-c] [-s] [-G] [-f] [-r] [-l] [-h]"
 echo
 echo "-u: user that will run OpenNebula, defaults to user executing install.sh"
 echo "-g: group of the user that will run OpenNebula, defaults to the group of"
 echo "    the user executing install.sh"
 echo "-k: keep configuration files of existing OpenNebula installation, useful"
 echo "    when upgrading. This flag should not be set when installing"
 echo "    OpenNebula for the first time"
 echo "-d: target installation directory. If not defined, OpenNebula is"
 echo "    installed under /. Must be an absolute path."
 echo "-c: install client utilities: OpenNebula cli and ec2 client files"
 echo "-s: install OpenNebula Sunstone"
 echo "-G: install OpenNebula Gate"
 echo "-f: install OpenNebula Flow"
 echo "-r: remove OpenNebula, only useful if -d was not specified, otherwise"
 echo "    rm -rf \$ONE_LOCATION would do the job"
 echo "-l: create symlinks instead of copying files, useful for development"
 echo "-h: print this help"
}
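# Example invocations (a sketch based on the flags above): a system-wide
# install run as root, and a development install linked under $ONE_LOCATION.
#   ./install.sh -u oneadmin -g oneadmin
#   ./install.sh -d $ONE_LOCATION -l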
#-------------------------------------------------------------------------------
PARAMETERS="hkrlcsGfou:g:d:"
if [ "$(getopt --version | tr -d ' ')" = "--" ]; then
TEMP_OPT=`getopt $PARAMETERS "$@"`
else
TEMP_OPT=`getopt -o $PARAMETERS -n 'install.sh' -- "$@"`
fi
if [ $? != 0 ] ; then
usage
exit 1
fi
eval set -- "$TEMP_OPT"
INSTALL_ETC="yes"
UNINSTALL="no"
LINK="no"
CLIENT="no"
ONEGATE="no"
SUNSTONE="no"
ONEFLOW="no"
ONEADMIN_USER=`id -u`
ONEADMIN_GROUP=`id -g`
SRC_DIR=$PWD
while true ; do
case "$1" in
-h) usage; exit 0;;
-k) INSTALL_ETC="no" ; shift ;;
-r) UNINSTALL="yes" ; shift ;;
-l) LINK="yes" ; shift ;;
-c) CLIENT="yes"; INSTALL_ETC="no" ; shift ;;
-G) ONEGATE="yes"; shift ;;
-s) SUNSTONE="yes"; shift ;;
-f) ONEFLOW="yes"; shift ;;
-u) ONEADMIN_USER="$2" ; shift 2;;
-g) ONEADMIN_GROUP="$2"; shift 2;;
-d) ROOT="$2" ; shift 2 ;;
--) shift ; break ;;
*) usage; exit 1 ;;
esac
done
#-------------------------------------------------------------------------------
# Definition of locations
#-------------------------------------------------------------------------------
CONF_LOCATION="$HOME/.one"
if [ -z "$ROOT" ] ; then
BIN_LOCATION="/usr/bin"
LIB_LOCATION="/usr/lib/one"
ETC_LOCATION="/etc/one"
LOG_LOCATION="/var/log/one"
VAR_LOCATION="/var/lib/one"
ONEGATE_LOCATION="$LIB_LOCATION/onegate"
SUNSTONE_LOCATION="$LIB_LOCATION/sunstone"
ONEFLOW_LOCATION="$LIB_LOCATION/oneflow"
SYSTEM_DS_LOCATION="$VAR_LOCATION/datastores/0"
DEFAULT_DS_LOCATION="$VAR_LOCATION/datastores/1"
RUN_LOCATION="/var/run/one"
LOCK_LOCATION="/var/lock/one"
INCLUDE_LOCATION="/usr/include"
SHARE_LOCATION="/usr/share/one"
MAN_LOCATION="/usr/share/man/man1"
VM_LOCATION="/var/lib/one/vms"
if [ "$CLIENT" = "yes" ]; then
MAKE_DIRS="$BIN_LOCATION $LIB_LOCATION $ETC_LOCATION"
DELETE_DIRS=""
CHOWN_DIRS=""
elif [ "$SUNSTONE" = "yes" ]; then
MAKE_DIRS="$BIN_LOCATION $LIB_LOCATION $VAR_LOCATION \
$SUNSTONE_LOCATION $ETC_LOCATION"
DELETE_DIRS="$MAKE_DIRS"
CHOWN_DIRS=""
elif [ "$ONEGATE" = "yes" ]; then
MAKE_DIRS="$BIN_LOCATION $LIB_LOCATION $VAR_LOCATION \
$ONEGATE_LOCATION $ETC_LOCATION"
DELETE_DIRS="$MAKE_DIRS"
CHOWN_DIRS=""
elif [ "$ONEFLOW" = "yes" ]; then
MAKE_DIRS="$BIN_LOCATION $LIB_LOCATION $VAR_LOCATION $ONEFLOW_LOCATION \
$ETC_LOCATION"
DELETE_DIRS="$MAKE_DIRS"
CHOWN_DIRS=""
else
MAKE_DIRS="$BIN_LOCATION $LIB_LOCATION $ETC_LOCATION $VAR_LOCATION \
$INCLUDE_LOCATION $SHARE_LOCATION \
$LOG_LOCATION $RUN_LOCATION $LOCK_LOCATION \
$SYSTEM_DS_LOCATION $DEFAULT_DS_LOCATION $MAN_LOCATION \
$VM_LOCATION $ONEGATE_LOCATION $ONEFLOW_LOCATION"
DELETE_DIRS="$LIB_LOCATION $ETC_LOCATION $LOG_LOCATION $VAR_LOCATION \
$RUN_LOCATION $SHARE_DIRS"
CHOWN_DIRS="$LOG_LOCATION $VAR_LOCATION $RUN_LOCATION $LOCK_LOCATION"
fi
else
BIN_LOCATION="$ROOT/bin"
LIB_LOCATION="$ROOT/lib"
ETC_LOCATION="$ROOT/etc"
VAR_LOCATION="$ROOT/var"
ONEGATE_LOCATION="$LIB_LOCATION/onegate"
SUNSTONE_LOCATION="$LIB_LOCATION/sunstone"
ONEFLOW_LOCATION="$LIB_LOCATION/oneflow"
SYSTEM_DS_LOCATION="$VAR_LOCATION/datastores/0"
DEFAULT_DS_LOCATION="$VAR_LOCATION/datastores/1"
INCLUDE_LOCATION="$ROOT/include"
SHARE_LOCATION="$ROOT/share"
MAN_LOCATION="$ROOT/share/man/man1"
VM_LOCATION="$VAR_LOCATION/vms"
if [ "$CLIENT" = "yes" ]; then
MAKE_DIRS="$BIN_LOCATION $LIB_LOCATION $ETC_LOCATION"
DELETE_DIRS="$MAKE_DIRS"
elif [ "$ONEGATE" = "yes" ]; then
MAKE_DIRS="$BIN_LOCATION $LIB_LOCATION $VAR_LOCATION \
$ONEGATE_LOCATION $ETC_LOCATION"
DELETE_DIRS="$MAKE_DIRS"
elif [ "$SUNSTONE" = "yes" ]; then
MAKE_DIRS="$BIN_LOCATION $LIB_LOCATION $VAR_LOCATION \
$SUNSTONE_LOCATION $ETC_LOCATION"
DELETE_DIRS="$MAKE_DIRS"
elif [ "$ONEFLOW" = "yes" ]; then
MAKE_DIRS="$BIN_LOCATION $LIB_LOCATION $VAR_LOCATION $ONEFLOW_LOCATION \
$ETC_LOCATION"
DELETE_DIRS="$MAKE_DIRS"
else
MAKE_DIRS="$BIN_LOCATION $LIB_LOCATION $ETC_LOCATION $VAR_LOCATION \
$INCLUDE_LOCATION $SHARE_LOCATION $SYSTEM_DS_LOCATION \
$DEFAULT_DS_LOCATION $MAN_LOCATION \
$VM_LOCATION $ONEGATE_LOCATION $ONEFLOW_LOCATION"
DELETE_DIRS="$MAKE_DIRS"
CHOWN_DIRS="$ROOT"
fi
CHOWN_DIRS="$ROOT"
fi
SHARE_DIRS="$SHARE_LOCATION/examples \
$SHARE_LOCATION/websockify"
ETC_DIRS="$ETC_LOCATION/vmm_exec \
$ETC_LOCATION/hm \
$ETC_LOCATION/auth \
$ETC_LOCATION/auth/certificates \
$ETC_LOCATION/ec2query_templates \
$ETC_LOCATION/sunstone-views \
$ETC_LOCATION/cli"
LIB_DIRS="$LIB_LOCATION/ruby \
$LIB_LOCATION/ruby/opennebula \
$LIB_LOCATION/ruby/cloud/ \
$LIB_LOCATION/ruby/cloud/econe \
$LIB_LOCATION/ruby/cloud/econe/views \
$LIB_LOCATION/ruby/cloud/marketplace \
$LIB_LOCATION/ruby/cloud/CloudAuth \
$LIB_LOCATION/ruby/onedb \
$LIB_LOCATION/ruby/onedb/shared \
$LIB_LOCATION/ruby/onedb/local \
$LIB_LOCATION/ruby/vendors \
$LIB_LOCATION/ruby/vendors/rbvmomi \
$LIB_LOCATION/ruby/vendors/rbvmomi/lib \
$LIB_LOCATION/ruby/vendors/rbvmomi/lib/rbvmomi \
$LIB_LOCATION/ruby/vendors/rbvmomi/lib/rbvmomi/utils \
$LIB_LOCATION/ruby/vendors/rbvmomi/lib/rbvmomi/vim \
$LIB_LOCATION/mads \
$LIB_LOCATION/sh \
$LIB_LOCATION/ruby/cli \
$LIB_LOCATION/ruby/cli/one_helper"
VAR_DIRS="$VAR_LOCATION/remotes \
$VAR_LOCATION/remotes/im \
$VAR_LOCATION/remotes/im/kvm.d \
$VAR_LOCATION/remotes/im/xen3.d \
$VAR_LOCATION/remotes/im/xen4.d \
$VAR_LOCATION/remotes/im/kvm-probes.d \
$VAR_LOCATION/remotes/im/xen3-probes.d \
$VAR_LOCATION/remotes/im/xen4-probes.d \
$VAR_LOCATION/remotes/im/vmware.d \
$VAR_LOCATION/remotes/im/vcenter.d \
$VAR_LOCATION/remotes/im/ec2.d \
$VAR_LOCATION/remotes/im/sl.d \
$VAR_LOCATION/remotes/im/az.d \
$VAR_LOCATION/remotes/vmm \
$VAR_LOCATION/remotes/vmm/kvm \
$VAR_LOCATION/remotes/vmm/xen3 \
$VAR_LOCATION/remotes/vmm/xen4 \
$VAR_LOCATION/remotes/vmm/vmware \
$VAR_LOCATION/remotes/vmm/vcenter \
$VAR_LOCATION/remotes/vmm/ec2 \
$VAR_LOCATION/remotes/vmm/sl \
$VAR_LOCATION/remotes/vmm/az \
$VAR_LOCATION/remotes/vnm \
$VAR_LOCATION/remotes/vnm/802.1Q \
$VAR_LOCATION/remotes/vnm/dummy \
$VAR_LOCATION/remotes/vnm/ebtables \
$VAR_LOCATION/remotes/vnm/fw \
$VAR_LOCATION/remotes/vnm/ovswitch \
$VAR_LOCATION/remotes/vnm/ovswitch_brcompat \
$VAR_LOCATION/remotes/vnm/vmware \
$VAR_LOCATION/remotes/tm/ \
$VAR_LOCATION/remotes/tm/dummy \
$VAR_LOCATION/remotes/tm/shared \
$VAR_LOCATION/remotes/tm/fs_lvm \
$VAR_LOCATION/remotes/tm/qcow2 \
$VAR_LOCATION/remotes/tm/ssh \
$VAR_LOCATION/remotes/tm/vmfs \
$VAR_LOCATION/remotes/tm/lvm \
$VAR_LOCATION/remotes/tm/ceph \
$VAR_LOCATION/remotes/tm/dev \
$VAR_LOCATION/remotes/hooks \
$VAR_LOCATION/remotes/hooks/ft \
$VAR_LOCATION/remotes/datastore \
$VAR_LOCATION/remotes/datastore/dummy \
$VAR_LOCATION/remotes/datastore/fs \
$VAR_LOCATION/remotes/datastore/vmfs \
$VAR_LOCATION/remotes/datastore/lvm \
$VAR_LOCATION/remotes/datastore/ceph \
$VAR_LOCATION/remotes/datastore/dev \
$VAR_LOCATION/remotes/auth \
$VAR_LOCATION/remotes/auth/plain \
$VAR_LOCATION/remotes/auth/ssh \
$VAR_LOCATION/remotes/auth/x509 \
$VAR_LOCATION/remotes/auth/ldap \
$VAR_LOCATION/remotes/auth/server_x509 \
$VAR_LOCATION/remotes/auth/server_cipher \
$VAR_LOCATION/remotes/auth/dummy"
SUNSTONE_DIRS="$SUNSTONE_LOCATION/routes \
$SUNSTONE_LOCATION/models \
$SUNSTONE_LOCATION/models/OpenNebulaJSON \
$SUNSTONE_LOCATION/public \
$SUNSTONE_LOCATION/public/js \
$SUNSTONE_LOCATION/public/js/plugins \
$SUNSTONE_LOCATION/public/js/user-plugins \
$SUNSTONE_LOCATION/public/css \
$SUNSTONE_LOCATION/public/locale \
$SUNSTONE_LOCATION/public/locale/ca \
$SUNSTONE_LOCATION/public/locale/cs_CZ \
$SUNSTONE_LOCATION/public/locale/da \
$SUNSTONE_LOCATION/public/locale/de \
$SUNSTONE_LOCATION/public/locale/el_GR \
$SUNSTONE_LOCATION/public/locale/en_US \
$SUNSTONE_LOCATION/public/locale/es_ES \
$SUNSTONE_LOCATION/public/locale/fa_IR \
$SUNSTONE_LOCATION/public/locale/fr_FR \
$SUNSTONE_LOCATION/public/locale/it_IT \
$SUNSTONE_LOCATION/public/locale/nl_NL \
$SUNSTONE_LOCATION/public/locale/pl \
$SUNSTONE_LOCATION/public/locale/pt_BR \
$SUNSTONE_LOCATION/public/locale/pt_PT \
$SUNSTONE_LOCATION/public/locale/ru_RU \
$SUNSTONE_LOCATION/public/locale/sk_SK \
$SUNSTONE_LOCATION/public/locale/zh_TW \
$SUNSTONE_LOCATION/public/locale/zh_CN \
$SUNSTONE_LOCATION/public/vendor \
$SUNSTONE_LOCATION/public/vendor/crypto-js \
$SUNSTONE_LOCATION/public/vendor/noVNC \
$SUNSTONE_LOCATION/public/vendor/noVNC/web-socket-js \
$SUNSTONE_LOCATION/public/vendor/4.0 \
$SUNSTONE_LOCATION/public/vendor/4.0/flot \
$SUNSTONE_LOCATION/public/vendor/4.0/datatables \
$SUNSTONE_LOCATION/public/vendor/4.0/foundation_datatables \
$SUNSTONE_LOCATION/public/vendor/4.0/jquery_layout \
$SUNSTONE_LOCATION/public/vendor/4.0/fontawesome \
$SUNSTONE_LOCATION/public/vendor/4.0/fontawesome/css \
$SUNSTONE_LOCATION/public/vendor/4.0/fontawesome/fonts \
$SUNSTONE_LOCATION/public/vendor/4.0/jgrowl \
$SUNSTONE_LOCATION/public/vendor/4.0/resumablejs \
$SUNSTONE_LOCATION/public/vendor/4.0/foundation \
$SUNSTONE_LOCATION/public/vendor/4.0/modernizr \
$SUNSTONE_LOCATION/public/vendor/4.0/nouislider \
$SUNSTONE_LOCATION/public/images \
$SUNSTONE_LOCATION/public/images/logos \
$SUNSTONE_LOCATION/views"
ONEFLOW_DIRS="$ONEFLOW_LOCATION/lib \
$ONEFLOW_LOCATION/lib/strategy \
$ONEFLOW_LOCATION/lib/models"
LIB_ECO_CLIENT_DIRS="$LIB_LOCATION/ruby \
$LIB_LOCATION/ruby/opennebula \
$LIB_LOCATION/ruby/cloud/ \
$LIB_LOCATION/ruby/cloud/econe"
LIB_MARKET_CLIENT_DIRS="$LIB_LOCATION/ruby \
$LIB_LOCATION/ruby/opennebula \
$LIB_LOCATION/ruby/cloud/marketplace"
LIB_OCA_CLIENT_DIRS="$LIB_LOCATION/ruby \
$LIB_LOCATION/ruby/opennebula"
LIB_CLI_CLIENT_DIRS="$LIB_LOCATION/ruby/cli \
$LIB_LOCATION/ruby/cli/one_helper"
CONF_CLI_DIRS="$ETC_LOCATION/cli"
if [ "$CLIENT" = "yes" ]; then
MAKE_DIRS="$MAKE_DIRS $LIB_ECO_CLIENT_DIRS $LIB_MARKET_CLIENT_DIRS \
$LIB_OCA_CLIENT_DIRS $LIB_CLI_CLIENT_DIRS $CONF_CLI_DIRS \
$ETC_LOCATION"
elif [ "$ONEGATE" = "yes" ]; then
MAKE_DIRS="$MAKE_DIRS $LIB_OCA_CLIENT_DIRS"
elif [ "$SUNSTONE" = "yes" ]; then
MAKE_DIRS="$MAKE_DIRS $SUNSTONE_DIRS $LIB_OCA_CLIENT_DIRS"
elif [ "$ONEFLOW" = "yes" ]; then
MAKE_DIRS="$MAKE_DIRS $ONEFLOW_DIRS $LIB_OCA_CLIENT_DIRS"
else
MAKE_DIRS="$MAKE_DIRS $SHARE_DIRS $ETC_DIRS $LIB_DIRS $VAR_DIRS \
$SUNSTONE_DIRS $ONEFLOW_DIRS"
fi
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
# FILE DEFINITION, WHAT IS GOING TO BE INSTALLED AND WHERE
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
INSTALL_FILES=(
BIN_FILES:$BIN_LOCATION
INCLUDE_FILES:$INCLUDE_LOCATION
LIB_FILES:$LIB_LOCATION
RUBY_LIB_FILES:$LIB_LOCATION/ruby
RUBY_AUTH_LIB_FILES:$LIB_LOCATION/ruby/opennebula
RUBY_OPENNEBULA_LIB_FILES:$LIB_LOCATION/ruby/opennebula
MAD_RUBY_LIB_FILES:$LIB_LOCATION/ruby
MAD_RUBY_LIB_FILES:$VAR_LOCATION/remotes
MAD_SH_LIB_FILES:$LIB_LOCATION/sh
MAD_SH_LIB_FILES:$VAR_LOCATION/remotes
ONEDB_FILES:$LIB_LOCATION/ruby/onedb
ONEDB_SHARED_MIGRATOR_FILES:$LIB_LOCATION/ruby/onedb/shared
ONEDB_LOCAL_MIGRATOR_FILES:$LIB_LOCATION/ruby/onedb/local
MADS_LIB_FILES:$LIB_LOCATION/mads
IM_PROBES_FILES:$VAR_LOCATION/remotes/im
IM_PROBES_KVM_FILES:$VAR_LOCATION/remotes/im/kvm.d
IM_PROBES_KVM_PROBES_FILES:$VAR_LOCATION/remotes/im/kvm-probes.d
IM_PROBES_XEN3_FILES:$VAR_LOCATION/remotes/im/xen3.d
IM_PROBES_XEN3_PROBES_FILES:$VAR_LOCATION/remotes/im/xen3-probes.d
IM_PROBES_XEN4_FILES:$VAR_LOCATION/remotes/im/xen4.d
IM_PROBES_XEN4_PROBES_FILES:$VAR_LOCATION/remotes/im/xen4-probes.d
IM_PROBES_VMWARE_FILES:$VAR_LOCATION/remotes/im/vmware.d
IM_PROBES_VCENTER_FILES:$VAR_LOCATION/remotes/im/vcenter.d
IM_PROBES_EC2_FILES:$VAR_LOCATION/remotes/im/ec2.d
IM_PROBES_SL_FILES:$VAR_LOCATION/remotes/im/sl.d
IM_PROBES_AZ_FILES:$VAR_LOCATION/remotes/im/az.d
IM_PROBES_VERSION:$VAR_LOCATION/remotes
AUTH_SSH_FILES:$VAR_LOCATION/remotes/auth/ssh
AUTH_X509_FILES:$VAR_LOCATION/remotes/auth/x509
AUTH_LDAP_FILES:$VAR_LOCATION/remotes/auth/ldap
AUTH_SERVER_X509_FILES:$VAR_LOCATION/remotes/auth/server_x509
AUTH_SERVER_CIPHER_FILES:$VAR_LOCATION/remotes/auth/server_cipher
AUTH_DUMMY_FILES:$VAR_LOCATION/remotes/auth/dummy
AUTH_PLAIN_FILES:$VAR_LOCATION/remotes/auth/plain
VMM_EXEC_KVM_SCRIPTS:$VAR_LOCATION/remotes/vmm/kvm
VMM_EXEC_XEN3_SCRIPTS:$VAR_LOCATION/remotes/vmm/xen3
VMM_EXEC_XEN4_SCRIPTS:$VAR_LOCATION/remotes/vmm/xen4
VMM_EXEC_VMWARE_SCRIPTS:$VAR_LOCATION/remotes/vmm/vmware
VMM_EXEC_VCENTER_SCRIPTS:$VAR_LOCATION/remotes/vmm/vcenter
VMM_EXEC_EC2_SCRIPTS:$VAR_LOCATION/remotes/vmm/ec2
VMM_EXEC_SL_SCRIPTS:$VAR_LOCATION/remotes/vmm/sl
VMM_EXEC_AZ_SCRIPTS:$VAR_LOCATION/remotes/vmm/az
TM_FILES:$VAR_LOCATION/remotes/tm
TM_SHARED_FILES:$VAR_LOCATION/remotes/tm/shared
TM_FS_LVM_FILES:$VAR_LOCATION/remotes/tm/fs_lvm
TM_QCOW2_FILES:$VAR_LOCATION/remotes/tm/qcow2
TM_SSH_FILES:$VAR_LOCATION/remotes/tm/ssh
TM_VMFS_FILES:$VAR_LOCATION/remotes/tm/vmfs
TM_LVM_FILES:$VAR_LOCATION/remotes/tm/lvm
TM_CEPH_FILES:$VAR_LOCATION/remotes/tm/ceph
TM_DEV_FILES:$VAR_LOCATION/remotes/tm/dev
TM_DUMMY_FILES:$VAR_LOCATION/remotes/tm/dummy
DATASTORE_DRIVER_COMMON_SCRIPTS:$VAR_LOCATION/remotes/datastore/
DATASTORE_DRIVER_DUMMY_SCRIPTS:$VAR_LOCATION/remotes/datastore/dummy
DATASTORE_DRIVER_FS_SCRIPTS:$VAR_LOCATION/remotes/datastore/fs
DATASTORE_DRIVER_VMFS_SCRIPTS:$VAR_LOCATION/remotes/datastore/vmfs
DATASTORE_DRIVER_LVM_SCRIPTS:$VAR_LOCATION/remotes/datastore/lvm
DATASTORE_DRIVER_CEPH_SCRIPTS:$VAR_LOCATION/remotes/datastore/ceph
DATASTORE_DRIVER_DEV_SCRIPTS:$VAR_LOCATION/remotes/datastore/dev
NETWORK_FILES:$VAR_LOCATION/remotes/vnm
NETWORK_8021Q_FILES:$VAR_LOCATION/remotes/vnm/802.1Q
NETWORK_DUMMY_FILES:$VAR_LOCATION/remotes/vnm/dummy
NETWORK_EBTABLES_FILES:$VAR_LOCATION/remotes/vnm/ebtables
NETWORK_FW_FILES:$VAR_LOCATION/remotes/vnm/fw
NETWORK_OVSWITCH_FILES:$VAR_LOCATION/remotes/vnm/ovswitch
NETWORK_OVSWITCH_BRCOMPAT_FILES:$VAR_LOCATION/remotes/vnm/ovswitch_brcompat
NETWORK_VMWARE_FILES:$VAR_LOCATION/remotes/vnm/vmware
EXAMPLE_SHARE_FILES:$SHARE_LOCATION/examples
WEBSOCKIFY_SHARE_FILES:$SHARE_LOCATION/websockify
INSTALL_GEMS_SHARE_FILE:$SHARE_LOCATION
HOOK_FT_FILES:$VAR_LOCATION/remotes/hooks/ft
COMMON_CLOUD_LIB_FILES:$LIB_LOCATION/ruby/cloud
CLOUD_AUTH_LIB_FILES:$LIB_LOCATION/ruby/cloud/CloudAuth
ECO_LIB_FILES:$LIB_LOCATION/ruby/cloud/econe
ECO_LIB_VIEW_FILES:$LIB_LOCATION/ruby/cloud/econe/views
ECO_BIN_FILES:$BIN_LOCATION
MARKET_LIB_FILES:$LIB_LOCATION/ruby/cloud/marketplace
MARKET_BIN_FILES:$BIN_LOCATION
MAN_FILES:$MAN_LOCATION
CLI_LIB_FILES:$LIB_LOCATION/ruby/cli
ONE_CLI_LIB_FILES:$LIB_LOCATION/ruby/cli/one_helper
RBVMOMI_VENDOR_RUBY_FILES:$LIB_LOCATION/ruby/vendors/rbvmomi
RBVMOMI_VENDOR_RUBY_LIB_FILES:$LIB_LOCATION/ruby/vendors/rbvmomi/lib
RBVMOMI_VENDOR_RUBY_LIB_RBVMOMI_FILES:$LIB_LOCATION/ruby/vendors/rbvmomi/lib/rbvmomi
RBVMOMI_VENDOR_RUBY_LIB_RBVMOMI_VIM_FILES:$LIB_LOCATION/ruby/vendors/rbvmomi/lib/rbvmomi/vim
RBVMOMI_VENDOR_RUBY_LIB_RBVMOMI_UTILS_FILES:$LIB_LOCATION/ruby/vendors/rbvmomi/lib/rbvmomi/utils
)
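# Hedged sketch (the consuming loop is outside this excerpt): each
# "SRC_VAR:dst_dir" pair above is typically split on the first colon and the
# file list expanded with bash indirection, along these lines:
#   for pair in "${INSTALL_FILES[@]}"; do
#       src_var="${pair%%:*}" dst="${pair#*:}"
#       for f in ${!src_var}; do cp "$f" "$dst"; done
#   done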
INSTALL_CLIENT_FILES=(
COMMON_CLOUD_CLIENT_LIB_FILES:$LIB_LOCATION/ruby/cloud
ECO_LIB_CLIENT_FILES:$LIB_LOCATION/ruby/cloud/econe
ECO_BIN_CLIENT_FILES:$BIN_LOCATION
MARKET_LIB_CLIENT_FILES:$LIB_LOCATION/ruby/cloud/marketplace
MARKET_BIN_CLIENT_FILES:$BIN_LOCATION
CLI_BIN_FILES:$BIN_LOCATION
CLI_LIB_FILES:$LIB_LOCATION/ruby/cli
ONE_CLI_LIB_FILES:$LIB_LOCATION/ruby/cli/one_helper
CLI_CONF_FILES:$ETC_LOCATION/cli
OCA_LIB_FILES:$LIB_LOCATION/ruby
RUBY_OPENNEBULA_LIB_FILES:$LIB_LOCATION/ruby/opennebula
RUBY_AUTH_LIB_FILES:$LIB_LOCATION/ruby/opennebula
)
INSTALL_SUNSTONE_RUBY_FILES=(
RUBY_OPENNEBULA_LIB_FILES:$LIB_LOCATION/ruby/opennebula
OCA_LIB_FILES:$LIB_LOCATION/ruby
)
INSTALL_SUNSTONE_FILES=(
SUNSTONE_FILES:$SUNSTONE_LOCATION
SUNSTONE_BIN_FILES:$BIN_LOCATION
SUNSTONE_MODELS_FILES:$SUNSTONE_LOCATION/models
SUNSTONE_MODELS_JSON_FILES:$SUNSTONE_LOCATION/models/OpenNebulaJSON
SUNSTONE_VIEWS_FILES:$SUNSTONE_LOCATION/views
SUNSTONE_PUBLIC_JS_FILES:$SUNSTONE_LOCATION/public/js
SUNSTONE_PUBLIC_JS_PLUGINS_FILES:$SUNSTONE_LOCATION/public/js/plugins
SUNSTONE_ROUTES_FILES:$SUNSTONE_LOCATION/routes
SUNSTONE_PUBLIC_CSS_FILES:$SUNSTONE_LOCATION/public/css
SUNSTONE_PUBLIC_VENDOR_CRYPTOJS:$SUNSTONE_LOCATION/public/vendor/crypto-js
SUNSTONE_PUBLIC_VENDOR_NOVNC:$SUNSTONE_LOCATION/public/vendor/noVNC
SUNSTONE_PUBLIC_VENDOR_NOVNC_WEBSOCKET:$SUNSTONE_LOCATION/public/vendor/noVNC/web-socket-js
SUNSTONE_PUBLIC_NEW_VENDOR_FLOT:$SUNSTONE_LOCATION/public/vendor/4.0/flot
SUNSTONE_PUBLIC_NEW_VENDOR_DATATABLES:$SUNSTONE_LOCATION/public/vendor/4.0/datatables
SUNSTONE_PUBLIC_NEW_VENDOR_FOUNDATION_DATATABLES:$SUNSTONE_LOCATION/public/vendor/4.0/foundation_datatables
SUNSTONE_PUBLIC_NEW_VENDOR_JGROWL:$SUNSTONE_LOCATION/public/vendor/4.0/jgrowl
SUNSTONE_PUBLIC_NEW_VENDOR_RESUMABLEJS:$SUNSTONE_LOCATION/public/vendor/4.0/resumablejs
SUNSTONE_PUBLIC_NEW_VENDOR_JQUERY:$SUNSTONE_LOCATION/public/vendor/4.0/
SUNSTONE_PUBLIC_NEW_VENDOR_FOUNDATION:$SUNSTONE_LOCATION/public/vendor/4.0/foundation
SUNSTONE_PUBLIC_NEW_VENDOR_MODERNIZR:$SUNSTONE_LOCATION/public/vendor/4.0/modernizr
SUNSTONE_PUBLIC_NEW_VENDOR_FONTAWESOME:$SUNSTONE_LOCATION/public/vendor/4.0/fontawesome
SUNSTONE_PUBLIC_NEW_VENDOR_FONTAWESOME_FONT:$SUNSTONE_LOCATION/public/vendor/4.0/fontawesome/fonts
SUNSTONE_PUBLIC_NEW_VENDOR_FONTAWESOME_CSS:$SUNSTONE_LOCATION/public/vendor/4.0/fontawesome/css
SUNSTONE_PUBLIC_NEW_VENDOR_NOUISLIDER:$SUNSTONE_LOCATION/public/vendor/4.0/nouislider
SUNSTONE_PUBLIC_IMAGES_FILES:$SUNSTONE_LOCATION/public/images
SUNSTONE_PUBLIC_LOGOS_FILES:$SUNSTONE_LOCATION/public/images/logos
SUNSTONE_PUBLIC_LOCALE_CA:$SUNSTONE_LOCATION/public/locale/ca
SUNSTONE_PUBLIC_LOCALE_CS_CZ:$SUNSTONE_LOCATION/public/locale/cs_CZ
SUNSTONE_PUBLIC_LOCALE_DE:$SUNSTONE_LOCATION/public/locale/de
SUNSTONE_PUBLIC_LOCALE_DA:$SUNSTONE_LOCATION/public/locale/da
SUNSTONE_PUBLIC_LOCALE_EL_GR:$SUNSTONE_LOCATION/public/locale/el_GR
SUNSTONE_PUBLIC_LOCALE_EN_US:$SUNSTONE_LOCATION/public/locale/en_US
SUNSTONE_PUBLIC_LOCALE_ES_ES:$SUNSTONE_LOCATION/public/locale/es_ES
SUNSTONE_PUBLIC_LOCALE_FA_IR:$SUNSTONE_LOCATION/public/locale/fa_IR
SUNSTONE_PUBLIC_LOCALE_FR_FR:$SUNSTONE_LOCATION/public/locale/fr_FR
SUNSTONE_PUBLIC_LOCALE_IT_IT:$SUNSTONE_LOCATION/public/locale/it_IT
SUNSTONE_PUBLIC_LOCALE_NL_NL:$SUNSTONE_LOCATION/public/locale/nl_NL
SUNSTONE_PUBLIC_LOCALE_PL:$SUNSTONE_LOCATION/public/locale/pl
SUNSTONE_PUBLIC_LOCALE_PT_PT:$SUNSTONE_LOCATION/public/locale/pt_PT
SUNSTONE_PUBLIC_LOCALE_PT_BR:$SUNSTONE_LOCATION/public/locale/pt_BR
SUNSTONE_PUBLIC_LOCALE_RU_RU:$SUNSTONE_LOCATION/public/locale/ru_RU
SUNSTONE_PUBLIC_LOCALE_SK_SK:$SUNSTONE_LOCATION/public/locale/sk_SK
SUNSTONE_PUBLIC_LOCALE_ZH_CN:$SUNSTONE_LOCATION/public/locale/zh_CN
SUNSTONE_PUBLIC_LOCALE_ZH_TW:$SUNSTONE_LOCATION/public/locale/zh_TW
)
INSTALL_SUNSTONE_ETC_FILES=(
SUNSTONE_ETC_FILES:$ETC_LOCATION
SUNSTONE_ETC_VIEW_FILES:$ETC_LOCATION/sunstone-views
)
INSTALL_ONEGATE_FILES=(
ONEGATE_FILES:$ONEGATE_LOCATION
ONEGATE_BIN_FILES:$BIN_LOCATION
)
INSTALL_ONEGATE_ETC_FILES=(
ONEGATE_ETC_FILES:$ETC_LOCATION
)
INSTALL_ONEFLOW_FILES=(
ONEFLOW_FILES:$ONEFLOW_LOCATION
ONEFLOW_BIN_FILES:$BIN_LOCATION
ONEFLOW_LIB_FILES:$ONEFLOW_LOCATION/lib
ONEFLOW_LIB_STRATEGY_FILES:$ONEFLOW_LOCATION/lib/strategy
ONEFLOW_LIB_MODELS_FILES:$ONEFLOW_LOCATION/lib/models
)
INSTALL_ONEFLOW_ETC_FILES=(
ONEFLOW_ETC_FILES:$ETC_LOCATION
)
INSTALL_ETC_FILES=(
ETC_FILES:$ETC_LOCATION
VMWARE_ETC_FILES:$ETC_LOCATION
EC2_ETC_FILES:$ETC_LOCATION
SL_ETC_FILES:$ETC_LOCATION
AZ_ETC_FILES:$ETC_LOCATION
VMM_EXEC_ETC_FILES:$ETC_LOCATION/vmm_exec
HM_ETC_FILES:$ETC_LOCATION/hm
AUTH_ETC_FILES:$ETC_LOCATION/auth
ECO_ETC_FILES:$ETC_LOCATION
ECO_ETC_TEMPLATE_FILES:$ETC_LOCATION/ec2query_templates
CLI_CONF_FILES:$ETC_LOCATION/cli
)
#-------------------------------------------------------------------------------
# Binary files, to be installed under $BIN_LOCATION
#-------------------------------------------------------------------------------
BIN_FILES="src/nebula/oned \
src/scheduler/src/sched/mm_sched \
src/cli/onevm \
src/cli/oneacct \
src/cli/onehost \
src/cli/onevnet \
src/cli/oneuser \
src/cli/oneimage \
src/cli/onegroup \
src/cli/onetemplate \
src/cli/oneacl \
src/cli/onedatastore \
src/cli/onecluster \
src/cli/onezone \
src/cli/oneflow \
src/cli/oneflow-template \
src/cli/onevcenter \
src/onedb/onedb \
src/mad/utils/tty_expect \
share/scripts/one"
#-------------------------------------------------------------------------------
# C/C++ OpenNebula API Library & Development files
# Include files, to be installed under $INCLUDE_LOCATION
# Library files, to be installed under $LIB_LOCATION
#-------------------------------------------------------------------------------
INCLUDE_FILES=""
LIB_FILES=""
#-------------------------------------------------------------------------------
# Ruby library files, to be installed under $LIB_LOCATION/ruby
#-------------------------------------------------------------------------------
RUBY_LIB_FILES="src/mad/ruby/ActionManager.rb \
src/mad/ruby/CommandManager.rb \
src/mad/ruby/OpenNebulaDriver.rb \
src/mad/ruby/VirtualMachineDriver.rb \
src/mad/ruby/DriverExecHelper.rb \
src/mad/ruby/ssh_stream.rb \
src/vnm_mad/one_vnm.rb \
src/oca/ruby/deprecated/OpenNebula.rb \
src/oca/ruby/opennebula.rb \
src/sunstone/OpenNebulaVNC.rb \
src/vmm_mad/remotes/vcenter/vcenter_driver.rb"
#-------------------------------------------------------------------------------
# Ruby auth library files, to be installed under $LIB_LOCATION/ruby/opennebula
#-------------------------------------------------------------------------------
RUBY_AUTH_LIB_FILES="src/authm_mad/remotes/ssh/ssh_auth.rb \
src/authm_mad/remotes/server_x509/server_x509_auth.rb \
src/authm_mad/remotes/server_cipher/server_cipher_auth.rb \
src/authm_mad/remotes/ldap/ldap_auth.rb \
src/authm_mad/remotes/x509/x509_auth.rb"
#-----------------------------------------------------------------------------
# MAD Script library files, to be installed under $LIB_LOCATION/<script lang>
# and remotes directory
#-----------------------------------------------------------------------------
MAD_SH_LIB_FILES="src/mad/sh/scripts_common.sh"
MAD_RUBY_LIB_FILES="src/mad/ruby/scripts_common.rb"
#-------------------------------------------------------------------------------
# Driver executable files, to be installed under $LIB_LOCATION/mads
#-------------------------------------------------------------------------------
MADS_LIB_FILES="src/mad/sh/madcommon.sh \
src/vmm_mad/exec/one_vmm_exec.rb \
src/vmm_mad/exec/one_vmm_exec \
src/vmm_mad/exec/one_vmm_sh \
src/vmm_mad/exec/one_vmm_ssh \
src/vmm_mad/dummy/one_vmm_dummy.rb \
src/vmm_mad/dummy/one_vmm_dummy \
src/im_mad/im_exec/one_im_exec.rb \
src/im_mad/im_exec/one_im_exec \
src/im_mad/im_exec/one_im_ssh \
src/im_mad/im_exec/one_im_sh \
src/im_mad/dummy/one_im_dummy.rb \
src/im_mad/dummy/one_im_dummy \
src/im_mad/collectd/collectd \
src/tm_mad/one_tm \
src/tm_mad/one_tm.rb \
src/hm_mad/one_hm.rb \
src/hm_mad/one_hm \
src/authm_mad/one_auth_mad.rb \
src/authm_mad/one_auth_mad \
src/datastore_mad/one_datastore.rb \
src/datastore_mad/one_datastore"
#-------------------------------------------------------------------------------
# VMM SH Driver KVM scripts, to be installed under $REMOTES_LOCATION/vmm/kvm
#-------------------------------------------------------------------------------
VMM_EXEC_KVM_SCRIPTS="src/vmm_mad/remotes/kvm/cancel \
src/vmm_mad/remotes/kvm/deploy \
src/vmm_mad/remotes/kvm/kvmrc \
src/vmm_mad/remotes/kvm/migrate \
src/vmm_mad/remotes/kvm/migrate_local \
src/vmm_mad/remotes/kvm/restore \
src/vmm_mad/remotes/kvm/reboot \
src/vmm_mad/remotes/kvm/reset \
src/vmm_mad/remotes/kvm/save \
src/vmm_mad/remotes/kvm/poll \
src/vmm_mad/remotes/kvm/attach_disk \
src/vmm_mad/remotes/kvm/detach_disk \
src/vmm_mad/remotes/kvm/attach_nic \
src/vmm_mad/remotes/kvm/detach_nic \
src/vmm_mad/remotes/kvm/snapshot_create \
src/vmm_mad/remotes/kvm/snapshot_revert \
src/vmm_mad/remotes/kvm/snapshot_delete \
src/vmm_mad/remotes/kvm/shutdown"
#-------------------------------------------------------------------------------
# VMM SH Driver Xen scripts, to be installed under $REMOTES_LOCATION/vmm/xen
#-------------------------------------------------------------------------------
VMM_EXEC_XEN3_SCRIPTS="src/vmm_mad/remotes/xen/cancel \
src/vmm_mad/remotes/xen/deploy \
src/vmm_mad/remotes/xen/xen3/xenrc \
src/vmm_mad/remotes/xen/xen3/migrate \
src/vmm_mad/remotes/xen/restore \
src/vmm_mad/remotes/xen/reboot \
src/vmm_mad/remotes/xen/xen3/reset \
src/vmm_mad/remotes/xen/save \
src/vmm_mad/remotes/xen/poll \
src/vmm_mad/remotes/xen/attach_disk \
src/vmm_mad/remotes/xen/detach_disk \
src/vmm_mad/remotes/xen/attach_nic \
src/vmm_mad/remotes/xen/detach_nic \
src/vmm_mad/remotes/xen/snapshot_create \
src/vmm_mad/remotes/xen/snapshot_revert \
src/vmm_mad/remotes/xen/snapshot_delete \
src/vmm_mad/remotes/xen/shutdown"
VMM_EXEC_XEN4_SCRIPTS="src/vmm_mad/remotes/xen/cancel \
src/vmm_mad/remotes/xen/deploy \
src/vmm_mad/remotes/xen/xen4/xenrc \
src/vmm_mad/remotes/xen/xen4/migrate \
src/vmm_mad/remotes/xen/restore \
src/vmm_mad/remotes/xen/reboot \
src/vmm_mad/remotes/xen/xen4/reset \
src/vmm_mad/remotes/xen/save \
src/vmm_mad/remotes/xen/poll \
src/vmm_mad/remotes/xen/attach_disk \
src/vmm_mad/remotes/xen/detach_disk \
src/vmm_mad/remotes/xen/attach_nic \
src/vmm_mad/remotes/xen/detach_nic \
src/vmm_mad/remotes/xen/snapshot_create \
src/vmm_mad/remotes/xen/snapshot_revert \
src/vmm_mad/remotes/xen/snapshot_delete \
src/vmm_mad/remotes/xen/shutdown"
#-------------------------------------------------------------------------------
# VMM Driver VMWARE scripts, to be installed under $REMOTES_LOCATION/vmm/vmware
#-------------------------------------------------------------------------------
VMM_EXEC_VMWARE_SCRIPTS="src/vmm_mad/remotes/vmware/cancel \
src/vmm_mad/remotes/vmware/attach_disk \
src/vmm_mad/remotes/vmware/detach_disk \
src/vmm_mad/remotes/vmware/attach_nic \
src/vmm_mad/remotes/vmware/detach_nic \
src/vmm_mad/remotes/vmware/snapshot_create \
src/vmm_mad/remotes/vmware/snapshot_revert \
src/vmm_mad/remotes/vmware/snapshot_delete \
src/vmm_mad/remotes/vmware/scripts_common_sh.sh \
src/vmm_mad/remotes/vmware/deploy \
src/vmm_mad/remotes/vmware/migrate \
src/vmm_mad/remotes/vmware/restore \
src/vmm_mad/remotes/vmware/reboot \
src/vmm_mad/remotes/vmware/reset \
src/vmm_mad/remotes/vmware/save \
src/vmm_mad/remotes/vmware/poll \
src/vmm_mad/remotes/vmware/checkpoint \
src/vmm_mad/remotes/vmware/shutdown \
src/vmm_mad/remotes/vmware/vmware_driver.rb \
src/vmm_mad/remotes/vmware/vi_driver.rb"
#-------------------------------------------------------------------------------
# VMM Driver vCenter scripts, installed under $REMOTES_LOCATION/vmm/vcenter
#-------------------------------------------------------------------------------
VMM_EXEC_VCENTER_SCRIPTS="src/vmm_mad/remotes/vcenter/cancel \
src/vmm_mad/remotes/vcenter/attach_disk \
src/vmm_mad/remotes/vcenter/detach_disk \
src/vmm_mad/remotes/vcenter/attach_nic \
src/vmm_mad/remotes/vcenter/detach_nic \
src/vmm_mad/remotes/vcenter/snapshot_create \
src/vmm_mad/remotes/vcenter/snapshot_revert \
src/vmm_mad/remotes/vcenter/snapshot_delete \
src/vmm_mad/remotes/vcenter/deploy \
src/vmm_mad/remotes/vcenter/migrate \
src/vmm_mad/remotes/vcenter/restore \
src/vmm_mad/remotes/vcenter/reboot \
src/vmm_mad/remotes/vcenter/reset \
src/vmm_mad/remotes/vcenter/save \
src/vmm_mad/remotes/vcenter/poll \
src/vmm_mad/remotes/vcenter/shutdown"
#------------------------------------------------------------------------------
# VMM Driver EC2 scripts, to be installed under $REMOTES_LOCATION/vmm/ec2
#------------------------------------------------------------------------------
VMM_EXEC_EC2_SCRIPTS="src/vmm_mad/remotes/ec2/cancel \
src/vmm_mad/remotes/ec2/attach_disk \
src/vmm_mad/remotes/ec2/detach_disk \
src/vmm_mad/remotes/ec2/attach_nic \
src/vmm_mad/remotes/ec2/detach_nic \
src/vmm_mad/remotes/ec2/snapshot_create \
src/vmm_mad/remotes/ec2/snapshot_revert \
src/vmm_mad/remotes/ec2/snapshot_delete \
src/vmm_mad/remotes/ec2/deploy \
src/vmm_mad/remotes/ec2/migrate \
src/vmm_mad/remotes/ec2/restore \
src/vmm_mad/remotes/ec2/reboot \
src/vmm_mad/remotes/ec2/reset \
src/vmm_mad/remotes/ec2/save \
src/vmm_mad/remotes/ec2/poll \
src/vmm_mad/remotes/ec2/shutdown \
src/vmm_mad/remotes/ec2/ec2_driver.rb"
#------------------------------------------------------------------------------
# VMM Driver SoftLayer scripts, to be installed under $REMOTES_LOCATION/vmm/sl
#------------------------------------------------------------------------------
VMM_EXEC_SL_SCRIPTS="src/vmm_mad/remotes/sl/cancel \
src/vmm_mad/remotes/sl/attach_disk \
src/vmm_mad/remotes/sl/detach_disk \
src/vmm_mad/remotes/sl/attach_nic \
src/vmm_mad/remotes/sl/detach_nic \
src/vmm_mad/remotes/sl/snapshot_create \
src/vmm_mad/remotes/sl/snapshot_revert \
src/vmm_mad/remotes/sl/snapshot_delete \
src/vmm_mad/remotes/sl/deploy \
src/vmm_mad/remotes/sl/migrate \
src/vmm_mad/remotes/sl/restore \
src/vmm_mad/remotes/sl/reboot \
src/vmm_mad/remotes/sl/reset \
src/vmm_mad/remotes/sl/save \
src/vmm_mad/remotes/sl/poll \
src/vmm_mad/remotes/sl/shutdown \
src/vmm_mad/remotes/sl/sl_driver.rb"
#------------------------------------------------------------------------------
# VMM Driver Azure scripts, to be installed under $REMOTES_LOCATION/vmm/az
#------------------------------------------------------------------------------
VMM_EXEC_AZ_SCRIPTS="src/vmm_mad/remotes/az/cancel \
src/vmm_mad/remotes/az/attach_disk \
src/vmm_mad/remotes/az/detach_disk \
src/vmm_mad/remotes/az/attach_nic \
src/vmm_mad/remotes/az/detach_nic \
src/vmm_mad/remotes/az/snapshot_create \
src/vmm_mad/remotes/az/snapshot_revert \
src/vmm_mad/remotes/az/snapshot_delete \
src/vmm_mad/remotes/az/deploy \
src/vmm_mad/remotes/az/migrate \
src/vmm_mad/remotes/az/restore \
src/vmm_mad/remotes/az/reboot \
src/vmm_mad/remotes/az/reset \
src/vmm_mad/remotes/az/save \
src/vmm_mad/remotes/az/poll \
src/vmm_mad/remotes/az/shutdown \
src/vmm_mad/remotes/az/az_driver.rb"
#-------------------------------------------------------------------------------
# Information Manager Probes, to be installed under $REMOTES_LOCATION/im
#-------------------------------------------------------------------------------
IM_PROBES_FILES="src/im_mad/remotes/run_probes \
src/im_mad/remotes/stop_probes"
IM_PROBES_KVM_FILES="src/im_mad/remotes/kvm.d/collectd-client_control.sh \
src/im_mad/remotes/kvm.d/collectd-client.rb"
IM_PROBES_KVM_PROBES_FILES="src/im_mad/remotes/kvm-probes.d/kvm.rb \
src/im_mad/remotes/kvm-probes.d/architecture.sh \
src/im_mad/remotes/kvm-probes.d/cpu.sh \
src/im_mad/remotes/kvm-probes.d/poll.sh \
src/im_mad/remotes/kvm-probes.d/name.sh \
src/im_mad/remotes/common.d/monitor_ds.sh \
src/im_mad/remotes/common.d/version.sh \
src/im_mad/remotes/common.d/collectd-client-shepherd.sh"
IM_PROBES_XEN3_FILES="src/im_mad/remotes/xen.d/collectd-client_control.sh \
src/im_mad/remotes/xen.d/collectd-client.rb"
IM_PROBES_XEN3_PROBES_FILES="src/im_mad/remotes/xen-probes.d/xen.rb \
src/im_mad/remotes/xen-probes.d/architecture.sh \
src/im_mad/remotes/xen-probes.d/cpu.sh \
src/im_mad/remotes/xen-probes.d/poll3.sh \
src/im_mad/remotes/xen-probes.d/name.sh \
src/im_mad/remotes/common.d/monitor_ds.sh \
src/im_mad/remotes/common.d/version.sh \
src/im_mad/remotes/common.d/collectd-client-shepherd.sh"
IM_PROBES_XEN4_FILES="src/im_mad/remotes/xen.d/collectd-client_control.sh \
src/im_mad/remotes/xen.d/collectd-client.rb"
IM_PROBES_XEN4_PROBES_FILES="src/im_mad/remotes/xen-probes.d/xen.rb \
src/im_mad/remotes/xen-probes.d/architecture.sh \
src/im_mad/remotes/xen-probes.d/cpu.sh \
src/im_mad/remotes/xen-probes.d/poll4.sh \
src/im_mad/remotes/xen-probes.d/name.sh \
src/im_mad/remotes/common.d/monitor_ds.sh \
src/im_mad/remotes/common.d/version.sh \
src/im_mad/remotes/common.d/collectd-client-shepherd.sh"
IM_PROBES_VMWARE_FILES="src/im_mad/remotes/vmware.d/vmware.rb"
IM_PROBES_VCENTER_FILES="src/im_mad/remotes/vcenter.d/vcenter.rb"
IM_PROBES_EC2_FILES="src/im_mad/remotes/ec2.d/poll"
IM_PROBES_SL_FILES="src/im_mad/remotes/sl.d/poll"
IM_PROBES_AZ_FILES="src/im_mad/remotes/az.d/poll"
IM_PROBES_VERSION="src/im_mad/remotes/VERSION"
#-------------------------------------------------------------------------------
# Auth Manager drivers to be installed under $REMOTES_LOCATION/auth
#-------------------------------------------------------------------------------
AUTH_SERVER_CIPHER_FILES="src/authm_mad/remotes/server_cipher/authenticate"
AUTH_SERVER_X509_FILES="src/authm_mad/remotes/server_x509/authenticate"
AUTH_X509_FILES="src/authm_mad/remotes/x509/authenticate"
AUTH_LDAP_FILES="src/authm_mad/remotes/ldap/authenticate"
AUTH_SSH_FILES="src/authm_mad/remotes/ssh/authenticate"
AUTH_DUMMY_FILES="src/authm_mad/remotes/dummy/authenticate"
AUTH_PLAIN_FILES="src/authm_mad/remotes/plain/authenticate"
#-------------------------------------------------------------------------------
# Virtual Network Manager drivers to be installed under $REMOTES_LOCATION/vnm
#-------------------------------------------------------------------------------
NETWORK_FILES="src/vnm_mad/remotes/OpenNebulaNetwork.rb \
src/vnm_mad/remotes/OpenNebulaNetwork.conf \
src/vnm_mad/remotes/Firewall.rb \
src/vnm_mad/remotes/OpenNebulaNic.rb"
NETWORK_8021Q_FILES="src/vnm_mad/remotes/802.1Q/clean \
src/vnm_mad/remotes/802.1Q/post \
src/vnm_mad/remotes/802.1Q/pre \
src/vnm_mad/remotes/802.1Q/HostManaged.rb"
NETWORK_DUMMY_FILES="src/vnm_mad/remotes/dummy/clean \
src/vnm_mad/remotes/dummy/post \
src/vnm_mad/remotes/dummy/pre"
NETWORK_EBTABLES_FILES="src/vnm_mad/remotes/ebtables/clean \
src/vnm_mad/remotes/ebtables/post \
src/vnm_mad/remotes/ebtables/pre \
src/vnm_mad/remotes/ebtables/Ebtables.rb"
NETWORK_FW_FILES="src/vnm_mad/remotes/fw/post \
src/vnm_mad/remotes/fw/pre \
src/vnm_mad/remotes/fw/clean"
NETWORK_OVSWITCH_FILES="src/vnm_mad/remotes/ovswitch/clean \
src/vnm_mad/remotes/ovswitch/post \
src/vnm_mad/remotes/ovswitch/pre \
src/vnm_mad/remotes/ovswitch/OpenvSwitch.rb"
NETWORK_OVSWITCH_BRCOMPAT_FILES="src/vnm_mad/remotes/ovswitch_brcompat/clean \
src/vnm_mad/remotes/ovswitch_brcompat/post \
src/vnm_mad/remotes/ovswitch_brcompat/pre \
src/vnm_mad/remotes/ovswitch_brcompat/OpenvSwitch.rb"
NETWORK_VMWARE_FILES="src/vnm_mad/remotes/vmware/clean \
src/vnm_mad/remotes/vmware/post \
src/vnm_mad/remotes/vmware/pre \
src/vnm_mad/remotes/vmware/VMware.rb"
#-------------------------------------------------------------------------------
# Transfer Manager commands, to be installed under $LIB_LOCATION/tm_commands
# - SHARED TM, $VAR_LOCATION/tm/shared
# - FS_LVM TM, $VAR_LOCATION/tm/fs_lvm
# - QCOW2 TM, $VAR_LOCATION/tm/qcow2
# - SSH TM, $VAR_LOCATION/tm/ssh
# - DUMMY TM, $VAR_LOCATION/tm/dummy
# - VMWARE TM, $VAR_LOCATION/tm/vmware
# - LVM TM, $VAR_LOCATION/tm/lvm
# - CEPH TM, $VAR_LOCATION/tm/ceph
# - DEV TM, $VAR_LOCATION/tm/dev
#-------------------------------------------------------------------------------
TM_FILES="src/tm_mad/tm_common.sh"
TM_SHARED_FILES="src/tm_mad/shared/clone \
src/tm_mad/shared/delete \
src/tm_mad/shared/ln \
src/tm_mad/shared/mkswap \
src/tm_mad/shared/mkimage \
src/tm_mad/shared/mv \
src/tm_mad/shared/context \
src/tm_mad/shared/premigrate \
src/tm_mad/shared/postmigrate \
src/tm_mad/shared/mvds \
src/tm_mad/shared/cpds"
TM_FS_LVM_FILES="src/tm_mad/fs_lvm/clone \
src/tm_mad/fs_lvm/ln \
src/tm_mad/fs_lvm/mv \
src/tm_mad/fs_lvm/mvds \
src/tm_mad/fs_lvm/cpds \
src/tm_mad/fs_lvm/premigrate \
src/tm_mad/fs_lvm/postmigrate \
src/tm_mad/fs_lvm/delete"
TM_QCOW2_FILES="src/tm_mad/qcow2/clone \
src/tm_mad/qcow2/delete \
src/tm_mad/qcow2/ln \
src/tm_mad/qcow2/mkswap \
src/tm_mad/qcow2/mkimage \
src/tm_mad/qcow2/mv \
src/tm_mad/qcow2/context \
src/tm_mad/qcow2/premigrate \
src/tm_mad/qcow2/postmigrate \
src/tm_mad/qcow2/mvds \
src/tm_mad/qcow2/cpds"
TM_SSH_FILES="src/tm_mad/ssh/clone \
src/tm_mad/ssh/delete \
src/tm_mad/ssh/ln \
src/tm_mad/ssh/mkswap \
src/tm_mad/ssh/mkimage \
src/tm_mad/ssh/mv \
src/tm_mad/ssh/context \
src/tm_mad/ssh/premigrate \
src/tm_mad/ssh/postmigrate \
src/tm_mad/ssh/mvds \
src/tm_mad/ssh/cpds"
TM_DUMMY_FILES="src/tm_mad/dummy/clone \
src/tm_mad/dummy/delete \
src/tm_mad/dummy/ln \
src/tm_mad/dummy/mkswap \
src/tm_mad/dummy/mkimage \
src/tm_mad/dummy/mv \
src/tm_mad/dummy/context \
src/tm_mad/dummy/premigrate \
src/tm_mad/dummy/postmigrate \
src/tm_mad/dummy/mvds \
src/tm_mad/dummy/cpds"
TM_VMFS_FILES="src/tm_mad/vmfs/clone \
src/tm_mad/vmfs/delete \
src/tm_mad/vmfs/ln \
src/tm_mad/vmfs/mkswap \
src/tm_mad/vmfs/mkimage \
src/tm_mad/vmfs/mv \
src/tm_mad/vmfs/context \
src/tm_mad/vmfs/mvds \
src/tm_mad/vmfs/cpds \
src/tm_mad/vmfs/postmigrate \
src/tm_mad/vmfs/premigrate"
TM_LVM_FILES="src/tm_mad/lvm/clone \
src/tm_mad/lvm/ln \
src/tm_mad/lvm/mv \
src/tm_mad/lvm/mvds \
src/tm_mad/lvm/cpds \
src/tm_mad/lvm/premigrate \
src/tm_mad/lvm/postmigrate \
src/tm_mad/lvm/delete"
TM_CEPH_FILES="src/tm_mad/ceph/clone \
src/tm_mad/ceph/ln \
src/tm_mad/ceph/mv \
src/tm_mad/ceph/mvds \
src/tm_mad/ceph/cpds \
src/tm_mad/ceph/premigrate \
src/tm_mad/ceph/postmigrate \
src/tm_mad/ceph/delete"
TM_DEV_FILES="src/tm_mad/dev/clone \
src/tm_mad/dev/ln \
src/tm_mad/dev/mv \
src/tm_mad/dev/mvds \
src/tm_mad/dev/cpds \
src/tm_mad/dev/premigrate \
src/tm_mad/dev/postmigrate \
src/tm_mad/dev/delete"
#-------------------------------------------------------------------------------
# Datastore drivers, to be installed under $REMOTES_LOCATION/datastore
# - Dummy Image Repository, $REMOTES_LOCATION/datastore/dummy
# - FS based Image Repository, $REMOTES_LOCATION/datastore/fs
# - VMFS based Image Repository, $REMOTES_LOCATION/datastore/vmfs
# - LVM based Image Repository, $REMOTES_LOCATION/datastore/lvm
#-------------------------------------------------------------------------------
DATASTORE_DRIVER_COMMON_SCRIPTS="src/datastore_mad/remotes/xpath.rb \
src/datastore_mad/remotes/downloader.sh \
src/datastore_mad/remotes/libfs.sh"
DATASTORE_DRIVER_DUMMY_SCRIPTS="src/datastore_mad/remotes/dummy/cp \
src/datastore_mad/remotes/dummy/mkfs \
src/datastore_mad/remotes/dummy/stat \
src/datastore_mad/remotes/dummy/clone \
src/datastore_mad/remotes/dummy/monitor \
src/datastore_mad/remotes/dummy/rm"
DATASTORE_DRIVER_FS_SCRIPTS="src/datastore_mad/remotes/fs/cp \
src/datastore_mad/remotes/fs/mkfs \
src/datastore_mad/remotes/fs/stat \
src/datastore_mad/remotes/fs/clone \
src/datastore_mad/remotes/fs/monitor \
src/datastore_mad/remotes/fs/rm"
DATASTORE_DRIVER_VMFS_SCRIPTS="src/datastore_mad/remotes/vmfs/cp \
src/datastore_mad/remotes/vmfs/mkfs \
src/datastore_mad/remotes/vmfs/stat \
src/datastore_mad/remotes/vmfs/clone \
src/datastore_mad/remotes/vmfs/monitor \
src/datastore_mad/remotes/vmfs/rm \
src/datastore_mad/remotes/vmfs/vmfs.conf"
DATASTORE_DRIVER_LVM_SCRIPTS="src/datastore_mad/remotes/lvm/cp \
src/datastore_mad/remotes/lvm/mkfs \
src/datastore_mad/remotes/lvm/stat \
src/datastore_mad/remotes/lvm/rm \
src/datastore_mad/remotes/lvm/monitor \
src/datastore_mad/remotes/lvm/clone \
src/datastore_mad/remotes/lvm/lvm.conf"
DATASTORE_DRIVER_CEPH_SCRIPTS="src/datastore_mad/remotes/ceph/cp \
src/datastore_mad/remotes/ceph/mkfs \
src/datastore_mad/remotes/ceph/stat \
src/datastore_mad/remotes/ceph/rm \
src/datastore_mad/remotes/ceph/monitor \
src/datastore_mad/remotes/ceph/clone \
src/datastore_mad/remotes/ceph/ceph.conf"
DATASTORE_DRIVER_DEV_SCRIPTS="src/datastore_mad/remotes/dev/cp \
src/datastore_mad/remotes/dev/mkfs \
src/datastore_mad/remotes/dev/stat \
src/datastore_mad/remotes/dev/rm \
src/datastore_mad/remotes/dev/monitor \
src/datastore_mad/remotes/dev/clone"
#-------------------------------------------------------------------------------
# Migration scripts for onedb command, to be installed under $LIB_LOCATION
#-------------------------------------------------------------------------------
ONEDB_FILES="src/onedb/fsck.rb \
src/onedb/import_slave.rb \
src/onedb/onedb.rb \
src/onedb/onedb_backend.rb"
ONEDB_SHARED_MIGRATOR_FILES="src/onedb/shared/2.0_to_2.9.80.rb \
src/onedb/shared/2.9.80_to_2.9.85.rb \
src/onedb/shared/2.9.85_to_2.9.90.rb \
src/onedb/shared/2.9.90_to_3.0.0.rb \
src/onedb/shared/3.0.0_to_3.1.0.rb \
src/onedb/shared/3.1.0_to_3.1.80.rb \
src/onedb/shared/3.1.80_to_3.2.0.rb \
src/onedb/shared/3.2.0_to_3.2.1.rb \
src/onedb/shared/3.2.1_to_3.3.0.rb \
src/onedb/shared/3.3.0_to_3.3.80.rb \
src/onedb/shared/3.3.80_to_3.4.0.rb \
src/onedb/shared/3.4.0_to_3.4.1.rb \
src/onedb/shared/3.4.1_to_3.5.80.rb \
src/onedb/shared/3.5.80_to_3.6.0.rb \
src/onedb/shared/3.6.0_to_3.7.80.rb \
src/onedb/shared/3.7.80_to_3.8.0.rb \
src/onedb/shared/3.8.0_to_3.8.1.rb \
src/onedb/shared/3.8.1_to_3.8.2.rb \
src/onedb/shared/3.8.2_to_3.8.3.rb \
src/onedb/shared/3.8.3_to_3.8.4.rb \
src/onedb/shared/3.8.4_to_3.8.5.rb \
src/onedb/shared/3.8.5_to_3.9.80.rb \
src/onedb/shared/3.9.80_to_3.9.90.rb \
src/onedb/shared/3.9.90_to_4.0.0.rb \
src/onedb/shared/4.0.0_to_4.0.1.rb \
src/onedb/shared/4.0.1_to_4.1.80.rb \
src/onedb/shared/4.1.80_to_4.2.0.rb \
src/onedb/shared/4.2.0_to_4.3.80.rb \
src/onedb/shared/4.3.80_to_4.3.85.rb \
src/onedb/shared/4.3.85_to_4.3.90.rb \
src/onedb/shared/4.3.90_to_4.4.0.rb \
src/onedb/shared/4.4.0_to_4.4.1.rb \
src/onedb/shared/4.4.1_to_4.5.80.rb \
src/onedb/shared/4.5.80_to_4.6.0.rb"
ONEDB_LOCAL_MIGRATOR_FILES="src/onedb/local/4.5.80_to_4.7.80.rb \
src/onedb/local/4.7.80_to_4.9.80.rb \
src/onedb/local/4.9.80_to_4.10.3.rb"
#-------------------------------------------------------------------------------
# Configuration files for OpenNebula, to be installed under $ETC_LOCATION
#-------------------------------------------------------------------------------
ETC_FILES="share/etc/oned.conf \
share/etc/defaultrc \
src/scheduler/etc/sched.conf"
VMWARE_ETC_FILES="src/vmm_mad/remotes/vmware/vmwarerc"
EC2_ETC_FILES="src/vmm_mad/remotes/ec2/ec2_driver.conf \
src/vmm_mad/remotes/ec2/ec2_driver.default"
SL_ETC_FILES="src/vmm_mad/remotes/sl/sl_driver.conf \
src/vmm_mad/remotes/sl/sl_driver.default"
AZ_ETC_FILES="src/vmm_mad/remotes/az/az_driver.conf \
src/vmm_mad/remotes/az/az_driver.default"
#-------------------------------------------------------------------------------
# Virtualization drivers config. files, to be installed under $ETC_LOCATION
# - ssh, $ETC_LOCATION/vmm_exec
#-------------------------------------------------------------------------------
VMM_EXEC_ETC_FILES="src/vmm_mad/exec/vmm_execrc \
src/vmm_mad/exec/vmm_exec_kvm.conf \
src/vmm_mad/exec/vmm_exec_xen3.conf \
src/vmm_mad/exec/vmm_exec_xen4.conf \
src/vmm_mad/exec/vmm_exec_vmware.conf \
src/vmm_mad/exec/vmm_exec_vcenter.conf"
#-------------------------------------------------------------------------------
# Hook Manager driver config. files, to be installed under $ETC_LOCATION/hm
#-------------------------------------------------------------------------------
HM_ETC_FILES="src/hm_mad/hmrc"
#-------------------------------------------------------------------------------
# Auth Manager drivers config. files, to be installed under $ETC_LOCATION/auth
#-------------------------------------------------------------------------------
AUTH_ETC_FILES="src/authm_mad/remotes/server_x509/server_x509_auth.conf \
src/authm_mad/remotes/ldap/ldap_auth.conf \
src/authm_mad/remotes/x509/x509_auth.conf"
#-------------------------------------------------------------------------------
# Sample files, to be installed under $SHARE_LOCATION/examples
#-------------------------------------------------------------------------------
EXAMPLE_SHARE_FILES="share/examples/vm.template \
share/examples/private.net \
share/examples/public.net"
#-------------------------------------------------------------------------------
# Files required to interact with the websockify server
#-------------------------------------------------------------------------------
WEBSOCKIFY_SHARE_FILES="share/websockify/websocketproxy.py \
share/websockify/websocket.py \
share/websockify/websockify"
#-------------------------------------------------------------------------------
# HOOK scripts, to be installed under $VAR_LOCATION/remotes/hooks
#-------------------------------------------------------------------------------
HOOK_FT_FILES="share/hooks/host_error.rb"
#-------------------------------------------------------------------------------
# Installation scripts, to be installed under $SHARE_LOCATION
#-------------------------------------------------------------------------------
INSTALL_GEMS_SHARE_FILE="share/install_gems/install_gems"
#-------------------------------------------------------------------------------
# OCA Files
#-------------------------------------------------------------------------------
OCA_LIB_FILES="src/oca/ruby/opennebula.rb"
RUBY_OPENNEBULA_LIB_FILES="src/oca/ruby/opennebula/acl_pool.rb \
src/oca/ruby/opennebula/acl.rb \
src/oca/ruby/opennebula/client.rb \
src/oca/ruby/opennebula/cluster_pool.rb \
src/oca/ruby/opennebula/cluster.rb \
src/oca/ruby/opennebula/datastore_pool.rb \
src/oca/ruby/opennebula/datastore.rb \
src/oca/ruby/opennebula/document_json.rb \
src/oca/ruby/opennebula/document_pool_json.rb \
src/oca/ruby/opennebula/document_pool.rb \
src/oca/ruby/opennebula/document.rb \
src/oca/ruby/opennebula/error.rb \
src/oca/ruby/opennebula/group_pool.rb \
src/oca/ruby/opennebula/group.rb \
src/oca/ruby/opennebula/host_pool.rb \
src/oca/ruby/opennebula/host.rb \
src/oca/ruby/opennebula/image_pool.rb \
src/oca/ruby/opennebula/image.rb \
src/oca/ruby/opennebula/pool_element.rb \
src/oca/ruby/opennebula/pool.rb \
src/oca/ruby/opennebula/system.rb \
src/oca/ruby/opennebula/template_pool.rb \
src/oca/ruby/opennebula/template.rb \
src/oca/ruby/opennebula/user_pool.rb \
src/oca/ruby/opennebula/user.rb \
src/oca/ruby/opennebula/zone_pool.rb \
src/oca/ruby/opennebula/zone.rb \
src/oca/ruby/opennebula/virtual_machine_pool.rb \
src/oca/ruby/opennebula/virtual_machine.rb \
src/oca/ruby/opennebula/virtual_network_pool.rb \
src/oca/ruby/opennebula/virtual_network.rb \
src/oca/ruby/opennebula/xml_element.rb \
src/oca/ruby/opennebula/xml_pool.rb \
src/oca/ruby/opennebula/xml_utils.rb \
src/oca/ruby/opennebula/oneflow_client.rb"
#-------------------------------------------------------------------------------
# Common Cloud Files
#-------------------------------------------------------------------------------
COMMON_CLOUD_LIB_FILES="src/cloud/common/CloudServer.rb \
src/cloud/common/CloudClient.rb \
src/cloud/common/CloudAuth.rb"
COMMON_CLOUD_CLIENT_LIB_FILES="src/cloud/common/CloudClient.rb"
CLOUD_AUTH_LIB_FILES="src/cloud/common/CloudAuth/SunstoneCloudAuth.rb \
src/cloud/common/CloudAuth/EC2CloudAuth.rb \
src/cloud/common/CloudAuth/X509CloudAuth.rb \
src/cloud/common/CloudAuth/OneGateCloudAuth.rb \
src/cloud/common/CloudAuth/OpenNebulaCloudAuth.rb"
#-------------------------------------------------------------------------------
# EC2 Query for OpenNebula
#-------------------------------------------------------------------------------
ECO_LIB_FILES="src/cloud/ec2/lib/EC2QueryClient.rb \
src/cloud/ec2/lib/EC2QueryServer.rb \
src/cloud/ec2/lib/ImageEC2.rb \
src/cloud/ec2/lib/elastic_ip.rb \
src/cloud/ec2/lib/ebs.rb \
src/cloud/ec2/lib/tags.rb \
src/cloud/ec2/lib/instance.rb \
src/cloud/ec2/lib/keypair.rb \
src/cloud/ec2/lib/net_ssh_replacement.rb \
src/cloud/ec2/lib/econe_application.rb \
src/cloud/ec2/lib/econe-server.rb"
ECO_LIB_CLIENT_FILES="src/cloud/ec2/lib/EC2QueryClient.rb"
ECO_LIB_VIEW_FILES="src/cloud/ec2/lib/views/describe_images.erb \
src/cloud/ec2/lib/views/describe_instances.erb \
src/cloud/ec2/lib/views/describe_regions.erb \
src/cloud/ec2/lib/views/describe_availability_zones.erb \
src/cloud/ec2/lib/views/create_tags.erb \
src/cloud/ec2/lib/views/delete_tags.erb \
src/cloud/ec2/lib/views/describe_tags.erb \
src/cloud/ec2/lib/views/create_volume.erb \
src/cloud/ec2/lib/views/create_snapshot.erb \
src/cloud/ec2/lib/views/delete_snapshot.erb \
src/cloud/ec2/lib/views/describe_snapshots.erb \
src/cloud/ec2/lib/views/create_image.erb \
src/cloud/ec2/lib/views/describe_volumes.erb \
src/cloud/ec2/lib/views/attach_volume.erb \
src/cloud/ec2/lib/views/detach_volume.erb \
src/cloud/ec2/lib/views/delete_volume.erb \
src/cloud/ec2/lib/views/register_image.erb \
src/cloud/ec2/lib/views/run_instances.erb \
src/cloud/ec2/lib/views/allocate_address.erb \
src/cloud/ec2/lib/views/associate_address.erb \
src/cloud/ec2/lib/views/disassociate_address.erb \
src/cloud/ec2/lib/views/describe_addresses.erb \
src/cloud/ec2/lib/views/release_address.erb \
src/cloud/ec2/lib/views/create_keypair.erb \
src/cloud/ec2/lib/views/delete_keypair.erb \
src/cloud/ec2/lib/views/describe_keypairs.erb \
src/cloud/ec2/lib/views/terminate_instances.erb \
src/cloud/ec2/lib/views/stop_instances.erb \
src/cloud/ec2/lib/views/reboot_instances.erb \
src/cloud/ec2/lib/views/start_instances.erb"
ECO_BIN_FILES="src/cloud/ec2/bin/econe-server \
src/cloud/ec2/bin/econe-describe-images \
src/cloud/ec2/bin/econe-describe-volumes \
src/cloud/ec2/bin/econe-describe-instances \
src/cloud/ec2/bin/econe-describe-keypairs \
src/cloud/ec2/bin/econe-register \
src/cloud/ec2/bin/econe-attach-volume \
src/cloud/ec2/bin/econe-detach-volume \
src/cloud/ec2/bin/econe-delete-volume \
src/cloud/ec2/bin/econe-delete-keypair \
src/cloud/ec2/bin/econe-create-volume \
src/cloud/ec2/bin/econe-create-keypair \
src/cloud/ec2/bin/econe-run-instances \
src/cloud/ec2/bin/econe-terminate-instances \
src/cloud/ec2/bin/econe-start-instances \
src/cloud/ec2/bin/econe-stop-instances \
src/cloud/ec2/bin/econe-reboot-instances \
src/cloud/ec2/bin/econe-describe-addresses \
src/cloud/ec2/bin/econe-allocate-address \
src/cloud/ec2/bin/econe-release-address \
src/cloud/ec2/bin/econe-associate-address \
src/cloud/ec2/bin/econe-disassociate-address \
src/cloud/ec2/bin/econe-upload"
ECO_BIN_CLIENT_FILES="src/cloud/ec2/bin/econe-describe-images \
src/cloud/ec2/bin/econe-describe-instances \
src/cloud/ec2/bin/econe-describe-volumes \
src/cloud/ec2/bin/econe-register \
src/cloud/ec2/bin/econe-attach-volume \
src/cloud/ec2/bin/econe-detach-volume \
src/cloud/ec2/bin/econe-delete-volume \
src/cloud/ec2/bin/econe-create-volume \
src/cloud/ec2/bin/econe-run-instances \
src/cloud/ec2/bin/econe-terminate-instances \
src/cloud/ec2/bin/econe-start-instances \
src/cloud/ec2/bin/econe-stop-instances \
src/cloud/ec2/bin/econe-reboot-instances \
src/cloud/ec2/bin/econe-describe-addresses \
src/cloud/ec2/bin/econe-allocate-address \
src/cloud/ec2/bin/econe-release-address \
src/cloud/ec2/bin/econe-associate-address \
src/cloud/ec2/bin/econe-disassociate-address \
src/cloud/ec2/bin/econe-upload"
ECO_ETC_FILES="src/cloud/ec2/etc/econe.conf"
ECO_ETC_TEMPLATE_FILES="src/cloud/ec2/etc/templates/m1.small.erb"
#-------------------------------------------------------------------------------
# Marketplace Client
#-------------------------------------------------------------------------------
MARKET_LIB_FILES="src/cloud/marketplace/lib/marketplace_client.rb"
MARKET_LIB_CLIENT_FILES="src/cloud/marketplace/lib/marketplace_client.rb"
MARKET_BIN_FILES="src/cloud/marketplace/bin/onemarket"
MARKET_BIN_CLIENT_FILES="src/cloud/marketplace/bin/onemarket"
#-----------------------------------------------------------------------------
# CLI files
#-----------------------------------------------------------------------------
CLI_LIB_FILES="src/cli/cli_helper.rb \
src/cli/command_parser.rb \
src/cli/one_helper.rb"
ONE_CLI_LIB_FILES="src/cli/one_helper/onegroup_helper.rb \
src/cli/one_helper/onehost_helper.rb \
src/cli/one_helper/oneimage_helper.rb \
src/cli/one_helper/onetemplate_helper.rb \
src/cli/one_helper/onequota_helper.rb \
src/cli/one_helper/oneuser_helper.rb \
src/cli/one_helper/onevm_helper.rb \
src/cli/one_helper/onevnet_helper.rb \
src/cli/one_helper/oneacl_helper.rb \
src/cli/one_helper/onedatastore_helper.rb \
src/cli/one_helper/onecluster_helper.rb \
src/cli/one_helper/onezone_helper.rb \
src/cli/one_helper/oneacct_helper.rb"
CLI_BIN_FILES="src/cli/onevm \
src/cli/onehost \
src/cli/onevnet \
src/cli/oneuser \
src/cli/oneimage \
src/cli/onetemplate \
src/cli/onegroup \
src/cli/oneacl \
src/cli/onedatastore \
src/cli/onecluster \
src/cli/onezone \
src/cli/oneflow \
src/cli/oneflow-template \
src/cli/oneacct"
CLI_CONF_FILES="src/cli/etc/onegroup.yaml \
src/cli/etc/onehost.yaml \
src/cli/etc/oneimage.yaml \
src/cli/etc/onetemplate.yaml \
src/cli/etc/oneuser.yaml \
src/cli/etc/onevm.yaml \
src/cli/etc/onevnet.yaml \
src/cli/etc/oneacl.yaml \
src/cli/etc/onedatastore.yaml \
src/cli/etc/onecluster.yaml \
src/cli/etc/onezone.yaml \
src/cli/etc/oneacct.yaml"
#-----------------------------------------------------------------------------
# Sunstone files
#-----------------------------------------------------------------------------
SUNSTONE_FILES="src/sunstone/sunstone-server.rb \
src/sunstone/config.ru"
SUNSTONE_BIN_FILES="src/sunstone/bin/sunstone-server \
src/sunstone/bin/novnc-server"
SUNSTONE_ETC_FILES="src/sunstone/etc/sunstone-server.conf \
src/sunstone/etc/sunstone-views.yaml"
SUNSTONE_ETC_VIEW_FILES="src/sunstone/etc/sunstone-views/admin.yaml \
src/sunstone/etc/sunstone-views/user.yaml \
src/sunstone/etc/sunstone-views/cloud.yaml \
src/sunstone/etc/sunstone-views/cloud_vcenter.yaml \
src/sunstone/etc/sunstone-views/vdcadmin.yaml \
src/sunstone/etc/sunstone-views/vdcadmin_vcenter.yaml \
src/sunstone/etc/sunstone-views/vcenter.yaml"
SUNSTONE_MODELS_FILES="src/sunstone/models/OpenNebulaJSON.rb \
src/sunstone/models/SunstoneServer.rb \
src/sunstone/models/SunstoneMarketplace.rb \
src/sunstone/models/SunstoneViews.rb"
SUNSTONE_MODELS_JSON_FILES="src/sunstone/models/OpenNebulaJSON/HostJSON.rb \
src/sunstone/models/OpenNebulaJSON/ImageJSON.rb \
src/sunstone/models/OpenNebulaJSON/GroupJSON.rb \
src/sunstone/models/OpenNebulaJSON/JSONUtils.rb \
src/sunstone/models/OpenNebulaJSON/PoolJSON.rb \
src/sunstone/models/OpenNebulaJSON/UserJSON.rb \
src/sunstone/models/OpenNebulaJSON/VirtualMachineJSON.rb \
src/sunstone/models/OpenNebulaJSON/TemplateJSON.rb \
src/sunstone/models/OpenNebulaJSON/AclJSON.rb \
src/sunstone/models/OpenNebulaJSON/ClusterJSON.rb \
src/sunstone/models/OpenNebulaJSON/DatastoreJSON.rb \
src/sunstone/models/OpenNebulaJSON/VirtualNetworkJSON.rb \
src/sunstone/models/OpenNebulaJSON/ZoneJSON.rb"
SUNSTONE_VIEWS_FILES="src/sunstone/views/index.erb \
src/sunstone/views/login.erb \
src/sunstone/views/vnc.erb \
src/sunstone/views/_login_standard.erb \
src/sunstone/views/_login_x509.erb"
SUNSTONE_PUBLIC_JS_FILES="src/sunstone/public/js/login.js \
src/sunstone/public/js/sunstone.js \
src/sunstone/public/js/opennebula.js \
src/sunstone/public/js/locale.js"
SUNSTONE_PUBLIC_JS_PLUGINS_FILES="\
src/sunstone/public/js/plugins/dashboard-tab.js \
src/sunstone/public/js/plugins/hosts-tab.js \
src/sunstone/public/js/plugins/clusters-tab.js \
src/sunstone/public/js/plugins/datastores-tab.js \
src/sunstone/public/js/plugins/system-tab.js \
src/sunstone/public/js/plugins/vresources-tab.js \
src/sunstone/public/js/plugins/infra-tab.js \
src/sunstone/public/js/plugins/groups-tab.js \
src/sunstone/public/js/plugins/images-tab.js \
src/sunstone/public/js/plugins/files-tab.js \
src/sunstone/public/js/plugins/templates-tab.js \
src/sunstone/public/js/plugins/users-tab.js \
src/sunstone/public/js/plugins/vms-tab.js \
src/sunstone/public/js/plugins/acls-tab.js \
src/sunstone/public/js/plugins/vnets-tab.js \
src/sunstone/public/js/plugins/marketplace-tab.js \
src/sunstone/public/js/plugins/provision-tab.js \
src/sunstone/public/js/plugins/config-tab.js \
src/sunstone/public/js/plugins/oneflow-dashboard.js \
src/sunstone/public/js/plugins/oneflow-services.js \
src/sunstone/public/js/plugins/oneflow-templates.js \
src/sunstone/public/js/plugins/support-tab.js \
src/sunstone/public/js/plugins/zones-tab.js"
SUNSTONE_ROUTES_FILES="src/sunstone/routes/oneflow.rb \
src/sunstone/routes/vcenter.rb \
src/sunstone/routes/support.rb"
# begin bower
SUNSTONE_PUBLIC_NEW_VENDOR_JQUERY="\
src/sunstone/public/bower_components/jquery/dist/jquery.min.js \
src/sunstone/public/bower_components/jquery-migrate/jquery-migrate.min.js"
SUNSTONE_PUBLIC_NEW_VENDOR_DATATABLES="\
src/sunstone/public/bower_components/datatables/media/js/jquery.dataTables.min.js"
SUNSTONE_PUBLIC_NEW_VENDOR_MODERNIZR="\
src/sunstone/public/bower_components/modernizr/modernizr.js"
SUNSTONE_PUBLIC_NEW_VENDOR_FOUNDATION="\
src/sunstone/public/bower_components/foundation/js/foundation.min.js"
SUNSTONE_PUBLIC_NEW_VENDOR_JGROWL="\
src/sunstone/public/bower_components/jgrowl/jquery.jgrowl.min.js \
src/sunstone/public/bower_components/jgrowl/jquery.jgrowl.min.css"
SUNSTONE_PUBLIC_NEW_VENDOR_RESUMABLEJS="\
src/sunstone/public/bower_components/resumablejs/resumable.js"
SUNSTONE_PUBLIC_NEW_VENDOR_FOUNDATION_DATATABLES="\
src/sunstone/public/bower_components/foundation-datatables/integration/foundation/dataTables.foundation.js"
SUNSTONE_PUBLIC_NEW_VENDOR_FLOT="\
src/sunstone/public/bower_components/flot/jquery.flot.js \
src/sunstone/public/bower_components/flot/excanvas.min.js \
src/sunstone/public/bower_components/flot/jquery.flot.time.js \
src/sunstone/public/bower_components/flot/jquery.flot.resize.js \
src/sunstone/public/bower_components/flot.tooltip/js/jquery.flot.tooltip.min.js \
src/sunstone/public/bower_components/flot/jquery.flot.stack.js"
SUNSTONE_PUBLIC_NEW_VENDOR_FONTAWESOME_CSS="\
src/sunstone/public/bower_components/fontawesome/css/font-awesome.min.css"
SUNSTONE_PUBLIC_NEW_VENDOR_FONTAWESOME_FONT="\
src/sunstone/public/bower_components/fontawesome/fonts/fontawesome-webfont.eot \
src/sunstone/public/bower_components/fontawesome/fonts/fontawesome-webfont.woff \
src/sunstone/public/bower_components/fontawesome/fonts/fontawesome-webfont.ttf \
src/sunstone/public/bower_components/fontawesome/fonts/fontawesome-webfont.svg \
src/sunstone/public/bower_components/fontawesome/fonts/FontAwesome.otf"
SUNSTONE_PUBLIC_VENDOR_NOVNC="\
src/sunstone/public/bower_components/no-vnc/include/base.css \
src/sunstone/public/bower_components/no-vnc/include/base64.js \
src/sunstone/public/bower_components/no-vnc/include/black.css \
src/sunstone/public/bower_components/no-vnc/include/blue.css \
src/sunstone/public/bower_components/no-vnc/include/des.js \
src/sunstone/public/bower_components/no-vnc/include/display.js \
src/sunstone/public/bower_components/no-vnc/include/input.js \
src/sunstone/public/bower_components/no-vnc/include/jsunzip.js \
src/sunstone/public/bower_components/no-vnc/include/keyboard.js \
src/sunstone/public/bower_components/no-vnc/include/keysymdef.js \
src/sunstone/public/bower_components/no-vnc/include/logo.js \
src/sunstone/public/bower_components/no-vnc/include/Orbitron700.ttf \
src/sunstone/public/bower_components/no-vnc/include/Orbitron700.woff \
src/sunstone/public/bower_components/no-vnc/include/playback.js \
src/sunstone/public/bower_components/no-vnc/include/rfb.js \
src/sunstone/public/bower_components/no-vnc/include/ui.js \
src/sunstone/public/bower_components/no-vnc/include/util.js \
src/sunstone/public/bower_components/no-vnc/include/websock.js \
src/sunstone/public/bower_components/no-vnc/include/webutil.js"
SUNSTONE_PUBLIC_VENDOR_NOVNC_WEBSOCKET="\
src/sunstone/public/bower_components/no-vnc/include/web-socket-js/web_socket.js \
src/sunstone/public/bower_components/no-vnc/include/web-socket-js/swfobject.js \
src/sunstone/public/bower_components/no-vnc/include/web-socket-js/WebSocketMain.swf"
# end bower
SUNSTONE_PUBLIC_CSS_FILES="src/sunstone/public/css/app.css \
src/sunstone/public/css/opensans.woff \
src/sunstone/public/css/login.css"
SUNSTONE_PUBLIC_VENDOR_CRYPTOJS="\
src/sunstone/public/vendor/crypto-js/NOTICE \
src/sunstone/public/vendor/crypto-js/sha1-min.js \
src/sunstone/public/vendor/crypto-js/core-min.js \
src/sunstone/public/vendor/crypto-js/enc-base64-min.js \
src/sunstone/public/vendor/crypto-js/NEW-BSD-LICENSE.txt"
SUNSTONE_PUBLIC_VENDOR_XML2JSON="\
src/sunstone/public/vendor/xml2json/NOTICE \
src/sunstone/public/vendor/xml2json/jquery.xml2json.pack.js"
SUNSTONE_PUBLIC_NEW_VENDOR_NOUISLIDER="\
src/sunstone/public/vendor/4.0/nouislider/jquery.nouislider.min.js \
src/sunstone/public/vendor/4.0/nouislider/nouislider.css"
SUNSTONE_PUBLIC_IMAGES_FILES="src/sunstone/public/images/ajax-loader.gif \
src/sunstone/public/images/favicon.ico \
src/sunstone/public/images/login_over.png \
src/sunstone/public/images/login.png \
src/sunstone/public/images/opennebula-sunstone-big.png \
src/sunstone/public/images/opennebula-sunstone-small.png \
src/sunstone/public/images/opennebula-sunstone-v4.0.png \
src/sunstone/public/images/opennebula-sunstone-v4.0-small.png \
src/sunstone/public/images/one_small_logo.png \
src/sunstone/public/images/panel.png \
src/sunstone/public/images/panel_short.png \
src/sunstone/public/images/pbar.gif \
src/sunstone/public/images/Refresh-icon.png \
src/sunstone/public/images/red_bullet.png \
src/sunstone/public/images/yellow_bullet.png \
src/sunstone/public/images/green_bullet.png \
src/sunstone/public/images/vnc_off.png \
src/sunstone/public/images/vnc_on.png \
src/sunstone/public/images/network_icon.png \
src/sunstone/public/images/system_icon.png \
src/sunstone/public/images/server_icon.png \
src/sunstone/public/images/sort_asc.png \
src/sunstone/public/images/sort_asc_disabled.png \
src/sunstone/public/images/sort_both.png \
src/sunstone/public/images/sort_desc.png \
src/sunstone/public/images/sort_desc_disabled.png\
"
SUNSTONE_PUBLIC_LOGOS_FILES="src/sunstone/public/images/logos/arch.png \
src/sunstone/public/images/logos/centos.png \
src/sunstone/public/images/logos/debian.png \
src/sunstone/public/images/logos/fedora.png \
src/sunstone/public/images/logos/linux.png \
src/sunstone/public/images/logos/redhat.png \
src/sunstone/public/images/logos/ubuntu.png \
src/sunstone/public/images/logos/windowsxp.png \
src/sunstone/public/images/logos/windows8.png \
"
SUNSTONE_PUBLIC_LOCALE_CA="\
src/sunstone/locale/languages/ca.js \
src/sunstone/locale/languages/ca_datatable.txt"
SUNSTONE_PUBLIC_LOCALE_CS_CZ="\
src/sunstone/locale/languages/cs_CZ.js \
src/sunstone/locale/languages/cs_datatable.txt"
SUNSTONE_PUBLIC_LOCALE_DE="\
src/sunstone/locale/languages/de.js \
src/sunstone/locale/languages/de_datatable.txt"
SUNSTONE_PUBLIC_LOCALE_DA="\
src/sunstone/locale/languages/da.js \
src/sunstone/locale/languages/da_datatable.txt"
SUNSTONE_PUBLIC_LOCALE_EL_GR="\
src/sunstone/locale/languages/el_GR.js \
src/sunstone/locale/languages/el_datatable.txt"
SUNSTONE_PUBLIC_LOCALE_EN_US="\
src/sunstone/locale/languages/en_US.js \
src/sunstone/locale/languages/en_datatable.txt"
SUNSTONE_PUBLIC_LOCALE_ES_ES="\
src/sunstone/locale/languages/es_ES.js \
src/sunstone/locale/languages/es_datatable.txt"
SUNSTONE_PUBLIC_LOCALE_FA_IR="\
src/sunstone/locale/languages/fa_IR.js \
src/sunstone/locale/languages/fa_datatable.txt"
SUNSTONE_PUBLIC_LOCALE_FR_FR="\
src/sunstone/locale/languages/fr_FR.js \
src/sunstone/locale/languages/fr_datatable.txt"
SUNSTONE_PUBLIC_LOCALE_IT_IT="\
src/sunstone/locale/languages/it_IT.js \
src/sunstone/locale/languages/it_datatable.txt"
SUNSTONE_PUBLIC_LOCALE_NL_NL="\
src/sunstone/locale/languages/nl_NL.js \
src/sunstone/locale/languages/nl_datatable.txt"
SUNSTONE_PUBLIC_LOCALE_PL="\
src/sunstone/locale/languages/pl.js \
src/sunstone/locale/languages/pl_datatable.txt"
SUNSTONE_PUBLIC_LOCALE_PT_PT="\
src/sunstone/locale/languages/pt_PT.js \
src/sunstone/locale/languages/pt_datatable.txt"
SUNSTONE_PUBLIC_LOCALE_PT_BR="\
src/sunstone/locale/languages/pt_BR.js \
src/sunstone/locale/languages/pt_datatable.txt"
SUNSTONE_PUBLIC_LOCALE_RU_RU="\
src/sunstone/locale/languages/ru_RU.js \
src/sunstone/locale/languages/ru_datatable.txt"
SUNSTONE_PUBLIC_LOCALE_SK_SK="\
src/sunstone/locale/languages/sk_SK.js \
src/sunstone/locale/languages/sk_datatable.txt"
SUNSTONE_PUBLIC_LOCALE_ZH_CN="\
src/sunstone/locale/languages/zh_CN.js \
src/sunstone/locale/languages/zh_datatable.txt"
SUNSTONE_PUBLIC_LOCALE_ZH_TW="\
src/sunstone/locale/languages/zh_TW.js \
src/sunstone/locale/languages/zh_datatable.txt"
#-----------------------------------------------------------------------------
# OneGate files
#-----------------------------------------------------------------------------
ONEGATE_FILES="src/onegate/onegate-server.rb \
src/onegate/config.ru"
ONEGATE_BIN_FILES="src/onegate/bin/onegate-server"
ONEGATE_ETC_FILES="src/onegate/etc/onegate-server.conf"
#-----------------------------------------------------------------------------
# OneFlow files
#-----------------------------------------------------------------------------
ONEFLOW_FILES="src/flow/oneflow-server.rb \
src/flow/config.ru"
ONEFLOW_BIN_FILES="src/flow/bin/oneflow-server"
ONEFLOW_ETC_FILES="src/flow/etc/oneflow-server.conf"
ONEFLOW_LIB_FILES="src/flow/lib/grammar.rb \
src/flow/lib/grammar.treetop \
src/flow/lib/LifeCycleManager.rb \
src/flow/lib/log.rb \
src/flow/lib/models.rb \
src/flow/lib/strategy.rb \
src/flow/lib/validator.rb"
ONEFLOW_LIB_STRATEGY_FILES="src/flow/lib/strategy/straight.rb"
ONEFLOW_LIB_MODELS_FILES="src/flow/lib/models/role.rb \
src/flow/lib/models/service_pool.rb \
src/flow/lib/models/service.rb \
src/flow/lib/models/service_template_pool.rb \
src/flow/lib/models/service_template.rb"
#-----------------------------------------------------------------------------
# MAN files
#-----------------------------------------------------------------------------
MAN_FILES="share/man/oneacct.1.gz \
share/man/oneacl.1.gz \
share/man/onehost.1.gz \
share/man/oneimage.1.gz \
share/man/oneuser.1.gz \
share/man/onevm.1.gz \
share/man/onevnet.1.gz \
share/man/onetemplate.1.gz \
share/man/onegroup.1.gz \
share/man/onedb.1.gz \
share/man/onedatastore.1.gz \
share/man/onecluster.1.gz \
share/man/onezone.1.gz \
share/man/oneflow.1.gz \
share/man/oneflow-template.1.gz \
share/man/econe-allocate-address.1.gz \
share/man/econe-associate-address.1.gz \
share/man/econe-attach-volume.1.gz \
share/man/econe-create-keypair.1.gz \
share/man/econe-create-volume.1.gz \
share/man/econe-delete-keypair.1.gz \
share/man/econe-delete-volume.1.gz \
share/man/econe-describe-addresses.1.gz \
share/man/econe-describe-images.1.gz \
share/man/econe-describe-instances.1.gz \
share/man/econe-describe-keypairs.1.gz \
share/man/econe-describe-volumes.1.gz \
share/man/econe-detach-volume.1.gz \
share/man/econe-disassociate-address.1.gz \
share/man/econe-reboot-instances.1.gz \
share/man/econe-register.1.gz \
share/man/econe-release-address.1.gz \
share/man/econe-run-instances.1.gz \
share/man/econe-start-instances.1.gz \
share/man/econe-stop-instances.1.gz \
share/man/econe-terminate-instances.1.gz \
share/man/econe-upload.1.gz"
#-----------------------------------------------------------------------------
# Ruby VENDOR files
#-----------------------------------------------------------------------------
RBVMOMI_VENDOR_RUBY_FILES="share/vendor/ruby/gems/rbvmomi/LICENSE \
share/vendor/ruby/gems/rbvmomi/README.rdoc \
share/vendor/ruby/gems/rbvmomi/VERSION \
share/vendor/ruby/gems/rbvmomi/vmodl.db"
RBVMOMI_VENDOR_RUBY_LIB_FILES="share/vendor/ruby/gems/rbvmomi/lib/rbvmomi.rb"
RBVMOMI_VENDOR_RUBY_LIB_RBVMOMI_FILES="share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/basic_types.rb \
share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/connection.rb \
share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/deserialization.rb \
share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/fault.rb \
share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/pbm.rb \
share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/trivial_soap.rb \
share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/trollop.rb \
share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/type_loader.rb \
share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/vim.rb"
RBVMOMI_VENDOR_RUBY_LIB_RBVMOMI_UTILS_FILES="share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/utils/admission_control.rb \
share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/utils/deploy.rb \
share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/utils/leases.rb \
share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/utils/perfdump.rb"
RBVMOMI_VENDOR_RUBY_LIB_RBVMOMI_VIM_FILES="share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/vim/ComputeResource.rb \
share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/vim/Datacenter.rb \
share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/vim/Datastore.rb \
share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/vim/DynamicTypeMgrAllTypeInfo.rb \
share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/vim/DynamicTypeMgrDataTypeInfo.rb \
share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/vim/DynamicTypeMgrManagedTypeInfo.rb \
share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/vim/Folder.rb \
share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/vim/HostSystem.rb \
share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/vim/ManagedEntity.rb \
share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/vim/ManagedObject.rb \
share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/vim/ObjectContent.rb \
share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/vim/ObjectUpdate.rb \
share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/vim/OvfManager.rb \
share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/vim/PerfCounterInfo.rb \
share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/vim/PerformanceManager.rb \
share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/vim/PropertyCollector.rb \
share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/vim/ReflectManagedMethodExecuter.rb \
share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/vim/ResourcePool.rb \
share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/vim/ServiceInstance.rb \
share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/vim/Task.rb \
share/vendor/ruby/gems/rbvmomi/lib/rbvmomi/vim/VirtualMachine.rb"
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# INSTALL.SH SCRIPT
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# --- Create OpenNebula directories ---
if [ "$UNINSTALL" = "no" ] ; then
for d in $MAKE_DIRS; do
mkdir -p $DESTDIR$d
done
fi
# --- Install/Uninstall files ---
do_file() {
if [ "$UNINSTALL" = "yes" ]; then
rm $DESTDIR$2/`basename $1`
else
if [ "$LINK" = "yes" ]; then
ln -s $SRC_DIR/$1 $DESTDIR$2
else
cp $SRC_DIR/$1 $DESTDIR$2
fi
fi
}
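# For reference, a hypothetical call installs (or links/removes) one file:
#   do_file src/cli/onevm /usr/bin
# which expands to "cp $SRC_DIR/src/cli/onevm $DESTDIR/usr/bin" on a normal
# install, "ln -s ..." when -l was given, or "rm ..." when uninstalling.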
if [ "$CLIENT" = "yes" ]; then
    INSTALL_SET="${INSTALL_CLIENT_FILES[@]}"
elif [ "$ONEGATE" = "yes" ]; then
INSTALL_SET="${INSTALL_ONEGATE_FILES[@]}"
elif [ "$SUNSTONE" = "yes" ]; then
INSTALL_SET="${INSTALL_SUNSTONE_RUBY_FILES[@]} ${INSTALL_SUNSTONE_FILES[@]}"
elif [ "$ONEFLOW" = "yes" ]; then
INSTALL_SET="${INSTALL_ONEFLOW_FILES[@]}"
else
INSTALL_SET="${INSTALL_FILES[@]} \
${INSTALL_SUNSTONE_FILES[@]} ${INSTALL_ONEGATE_FILES[@]} \
${INSTALL_ONEFLOW_FILES[@]}"
fi
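# Each entry in the set is assumed to be a "VAR_NAME:dest_dir" pair, e.g.
# "CLI_BIN_FILES:$BIN_LOCATION"; the loop below splits on ':', prefixes the
# variable name with '$' and evals it to obtain the actual file list.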
for i in ${INSTALL_SET[@]}; do
SRC=$`echo $i | cut -d: -f1`
DST=`echo $i | cut -d: -f2`
eval SRC_FILES=$SRC
for f in $SRC_FILES; do
do_file $f $DST
done
done
if [ "$INSTALL_ETC" = "yes" ] ; then
if [ "$SUNSTONE" = "yes" ]; then
INSTALL_ETC_SET="${INSTALL_SUNSTONE_ETC_FILES[@]}"
elif [ "$ONEGATE" = "yes" ]; then
INSTALL_ETC_SET="${INSTALL_ONEGATE_ETC_FILES[@]}"
elif [ "$ONEFLOW" = "yes" ]; then
INSTALL_ETC_SET="${INSTALL_ONEFLOW_ETC_FILES[@]}"
else
INSTALL_ETC_SET="${INSTALL_ETC_FILES[@]} \
${INSTALL_SUNSTONE_ETC_FILES[@]} \
${INSTALL_ONEGATE_ETC_FILES[@]} \
${INSTALL_ONEFLOW_ETC_FILES[@]}"
fi
for i in ${INSTALL_ETC_SET[@]}; do
SRC=$`echo $i | cut -d: -f1`
DST=`echo $i | cut -d: -f2`
eval SRC_FILES=$SRC
OLD_LINK=$LINK
LINK="no"
for f in $SRC_FILES; do
do_file $f $DST
done
LINK=$OLD_LINK
done
fi
# --- Set ownership or remove OpenNebula directories ---
if [ "$UNINSTALL" = "no" ] ; then
for d in $CHOWN_DIRS; do
chown -R $ONEADMIN_USER:$ONEADMIN_GROUP $DESTDIR$d
done
else
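    # Remove directories deepest-first (the awk call reverses the list) so
    # that child directories are deleted before their parents and rmdir works.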
for d in `echo $DELETE_DIRS | awk '{for (i=NF;i>=1;i--) printf $i" "}'`; do
rmdir $d
done
fi
|
Terradue/one
|
install.sh
|
Shell
|
apache-2.0
| 92,124 |
#!/bin/bash
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -eo pipefail
## Get the directory of the build script
scriptDir=$(realpath $(dirname "${BASH_SOURCE[0]}"))
## cd to the parent directory, i.e. the root of the git repo
cd ${scriptDir}/..
# include common functions
source ${scriptDir}/common.sh
# Print out Maven & Java version
mvn -version
echo ${JOB_TYPE}
# attempt to install 3 times with exponential backoff (starting with 10 seconds)
retry_with_backoff 3 10 \
mvn install -B -V -ntp \
-DskipTests=true \
-Dclirr.skip=true \
-Denforcer.skip=true \
-Dmaven.javadoc.skip=true \
-Dgcloud.download.skip=true \
-T 1C
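# retry_with_backoff is provided by common.sh; a minimal sketch of such a
# helper might look like this (hypothetical, kept commented out so it does
# not shadow the sourced implementation):
#   retry_with_backoff() {
#     local attempts=$1 delay=$2; shift 2
#     local n=1
#     until "$@"; do
#       [ $n -ge $attempts ] && return 1
#       sleep $delay; delay=$((delay * 2)); n=$((n + 1))
#     done
#   }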
# if GOOGLE_APPLICATION_CREDENTIALS is specified as a relative path, prepend Kokoro root directory onto it
if [[ ! -z "${GOOGLE_APPLICATION_CREDENTIALS}" && "${GOOGLE_APPLICATION_CREDENTIALS}" != /* ]]; then
export GOOGLE_APPLICATION_CREDENTIALS=$(realpath ${KOKORO_GFILE_DIR}/${GOOGLE_APPLICATION_CREDENTIALS})
fi
RETURN_CODE=0
set +e
case ${JOB_TYPE} in
test)
mvn test -B -ntp -Dclirr.skip=true -Denforcer.skip=true
RETURN_CODE=$?
;;
lint)
mvn com.coveo:fmt-maven-plugin:check -B -ntp
RETURN_CODE=$?
;;
javadoc)
mvn javadoc:javadoc javadoc:test-javadoc -B -ntp
RETURN_CODE=$?
;;
integration)
mvn -B ${INTEGRATION_TEST_ARGS} \
-ntp \
-Penable-integration-tests \
-DtrimStackTrace=false \
-Dclirr.skip=true \
-Denforcer.skip=true \
-fae \
verify
RETURN_CODE=$?
;;
graalvm)
# Run Unit and Integration Tests with Native Image
mvn -B ${INTEGRATION_TEST_ARGS} -ntp -Pnative -Penable-integration-tests test
RETURN_CODE=$?
;;
samples)
SAMPLES_DIR=samples
  # Only run ITs in samples/snapshot on presubmit PRs; run ITs in all 3 samples/ subdirectories otherwise.
if [[ ! -z ${KOKORO_GITHUB_PULL_REQUEST_NUMBER} ]]
then
SAMPLES_DIR=samples/snapshot
fi
if [[ -f ${SAMPLES_DIR}/pom.xml ]]
then
for FILE in ${KOKORO_GFILE_DIR}/secret_manager/*-samples-secrets; do
[[ -f "$FILE" ]] || continue
source "$FILE"
done
pushd ${SAMPLES_DIR}
mvn -B \
-ntp \
-DtrimStackTrace=false \
-Dclirr.skip=true \
-Denforcer.skip=true \
-fae \
verify
RETURN_CODE=$?
popd
else
echo "no sample pom.xml found - skipping sample tests"
fi
;;
clirr)
mvn -B -ntp -Denforcer.skip=true clirr:check
RETURN_CODE=$?
;;
*)
;;
esac
if [ "${REPORT_COVERAGE}" == "true" ]
then
bash ${KOKORO_GFILE_DIR}/codecov.sh
fi
# fix output location of logs
bash .kokoro/coerce_logs.sh
if [[ "${ENABLE_FLAKYBOT}" == "true" ]]
then
chmod +x ${KOKORO_GFILE_DIR}/linux_amd64/flakybot
${KOKORO_GFILE_DIR}/linux_amd64/flakybot -repo=googleapis/java-orchestration-airflow
fi
echo "exiting with ${RETURN_CODE}"
exit ${RETURN_CODE}
|
googleapis/java-orchestration-airflow
|
.kokoro/build.sh
|
Shell
|
apache-2.0
| 3,471 |
#!/bin/bash
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
set -e
set -x
source tensorflow/tools/ci_build/release/common.sh
install_ubuntu_16_pip_deps pip3.8
# Update bazel
update_bazel_linux
# Export required variables for running pip.sh
export OS_TYPE="UBUNTU"
export CONTAINER_TYPE="GPU"
export TF_PYTHON_VERSION='python3.8'
# Run configure.
export PYTHON_BIN_PATH=$(which ${TF_PYTHON_VERSION})
yes "" | "$PYTHON_BIN_PATH" configure.py
# Get the default test targets for bazel.
source tensorflow/tools/ci_build/build_scripts/DEFAULT_TEST_TARGETS.sh
# Export optional variables for running pip.sh
export TF_TEST_FILTER_TAGS='gpu,requires-gpu,-no_gpu,-no_oss,-oss_serial,-no_oss_py38,-no_cuda11'
export TF_BUILD_FLAGS="--config=release_gpu_linux "
export TF_TEST_FLAGS="--test_tag_filters=${TF_TEST_FILTER_TAGS} --build_tag_filters=${TF_TEST_FILTER_TAGS} \
--distinct_host_configuration=false \
--action_env=TF_CUDA_VERSION=11 --action_env=TF_CUDNN_VERSION=8 --test_env=TF2_BEHAVIOR=1 \
--config=cuda --test_output=errors --local_test_jobs=4 --test_lang_filters=py \
--verbose_failures=true --keep_going --define=no_tensorflow_py_deps=true \
--run_under=//tensorflow/tools/ci_build/gpu_build:parallel_gpu_execute "
export TF_TEST_TARGETS="${DEFAULT_BAZEL_TARGETS} -//tensorflow/lite/... "
export TF_PIP_TESTS="test_pip_virtualenv_non_clean test_pip_virtualenv_clean"
#export IS_NIGHTLY=0 # Not nightly; uncomment if building from tf repo.
export TF_PROJECT_NAME="tensorflow_gpu"
export TF_PIP_TEST_ROOT="pip_test"
# To build both tensorflow and tensorflow-gpu pip packages
export TF_BUILD_BOTH_GPU_PACKAGES=1
./tensorflow/tools/ci_build/builds/pip_new.sh
|
aam-at/tensorflow
|
tensorflow/tools/ci_build/rel/ubuntu/gpu_py38_pip.sh
|
Shell
|
apache-2.0
| 2,306 |
#!/bin/bash
deployment_home=$1
query=$2
expected_count=$3
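# Example invocation (hypothetical paths and query):
#   ./check_mongo_results.sh /opt/datablox "db.events.count()" 42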
count=`$deployment_home/mongodb-2.0/bin/mongo --eval "$2" | grep -E '^[0-9]'`
rc=$?
if [[ "$rc" != "0" ]]; then
echo "Query $query failed"
exit 1
fi
if [[ "$count" != "$expected_count" ]]; then
echo "Got count $count, expecting $expected_count"
exit 1
fi
echo "Mongo db query returned $count records, as expected"
exit 0
|
mpi-sws-rse/datablox
|
scripts/check_mongo_results.sh
|
Shell
|
apache-2.0
| 390 |
./gradlew clean testDebugUnitTest connectedDebugAndroidTest lintDebug assembleDebug --no-daemon
|
jainsahab/AndroidSnooper
|
prePush.sh
|
Shell
|
apache-2.0
| 96 |
#!/usr/bin/env bash
export SQLITE_SCRIPT=quill-jdbc/src/test/resources/sql/sqlite-schema.sql
export MYSQL_SCRIPT=quill-sql/src/test/sql/mysql-schema.sql
export POSTGRES_SCRIPT=quill-sql/src/test/sql/postgres-schema.sql
export SQL_SERVER_SCRIPT=quill-sql/src/test/sql/sqlserver-schema.sql
export ORACLE_SCRIPT=quill-sql/src/test/sql/oracle-schema.sql
export CASSANDRA_SCRIPT=quill-cassandra/src/test/cql/cassandra-schema.cql
function get_host() {
if [ -z "$1" ]; then
echo "127.0.0.1"
else
echo "$1"
fi
}
# usage: setup_x <script>
function setup_sqlite() {
# DB File in quill-jdbc
DB_FILE=quill-jdbc/quill_test.db
rm -f $DB_FILE
sqlite3 $DB_FILE < $1
chmod a+rw $DB_FILE
# DB File in quill-jdbc-monix
DB_FILE=quill-jdbc-monix/quill_test.db
rm -f $DB_FILE
sqlite3 $DB_FILE < $1
chmod a+rw $DB_FILE
echo "Sqlite ready!"
}
function setup_mysql() {
connection=$2
if [[ "$2" == "mysql" ]]; then
connection="mysql -proot"
hacks="mysql -h mysql -u root -proot -e \"ALTER USER 'root'@'%' IDENTIFIED BY ''\""
fi
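    # Assumption: when the host is the docker-compose "mysql" service, root
    # initially has password "root"; the flag injected above supplies it and
    # the ALTER USER hack strips it so the passwordless connections below work.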
echo "Waiting for MySql"
until mysql -h $connection -u root -e "select 1" &> /dev/null; do
sleep 5;
done
echo "Connected to MySql"
eval $hacks
mysql -h $2 -u root -e "CREATE DATABASE quill_test;"
mysql -h $2 -u root quill_test < $1
mysql -h $2 -u root -e "CREATE USER 'finagle'@'%' IDENTIFIED BY 'finagle';"
mysql -h $2 -u root -e "GRANT ALL PRIVILEGES ON * . * TO 'finagle'@'%';"
mysql -h $2 -u root -e "FLUSH PRIVILEGES;"
}
function setup_postgres() {
host=$(get_host $2)
echo "Waiting for Postgres"
    until psql -h $host -U postgres -c "select 1" &> /dev/null; do
sleep 5;
done
echo "Connected to Postgres"
    psql -h $host -U postgres -c "CREATE DATABASE quill_test"
    psql -h $host -U postgres -d quill_test -a -q -f $1
}
function setup_cassandra() {
host=$(get_host $2)
echo "Waiting for Cassandra"
    until cqlsh $host -e "describe cluster" &> /dev/null; do
sleep 5;
done
echo "Connected to Cassandra"
    cqlsh $host -f $1
}
function setup_sqlserver() {
host=$(get_host $2)
echo "Waiting for SqlServer"
    until /opt/mssql-tools/bin/sqlcmd -S $host -U SA -P "QuillRocks!" -Q "select 1" &> /dev/null; do
sleep 5;
done
echo "Connected to SqlServer"
    /opt/mssql-tools/bin/sqlcmd -S $host -U SA -P "QuillRocks!" -Q "CREATE DATABASE quill_test"
    /opt/mssql-tools/bin/sqlcmd -S $host -U SA -P "QuillRocks!" -d quill_test -i $1
}
# Do a simple netcat poll to make sure the oracle database is ready.
# All internal database creation and schema setup scripts are handled
# by the container and docker-compose steps.
function setup_oracle() {
while ! nc -z $2 1521; do
echo "Waiting for Oracle"
sleep 5;
done;
sleep 5;
echo "Connected to Oracle"
sleep 5
}
function send_script() {
echo "Send Script Args: 1: $1 - 2 $2 - 3: $3"
docker cp $2 "$(docker-compose ps -q $1)":/$3
}
export -f setup_sqlite
export -f setup_mysql
export -f setup_postgres
export -f setup_cassandra
export -f setup_sqlserver
export -f setup_oracle
export -f send_script
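# Example usage from a test-setup script (hypothetical service hostnames):
#   setup_postgres $POSTGRES_SCRIPT postgres
#   setup_mysql $MYSQL_SCRIPT mysql
#   setup_cassandra $CASSANDRA_SCRIPT cassandra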
|
mentegy/quill
|
build/setup_db_scripts.sh
|
Shell
|
apache-2.0
| 3,208 |
#!/bin/bash
# Copyright 2019 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
. ${ROOT}/scripts/all-utilities.sh || { echo "Cannot load Bash utilities"; exit 1; }
# This script will create a tag for a release branch.
function usage() {
[[ -n "${1}" ]] && echo "${1}"
cat <<EOF
usage: ${BASH_SOURCE[0]} -t <tag git ref> -b <build git ref> [-n <current version number>]"
tag git ref: commit which to tag with the release
(typically release branch HEAD)
build git ref: commit at which the build was produced
this is typically used when subsequent commits (such as changelog)
were made in the release branch after the release build was produced.
example:
${BASH_SOURCE[0]} \\
-t HEAD \\
-b be2eb101f1b1b3e671e852656066c2909c41049b
EOF
exit 1
}
BUILD_REF=''
TAG_REF=''
while getopts :b:t:n: arg; do
case ${arg} in
b) BUILD_REF="${OPTARG}";;
t) TAG_REF="${OPTARG}";;
n) VERSION="${OPTARG}";;
*) usage "Invalid option: -${OPTARG}";;
esac
done
[[ -n "${BUILD_REF}" ]] \
|| usage "Please provide the release build ref via '-b' parameter."
[[ -n "${TAG_REF}" ]] \
|| usage "Please provide the release tag ref via '-t' parameter."
BUILD_SHA=$(git rev-parse --verify "${BUILD_REF}") \
|| usage "Invalid Git reference \"${BUILD_REF}\"."
TAG_SHA=$(git rev-parse --verify "${TAG_REF}") \
|| usage "Invalid Git reference \"${TAG_REF}\"."
if [ "${VERSION}" = "" ]; then
VERSION="$(command cat ${ROOT}/VERSION)" \
|| usage "Cannot determine release version (${ROOT}/VERSION)."
fi
# Prefix 'v' for the tag name
VERSION_TAG="v${VERSION}"
set -x
git tag --annotate --force --file=- ${VERSION_TAG} ${TAG_SHA} <<EOF
ESPv2 Release ${VERSION}
The release build was produced at ${BUILD_SHA}.
The Docker image released is:
$(get_proxy_image_release_name):${VERSION}
EOF
# Check the version is correct.
git show -q ${VERSION_TAG}
{ set +x; } 2>/dev/null
printf "\\e[31m
You are about to push the tag ${VERSION_TAG} for ${TAG_SHA} to origin.
Once pushed, the tag cannot be removed. Are you sure? [Y/N] \\e[0m"
read yn
if [[ "${yn}" != "y" && "${yn}" != "Y" ]]; then
echo "Aborting."
exit 1
fi
# Push the tag to the server.
set -x
git push upstream ${VERSION_TAG}
{ set +x; } 2>/dev/null
printf '\e[31m
***************************************************************************
* Please paste the script output verbatim into the release bug. *
***************************************************************************
\e[0m'
|
GoogleCloudPlatform/esp-v2
|
scripts/release/release-tag-git.sh
|
Shell
|
apache-2.0
| 3,061 |
#!/bin/bash
# Script for testing MQTT channel on Linux machine
# mosquitto_sub is using local MQTT broker to communicate to IQRF daemon
# Tested on Raspberry PI 3, Raspbian Lite
# Tested on AAEON UP, UbiLinux
echo "Listening for DPA responses from IQRF network:"
mosquitto_sub -v -t Iqrf/DpaResponse | xargs -d $'\n' -L1 bash -c 'date "+%Y-%m-%d %T.%3N $0"'
|
iqrfsdk/iqrf-daemon
|
examples/iqrf_thermometer_example/mqtt-sub.sh
|
Shell
|
apache-2.0
| 359 |
#!/bin/bash
# ----------------------------------------------------------------------------
#
# Package : flagged-respawn
# Version : v1.0.1
# Source : https://github.com/gulpjs/flagged-respawn.git
# Tested on : RHEL 7.6
# Node Version : v12.16.1
# Maintainer : Amol Patil <[email protected]>
#
# Disclaimer: This script has been tested in non-root mode on given
# ========== platform using the mentioned version of the package.
# It may not work as expected with newer versions of the
# package and/or distribution. In such case, please
# contact "Maintainer" of this script.
# ----------------------------------------------------------------------------
set -e
# Install all dependencies.
sudo yum clean all
sudo yum -y update
PACKAGE_VERSION=v1.0.1
#Install nvm
if [ ! -d ~/.nvm ]; then
#Install the required dependencies
sudo yum install -y openssl-devel.ppc64le curl git
curl https://raw.githubusercontent.com/creationix/nvm/v0.33.0/install.sh | bash
fi
source ~/.nvm/nvm.sh
#Install node version v12.16.1
if [ `nvm list | grep -c "v12.16.1"` -eq 0 ]
then
nvm install v12.16.1
fi
nvm alias default v12.16.1
git clone https://github.com/gulpjs/flagged-respawn.git && cd flagged-respawn
git checkout $PACKAGE_VERSION
sed -i -e "135s/expect(err.signal).toEqual(null);/expect(err.signal).toEqual('SIGHUP');/g" test/index.js
npm install
npm test
|
ppc64le/build-scripts
|
f/flagged-respawn/flagged-respawn_rhel_7.6.sh
|
Shell
|
apache-2.0
| 1,457 |
#!/bin/bash
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
B=`basename $0 .sh`
cd `dirname $0`
source ./rungraphd
# Unique and ~=
#
# A unique expression that versions another GUID must be
# unique even before the record that it versions becomes
# invalid. (The opposite is hard to implement - if I had
# a good way of implementing it, I'd do it.)
#
# If that bugs you, and if what you're really doing is just
# rewriting or adding something, you may want to look into
# using key= instead of unique= .
#
rm -rf $D
rungraphd -d${D} -bty <<- 'EOF'
write("x" value="Foo")
write(value="Bar")
read ( value="Bar"
datatype=string
(<-left
datatype=string
typeguid=00000012400034568000000000000001
right->(value="Foo" datatype=string)))
write (
value="Bar"
datatype=string key=(value)
(<-right
value="baz"
datatype=string
type="has_key"
unique=(typeguid right left)
left->(
value="Foo"
datatype=string
key=(value))
)
)
write (
value="Bar" datatype=string unique=(value)
(<-left
value="baz"
datatype=string
typeguid=00000012400034568000000000000002
unique=(typeguid right left)
right->(
value="Foo"
datatype=string
unique=(value))
)
)
write (
value="Bar"
datatype=string
key=(value)
(<-left
value="baz"
datatype=string
type="has_name"
right->(value="Foo"
datatype=string
key=(value) ) ) )
write (
type="boo"
key=(typeguid))
write (
type="boo"
key=(typeguid))
EOF
rm -rf $D
|
googlearchive/graphd
|
test/unit/unique4.sh
|
Shell
|
apache-2.0
| 1,998 |
#!/bin/bash
##
# Copyright IBM Corporation 2017
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
# If any commands fail, we want the shell script to exit immediately.
set -e
# Delete services first
echo "Deleting services..."
bx service delete -f "BluePic-Cloudant"
bx service delete -f "BluePic-Object-Storage"
bx service delete -f "BluePic-App-ID"
bx service delete -f "BluePic-IBM-Push"
bx service delete -f "BluePic-Weather-Company-Data"
bx service delete -f "BluePic-Visual-Recognition"
echo "Services deleted."
# Create services
echo "Creating services..."
bx service create cloudantNoSQLDB Lite "BluePic-Cloudant"
bx service create Object-Storage Free "BluePic-Object-Storage"
bx service create AppID "Graduated tier" "BluePic-App-ID"
bx service create imfpush Basic "BluePic-IBM-Push"
bx service create weatherinsights Free-v2 "BluePic-Weather-Company-Data"
bx service create watson_vision_combined free "BluePic-Visual-Recognition"
echo "Services created."
|
IBM-Swift/BluePic
|
Cloud-Scripts/Deployment/create_services.sh
|
Shell
|
apache-2.0
| 1,466 |
#!/bin/bash
#
# File managed by puppet. All modifications will be lost.
# Wrapper calling check_prometheus_metric.sh with a hardcoded Prometheus query,
# because the query is incorrectly parsed when passed via vars.check_prometheus_metric_query
PROGPATH=$(dirname $0)
while getopts ':H:n:c:w:' OPT "$@"
do
case ${OPT} in
H) PROMETHEUS_SERVER="$OPTARG" ;;
n) METRIC_NAME="$OPTARG" ;;
c) CRITICAL_THRESHOLD=${OPTARG}
;;
w) WARNING_THRESHOLD=${OPTARG}
;;
*) echo "Invalid option ${OPT}"
exit 1
;;
esac
done
QUERY='sum(sql_pg_stat_replication{instance="belvedere.internal.softwareheritage.org", host=":5433", application_name="softwareheritage_replica"})'
${PROGPATH}/check_prometheus_metric.sh -H ${PROMETHEUS_SERVER} -q "${QUERY}" -w ${WARNING_THRESHOLD} -c ${CRITICAL_THRESHOLD} -n "${METRIC_NAME}" -t vector
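# Example invocation (hypothetical host and thresholds):
#   ./check_belvedere_replication_lag.sh -H prometheus.example.org:9090 \
#     -n replication_bytes -w 1000000 -c 10000000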
|
SoftwareHeritage/puppet-swh-site
|
site-modules/profile/files/icinga2/plugins/check_belvedere_replication_lag.sh
|
Shell
|
apache-2.0
| 889 |
#!/bin/sh
#
# Copyright 2010 The Apache Software Foundation
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
rdoc --main HBaseClient --op rdoc --charset utf-8 -d -S -U -N -a
|
hbase-sculptor/sculptor
|
framework/ruby/rdocgen.sh
|
Shell
|
apache-2.0
| 914 |
#!/usr/bin/env bash
# global parameters
g_tmp_folder="ncdc_tmp";
g_output_folder="ncdc_data";
g_remote_host="ftp.ncdc.noaa.gov";
g_remote_path="pub/data/noaa";
# $1: folder_path
function create_folder {
if [ -d "$1" ]; then
rm -rf "$1";
fi
mkdir "$1"
}
# $1: year to download
function download_data {
local source_url="ftp://$g_remote_host/$g_remote_path/$1"
wget -r -c -q --no-parent -P "$g_tmp_folder" "$source_url";
}
# $1: year to process
function process_data {
local year="$1"
local local_path="$g_tmp_folder/$g_remote_host/$g_remote_path/$year"
local tmp_output_file="$g_tmp_folder/$year"
for file in $local_path/*; do
gunzip -c $file >> "$tmp_output_file"
done
zipped_file="$g_output_folder/$year.gz"
gzip -c "$tmp_output_file" >> "$zipped_file"
echo "created file: $zipped_file"
rm -rf "$local_path"
rm "$tmp_output_file"
}
# $1 - start year
# $2 - finish year
function main {
local start_year=2014
local finish_year=2014
if [ -n "$1" ]; then
start_year=$1
fi
if [ -n "$2" ]; then
finish_year=$2
fi
create_folder $g_tmp_folder
create_folder $g_output_folder
for year in `seq $start_year $finish_year`; do
download_data $year
process_data $year
done
rm -rf "$g_tmp_folder"
}
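# Example: fetch and repack the 2010-2012 datasets (both years default to 2014):
#   ./ncdc_data.sh 2010 2012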
main $1 $2
|
rahulkumar-aws/HadoopClient
|
src/main/resources/ncdc_data.sh
|
Shell
|
apache-2.0
| 1,374 |
#!/usr/bin/env bash
# Cause the script to fail if any subcommand fails
set -e
pushd .
if [ "$JAVA_HOME" != "" ]; then
echo "JAVA_HOME is set"
exit
fi
java_version=$1
arch=$2
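# Example invocation (hypothetical version and arch): ./installjdk.sh 11.0.2 x64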
osname=`uname -s`
if [ "$osname" = "Darwin" ]; then
echo "macOS not supported, relying on the machine providing java itself"
exit 1
else
platformarch="linux-$arch"
fi
echo "PlatformArch: $platformarch"
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
output_dir="$DIR/java"
url="https://netcorenativeassets.blob.core.windows.net/resource-packages/external/linux/java/jdk-${java_version}_${platformarch}_bin.tar.gz"
echo "Downloading from: $url"
tmp="$(mktemp -d -t install-jdk.XXXXXX)"
cleanup() {
exitcode=$?
if [ $exitcode -ne 0 ]; then
echo "Failed to install java with exit code: $exitcode"
fi
rm -rf "$tmp"
exit $exitcode
}
trap "cleanup" EXIT
cd "$tmp"
curl -Lsfo $(basename $url) "$url" --retry 5
echo "Installing java from $(basename $url) $url"
mkdir $output_dir
echo "Unpacking to $output_dir"
tar --strip-components 1 -xzf "jdk-${java_version}_${platformarch}_bin.tar.gz" --no-same-owner --directory "$output_dir"
popd
|
aspnet/AspNetCore
|
eng/helix/content/installjdk.sh
|
Shell
|
apache-2.0
| 1,161 |
#!/bin/bash
# If set, modify main.bash
if [ $SEND_AS_SINGLEJOB == "true" ]; then
# Go through all variables named lineN, where N: 1..integer
linenum=1
while [ 1 ]; do
eval line=( \${line$linenum} )
if [ ! -z $line ]; then
#echo "filled" ${line[@]}
line="${line[@]}"
sed -i -e "$((linenum+1))s;$;${line}\n;" $WORK/main.bash
linenum=$(( linenum+1 ))
else
#echo "empty" ${line[@]}
break
fi
done
# Else, modify model run
elif [ $SEND_AS_MULTIJOB == "true" ]; then
# Go through all variables named lineN, where N: 1..integer
linenum=1
while [ 1 ]; do
eval line=( \${line$linenum} )
if [ ! -z $line ]; then
#echo "filled" ${line[@]}
line="${line[@]}"
sed -i -e "$((linenum+1))s;$;${line}\n;" $SCRI/run.bash
linenum=$(( linenum+1 ))
else
#echo "empty" ${line[@]}
break
fi
done
fi
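# Example (assumed environment): append two batch directives to $WORK/main.bash
#   export SEND_AS_SINGLEJOB=true
#   export line1="#SBATCH --nodes=4"
#   export line2="#SBATCH --time=01:00:00"
#   ./util_batchjob.bash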
|
pirkkao/OpenEPS
|
bin/util_batchjob.bash
|
Shell
|
apache-2.0
| 875 |
#!/usr/bin/env bash
set -e
set -x
cp -r gems-src-in/. gems-src-out
cd gems-src-out/src/bosh_openstack_cpi
git config --global user.email [email protected]
git config --global user.name CI
git fetch origin master:refs/remotes/origin/master
git rebase origin/master
echo "Looking for new gem versions"
bundle update
git diff --exit-code Gemfile.lock || exit_code=$?
if [ -v exit_code ]; then
echo "Running unit tests"
bundle exec rspec spec/unit/*
echo "Creating new pull request"
git add .
git commit -m "Bump gems"
else
echo "No new gem versions found"
fi
|
cloudfoundry-incubator/bosh-openstack-cpi-release
|
ci/pipelines/auto-update/tasks/update-gems.sh
|
Shell
|
apache-2.0
| 570 |
# -----------------------------------------------------------------------------
#
# Package : nocache
# Version : 2.0.0
# Source repo : https://github.com/helmetjs/nocache
# Tested on : RHEL 8.3
# Script License: Apache License, Version 2 or later
# Maintainer : BulkPackageSearch Automation <[email protected]>
#
# Disclaimer: This script has been tested in root mode on given
# ========== platform using the mentioned version of the package.
# It may not work as expected with newer versions of the
# package and/or distribution. In such case, please
# contact "Maintainer" of this script.
#
# ----------------------------------------------------------------------------
PACKAGE_NAME=nocache
PACKAGE_VERSION=2.0.0
PACKAGE_URL=https://github.com/helmetjs/nocache
yum -y update && yum install -y yum-utils nodejs nodejs-devel nodejs-packaging npm python38 python38-devel ncurses git gcc gcc-c++ libffi libffi-devel ncurses git jq make cmake
yum-config-manager --add-repo http://rhn.pbm.ihost.com/rhn/latest/8.3Server/ppc64le/appstream/
yum-config-manager --add-repo http://rhn.pbm.ihost.com/rhn/latest/8.3Server/ppc64le/baseos/
yum-config-manager --add-repo http://rhn.pbm.ihost.com/rhn/latest/7Server/ppc64le/optional/
yum install -y firefox liberation-fonts xdg-utils && npm install n -g && n latest && npm install -g npm@latest && export PATH="$PATH" && npm install --global yarn grunt-bump xo testem acorn
OS_NAME=`python3 -c "os_file_data=open('/etc/os-release').readlines();os_info = [i.replace('PRETTY_NAME=','').strip() for i in os_file_data if i.startswith('PRETTY_NAME')];print(os_info[0])"`
HOME_DIR=`pwd`
if ! git clone $PACKAGE_URL $PACKAGE_NAME; then
echo "------------------$PACKAGE_NAME:clone_fails---------------------------------------"
echo "$PACKAGE_URL $PACKAGE_NAME" > /home/tester/output/clone_fails
echo "$PACKAGE_NAME | $PACKAGE_URL | $PACKAGE_VERSION | $OS_NAME | GitHub | Fail | Clone_Fails" > /home/tester/output/version_tracker
exit 0
fi
cd $HOME_DIR/$PACKAGE_NAME
git checkout $PACKAGE_VERSION
PACKAGE_VERSION=$(jq -r ".version" package.json)
# run the test command from test.sh
if ! { npm install && npm audit fix && npm audit fix --force; }; then
echo "------------------$PACKAGE_NAME:install_fails-------------------------------------"
echo "$PACKAGE_URL $PACKAGE_NAME"
echo "$PACKAGE_NAME | $PACKAGE_URL | $PACKAGE_VERSION | $OS_NAME | GitHub | Fail | Install_Fails"
exit 0
fi
cd $HOME_DIR/$PACKAGE_NAME
if ! npm test; then
echo "------------------$PACKAGE_NAME:install_success_but_test_fails---------------------"
echo "$PACKAGE_URL $PACKAGE_NAME"
echo "$PACKAGE_NAME | $PACKAGE_URL | $PACKAGE_VERSION | $OS_NAME | GitHub | Fail | Install_success_but_test_Fails"
exit 0
else
echo "------------------$PACKAGE_NAME:install_&_test_both_success-------------------------"
echo "$PACKAGE_URL $PACKAGE_NAME"
echo "$PACKAGE_NAME | $PACKAGE_URL | $PACKAGE_VERSION | $OS_NAME | GitHub | Pass | Both_Install_and_Test_Success"
exit 0
fi
|
ppc64le/build-scripts
|
n/nocache/nocache_rhel_8.3.sh
|
Shell
|
apache-2.0
| 3,048 |
#!/bin/bash
# function to test exist status
testexit()
{
EXIT_STATUS=$?
LOG_FILE="$1"
if [ "x${EXIT_STATUS}" != "x0" ]; then
echo "COMMAND EXITED WITH STATUS ${EXIT_STATUS}"
echo "LOG OUTPUT:"
cat ${LOG_FILE}
exit ${EXIT_STATUS}
fi
}
pushd $(dirname "${0}") > /dev/null
BASEDIR=$(pwd -L)
popd > /dev/null
echo "${BASEDIR}"
APP_NAME=gprom
VERSION=`${BASEDIR}/../configure --version | grep -o '[0-9]\+\.[0-9]\+\.[0-9]\+'`
TMP_DIR=${BASEDIR}/../dpkg
PACKAGE_FILES=${BASEDIR}/debfiles
TAR_NAME=${APP_NAME}-${VERSION}.tar.gz
TAR_PACKAGE=${BASEDIR}/../${TAR_NAME}
# Create a deb package
rm -rf ${TMP_DIR}
mkdir -p ${TMP_DIR}
cp ${TAR_PACKAGE} ${TMP_DIR}/
echo "---------- UNTAR"
tar --directory ${TMP_DIR}/ -xzf ${TMP_DIR}/${TAR_NAME} > log.debpackage 2>&1
testexit log.debpackage
echo "---------- PREPARE"
pushd ${TMP_DIR}/${APP_NAME}-${VERSION}/
echo "dh_make --single --copyright gpl -e [email protected] -f ${BASEDIR}/../${TAR_NAME} -y"
dh_make --single --copyright gpl -e [email protected] -f ${BASEDIR}/../${TAR_NAME} -y > log.debpackage 2>&1
testexit log.debpackage
popd
cp ${PACKAGE_FILES}/changelog ${PACKAGE_FILES}/control ${PACKAGE_FILES}/copyright ${PACKAGE_FILES}/rules ${TMP_DIR}/${APP_NAME}-${VERSION}/debian/ > log.debpackage 2>&1
testexit log.debpackage
#exit 0
echo "---------- BUILD PACKAGE"
pushd ${TMP_DIR}/${APP_NAME}-${VERSION}/
pwd
rm ./debian/README.Debian debian/*.ex debian/*.EX > log.debpackage 2>&1
testexit log.debpackage
dpkg-buildpackage -b -rfakeroot > log.debpackage 2>&1
testexit log.debpackage
popd
cp ${TMP_DIR}/*.deb ${BASEDIR}/
testexit log.debpackage
rm -rf ${TMP_DIR}
|
IITDBGroup/gprom
|
packaging/debpackage.sh
|
Shell
|
apache-2.0
| 1,609 |
#! /bin/bash
#
# Copyright (C) 2018 University of Oregon
#
# You may distribute under the terms of either the GNU General Public
# License or the Apache License, as specified in the LICENSE file.
#
# For more information, see the LICENSE file.
#
#
# set -x
ping -W 1 -c 1 $(hostname) > /dev/null 2>&1
if [[ $? -ne 0 ]]; then
echo "Unable to ping $(hostname)"
echo "This hostname should be added to the /etc/hosts file"
fi
numHost=$(grep -v ^::1 /etc/hosts | grep -v ^# | grep -w $(hostname) | wc -l)
if [[ $numHost -eq 0 ]]; then
echo "Hostname $(hostname) should be added to the /etc/hosts file"
elif [[ $numHost -gt 1 ]]; then
echo "Hostname $(hostname) should only be defined once in the /etc/hosts file"
fi
if hash ldd 2> /dev/null; then
ldd /vnmr/bin/Vnmrbg | grep -i "not found" > /dev/null 2>&1
if [[ $? -eq 0 ]]; then
echo "Some libraries appear to be missing."
echo "They appear as \"not found\" in the following output"
ldd /vnmr/bin/Vnmrbg
echo ""
echo "Use /vnmr/bin/installpkgs"
echo "to install the missing libraries"
echo ""
fi
ldd /vnmr/bin/Vnmrbg | grep -i "not a dynamic executable" > /dev/null 2>&1
if [[ $? -eq 0 ]]; then
echo "Some libraries appear to be missing."
echo "Use /vnmr/bin/installpkgs"
echo "to install the missing libraries"
echo ""
fi
fi
echo ""
echo "$0 complete"
|
OpenVnmrJ/OpenVnmrJ
|
src/scripts/ovjDiag.sh
|
Shell
|
apache-2.0
| 1,372 |
#!/bin/bash
echo "This script will provision a chef server, grab the validation.pem file, "
echo "and upload the contents of the repo to that server"
echo ""
read -p "Press any key to begin the setup..."
echo ""
# make temp dir to store validation.pem file. Delete the old one if it's there
rm -rf .tmp
mkdir .tmp
echo ""
echo "Vagrant up-ing the chef server"
echo ""
cd systems/0.chef_server
vagrant destroy -f
vagrant up
echo ""
echo "Grabbing the admin.pem and validation.pem file off the server"
echo ""
cd ../..
scp -o StrictHostKeyChecking=no -i ~/.vagrant.d/insecure_private_key [email protected]:/home/vagrant/chef-validator.pem .tmp/validation.pem
scp -o StrictHostKeyChecking=no -i ~/.vagrant.d/insecure_private_key [email protected]:/home/vagrant/admin.pem .tmp/admin.pem
echo ""
echo "Uploading contents of chef-repo to the server"
echo ""
knife upload / -c knife_config/knife.rb
|
tas50/vagrant_chef_server
|
setup_server.sh
|
Shell
|
apache-2.0
| 909 |
#!/bin/bash
# species_mapper.sh - This script maps a list of request filenames to
# an array. Receives an occurrences file and the minimum number of occurrences
# that a species need to have in the occurrences file to be modeled as input.
#
# More about mappers:
# http://swift-lang.org/guides/trunk/userguide/userguide.html#_mappers
HELP_MSG="
$(sed -n '2,/^#$/p' "$0" | sed 's/^# //; $d')
Usage: $(basename "$0") -i occurrences_file [-m number_of_occurrences]
"
# Process mapper arguments
while [ $# -gt 0 ]; do
case "$1" in
-h | --help)
echo "$HELP_MSG"
exit 0
;;
-i) i="$2" ;;
-m) min="$2" ;;
*)
echo "$0: bad mapper args" 1>&2
exit 1
;;
esac
shift 2
done
# Yields the request filenames array
awk -F'\t' '/^[0-9]/ { print $2 }' "$i" |
sed 's/ /_/; s/,//; s/^\n//;' |
sort |
uniq -c |
awk -v "min_occ=$min" '{ if ($1 > min_occ) print $2 }' |
awk '{print "["NR"] request_"$1".txt" }'
|
sibbr/sdm-workflows
|
species_mapper.sh
|
Shell
|
apache-2.0
| 925 |
#!/bin/bash -eu
kube_bins=(
"kubelet"
"kubeadm"
"kubectl"
)
cni_bins=(
"cnitool"
"flannel"
"tuning"
"bridge"
"ipvlan"
"loopback"
"macvlan"
"ptp"
"dhcp"
"host-local"
)
if [ "$#" -gt 0 ] ; then
case "$1" in
help)
echo "You are looking at this because you probably want to learn how to revert what has been done."
echo
echo "You first want to stop disable and stop kubelet like this:"
echo
echo "> sudo systemctl disable kubelet && sudo systemctl stop kubelet"
echo
echo "Next, you can run this to remove all local containers owned by Kubernetes:"
echo
echo "> sudo docker rm --force --volumes \$(sudo docker ps --filter label=io.kubernetes.pod.name --all --quiet)"
echo
echo "And now you should remove files created by kubeadm simply like this:"
echo
echo "> sudo rm -rf /etc/kubernetes"
echo
echo "Finally you can uninstall the binaries and configuration files we have installed with this command:"
echo
echo "> sudo docker run -v /usr/local:/target gcr.io/kubeadm/installer uninstall"
echo
echo "If you aren't happy, read the code. Anyhow, good luck!"
exit
;;
uninstall|remove|cleanup)
echo "Uninstalling..."
echo
for i in "${kube_bins[@]}" ; do
rm -f -v "/target/bin/${i}"
done
rm -f -v "/target/lib/systemd/system/kubelet.service"
for i in "${cni_bins[@]}" ; do
rm -f -v "/target/lib/cni/bin/${i}"
done
echo
echo "Hope you enjoyed, and see you later!"
exit
;;
install)
;;
*)
echo "Usage: sudo docker run -v /usr/local:/target gcr.io/kubeadm/installer [install|help|uninstall]"
exit
;;
esac
fi
if ! [ -d "/target" ] ; then
echo "Please make sure to specify target install direcory, e.g.:"
echo
echo "> sudo docker run -v /usr/local:/target gcr.io/kubeadm/installer"
echo
echo "Don't give up!"
exit 1
fi
echo "Installing binaries for Kubernetes (git-${KUBERNETES_BUILD_VERSION}) and systemd configuration..."
echo
dir="/opt/kube-${KUBERNETES_BUILD_VERSION}"
install -v -m 755 -d "/target/bin"
install -v -m 755 -d "/target/lib/systemd/system"
for i in "${kube_bins[@]}" ; do
install -v -p -m 755 -t "/target/bin" "${dir}/${i}"
done
install -v -p -m 755 -t "/target/lib/systemd/system" "${dir}/kubelet.service"
echo
echo "Installing generic CNI plugins and configuration..."
echo
install -v -m 755 -d "/target/lib/cni/bin"
install -v -m 755 -d "/target/etc/cni/net.d"
for i in "${cni_bins[@]}"; do
install -v -p -m 755 -t "/target/lib/cni/bin" "${dir}/cni/${i}"
done
echo
echo "Binaries and configuration files had been installed, you can now start kubelet and run kubeadm."
echo
echo "> sudo systemctl daemon-reload && sudo systemctl enable kubelet && sudo systemctl start kubelet"
echo
echo "If this host is going to be the master, run:"
echo
echo "> sudo env KUBE_HYPERKUBE_IMAGE=gcr.io/kubeadm/hyperkube:latest KUBE_DISCOVERY_IMAGE=gcr.io/kubeadm/hyperkube:latest kubeadm init"
echo
echo "If it's going to be a node, run:"
echo
echo "> sudo kubeadm join --token=<...> <master-ip-address>"
echo
echo "Have fun, and enjoy!"
exit
|
errordeveloper/t8s
|
kubeadm-installer/installer.sh
|
Shell
|
apache-2.0
| 3,238 |
export MAVEN_OPT=-Xmx1024m
# update version of test integration modules
#---------------------------------------------
cd kaleido-integration-test
mvn versions:set -DnewVersion=0.9.1-SNAPSHOT
mvn -N versions:update-child-modules
git commit -m "release XXX prepare" .
# site
#---------------------------------------------
mvn clean site:site && mvn site:stage
# mvn site:deploy - does not work with maven-site 3 and svn scm
cp -rf target/staging/* ./site/
find ./site/ -name '*.html' -exec dos2unix {} \;
# releasing : https://docs.sonatype.org/display/Repository/Sonatype+OSS+Maven+Repository+Usage+Guide
# signing gpg : https://docs.sonatype.org/display/Repository/How+To+Generate+PGP+Signatures+With+Maven
# sonatype :
# nexus for maven central : https://oss.sonatype.org/index.html#welcome
# staging repo url : https://oss.sonatype.org/index.html#stagingRepositories
# useful :
# http://www.coding-stories.com/2010/09/02/signer-les-jars-avec-maven/
# https://confluence.sakaiproject.org/display/REL/Maven+release+plugin+cheat+sheet
# http://www.sonatype.com/people/2010/01/how-to-generate-pgp-signatures-with-maven/
#---------------------------------------------
#mvn clean deploy -P kaleido-gpg,javaee5
mvn release:clean
mvn release:prepare
#mvn release:rollback
mvn release:perform -P kaleido-gpg
# git branch and tagging part
#---------------------------------------------
# git branch kaleido-0.9.1
# git push --all
# git tag -a kaleido-0.9.1 -m "maven central release"
# git push --tags
|
jraduget/kaleido-repository
|
kaleido-parent/script/deploy/release.sh
|
Shell
|
apache-2.0
| 1,512 |
#!/bin/sh
. perf_env.sh
KERN_CONF=GENERIC-NODEBUG
login_and_build() {
local prefix=`date +%Y%m%d%H%M%S`
local build_log_file=${prefix}_build.log
local install_log_file=${prefix}_install.log
local build_log_path=$tmp_dir/$build_log_file
local install_log_path=$tmp_dir/$install_log_file
ssh root@$build_machine_ip "cd $src_folder; svn up; make -j8 buildkernel KERNCONF=$KERN_CONF" |tee $build_log_path
## check build errors
build_proc_marker=`grep "Kernel build for" $build_log_path|wc -l|awk '{print $1}'`
if [ $build_proc_marker = 2 ]
then
log "build passed"
else
log "build error occurs! see $build_log_path"
return
fi
ssh root@$build_machine_ip -p $build_machine_port "cd $src_folder; make -j8 installkernel KERNCONF=$KERN_CONF"|tee $install_log_path
## check install errors
ssh root@$build_machine_ip -p $build_machine_port "cd /boot; tar zcvf $kernel_zip_file kernel"
scp -P $build_machine_port root@$build_machine_ip:/boot/$kernel_zip_file .
cp $kernel_zip_file $kernel_bak_folder/${prefix}${kernel_zip_file}
}
distribute_kernel_and_reboot() {
if [ -e $kernel_zip_file ]
then
scp -P $perf03_corp_port $kernel_zip_file root@$perf03_corp_ip:/boot/
ssh root@$perf03_corp_ip -p $perf03_corp_port "cd /boot; tar zxvf $kernel_zip_file"
scp -P $perf04_corp_port $kernel_zip_file root@$perf04_corp_ip:/boot/
ssh root@$perf04_corp_ip -p $perf04_corp_port "cd /boot; tar zxvf $kernel_zip_file"
reboot_perf03
reboot_perf04
if [ "$storage_remote_ip" != "$perf03_corp_ip" ] &&
[ "$storage_remote_ip" != "$perf04_corp_ip" ] &&
[ "$storage_remote_port" != "$perf03_corp_port" ] &&
[ "$storage_remote_port" != "$perf04_corp_port" ]
then
scp -P $storage_remote_port $kernel_zip_file root@$storage_remote_ip:/boot/
ssh root@$storage_remote_ip -p $storage_remote_port "cd /boot; tar zxvf $kernel_zip_file"
reboot_storage_server
fi
else
log "Cannot find $kernel_zip_file and cannot distribute it to test VMs"
fi
}
create_tmp_dir_ifnotexist
create_kernel_bak_ifnotexist
if [ -e ${kernel_zip_file} ]
then
rm ${kernel_zip_file}
fi
login_and_build
distribute_kernel_and_reboot
|
clovertrail/rocket
|
shell/performance/auto_build.sh
|
Shell
|
apache-2.0
| 2,209 |
#!/bin/bash
set -e # enforces the script to fail as soon as one command fails
mvn javadoc:javadoc > /dev/null
if [ "${TRAVIS_PULL_REQUEST}" = "false" ]; then
echo '$TRAVIS_PULL_REQUEST is false, running all tests'
openssl aes-256-cbc -K $encrypted_a973fe4f8e79_key -iv $encrypted_a973fe4f8e79_iv -in .config.properties.enc -out src/test/resources/.config.properties -d
mvn clean cobertura:cobertura-integration-test
else
  echo "\$TRAVIS_PULL_REQUEST is not false ($TRAVIS_PULL_REQUEST), running unit tests"
mvn clean test
fi
|
m2fd/java-sdk
|
.utility/travis.sh
|
Shell
|
apache-2.0
| 536 |
#!/usr/bin/env sh
echo "\033[31m 红色字 \033[0m"
echo "\033[34m 黄色字 \033[0m"
echo "\033[41;33m 红底黄字 \033[0m"
echo "\033[41;37m 红底白字 \033[0m"
#字颜色:30—–37
echo "\033[30m 30 黑色字 \033[0m"
echo "\033[31m 31 红色字 \033[0m"
echo "\033[32m 32 绿色字 \033[0m"
echo "\033[33m 33 黄色字 \033[0m"
echo "\033[34m 34 蓝色字 \033[0m"
echo "\033[35m 35 紫色字 \033[0m"
echo "\033[36m 36 青色字 \033[0m"
echo "\033[37m 37 白色字 \033[0m"
|
bin3/learnsh
|
color.sh
|
Shell
|
apache-2.0
| 488 |
pkg_origin=core
pkg_name=glib
pkg_version="2.50.3"
pkg_description="$(cat << EOF
GLib is a general-purpose utility library, which provides many useful data
types, macros, type conversions, string utilities, file utilities, a
mainloop abstraction, and so on. It works on many UNIX-like platforms, as
well as Windows and OS X.
EOF
)"
pkg_source="https://download.gnome.org/sources/${pkg_name}/${pkg_version%.*}/${pkg_name}-${pkg_version}.tar.xz"
pkg_license=('LGPL-2.0')
pkg_maintainer='The Habitat Maintainers <[email protected]>'
pkg_upstream_url="https://developer.gnome.org/glib/"
pkg_shasum="82ee94bf4c01459b6b00cb9db0545c2237921e3060c0b74cff13fbc020cfd999"
pkg_deps=(
lilian/coreutils
lilian/elfutils
core/glibc
lilian/libffi
lilian/libiconv
lilian/pcre
lilian/python
lilian/util-linux
lilian/zlib
)
pkg_build_deps=(
lilian/dbus
lilian/diffutils
lilian/file
lilian/gcc
lilian/gettext
lilian/libxslt
lilian/make
lilian/perl
lilian/pkg-config
)
pkg_bin_dirs=(bin)
pkg_lib_dirs=(lib)
pkg_include_dirs=(include)
pkg_pconfig_dirs=(lib/pkgconfig)
pkg_interpreters=(lilian/coreutils)
do_prepare() {
if [[ ! -r /usr/bin/file ]]; then
ln -sv "$(pkg_path_for file)/bin/file" /usr/bin/file
_clean_file=true
fi
}
do_build() {
./configure \
--prefix="$pkg_prefix" \
--with-libiconv \
--disable-gtk-doc \
--disable-fam
make -j $(nproc)
}
do_after() {
fix_interpreter "$pkg_prefix/bin/*" lilian/coreutils bin/env
}
do_end() {
if [[ -n "$_clean_file" ]]; then
rm -fv /usr/bin/file
fi
}
|
be-plans/be
|
glib/plan.sh
|
Shell
|
apache-2.0
| 1,564 |
#!/bin/bash
set -e
current_dir=$(pwd)
cd $(dirname $0)
old_version=$(git tag | tail -n1)
version=$1
npm test
update_version() { sed -i '' -E "s/(\"version\".*:.*\").*(\".*)/\1$version\2/g" $@; }
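# Note: `sed -i ''` (empty suffix argument) is BSD/macOS syntax; GNU sed would need plain `sed -i`.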
update_version src/index.js
npm install
#npm release
#TODO create js documentation
#/usr/local/share/npm/lib/node_modules/doxx/bin/doxx --source ./src/main --target ./docs
if [ "$version" = "" ]
then
echo "Version information not found. Type ./release.sh <version>"
echo "Previous version was $old_version"
else
echo "Creating version $version"
update_version package.json
echo -e "h3. $version \n\n$PLEASE EDIT:\n$(git log --pretty=format:%s $old_version^..)\n\n$(cat History.textile)" > History.textile
vim History.textile
git add -A .
git commit -m "Build for version $version"
git tag $version
git status
echo "Now type:"
echo "git push "
echo "git push --tags"
echo "npm publish"
echo "git co gh-pages && git merge master && git push && git co master"
fi
cd $current_dir
|
continuouscalendar/dateutils
|
release.sh
|
Shell
|
apache-2.0
| 990 |
#!/bin/bash
PGSHELL_CONFDIR="$1"
# Load the psql connection option parameters.
source $PGSHELL_CONFDIR/pgsql_funcs.conf
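# pg_is_in_recovery() returns true on a standby; the ::int cast yields 1 (standby) or 0 (primary).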
result=$(psql -A -t -X -h $PGHOST -p $PGPORT -U $PGROLE -d $PGDATABASE -c "select pg_is_in_recovery()::int" 2>&1)
echo "$result"
|
pg-monz/pg_monz
|
pg_monz/usr-local-bin/pgsql_standby.sh
|
Shell
|
apache-2.0
| 252 |
#!/bin/bash
export ANT_OPTS=-Xmx512m
export DBFLUTE_HOME=../mydbflute/dbflute-1.0.5A
export MY_PROJECT_NAME=exampledb
export MY_PROPERTIES_PATH=build.properties
|
taktos/dbflute-jodatime-example
|
dbflute_exampledb/_project.sh
|
Shell
|
apache-2.0
| 165 |
#!/bin/sh
TILE_NAME=sentimentr
TILE_FILE=`pwd`/*tile.yml
RELEASE_TARFILE=`pwd`/dev_releases/*/*.tgz
#BOSH_STEMCELL_FILE=`cat ${TILE_FILE} | grep "bosh-stemcell" | grep "^ *file:" | awk '{print $2}' `
#BOSH_STEMCELL_LOCATION=https://s3.amazonaws.com/bosh-jenkins-artifacts/bosh-stemcell/vsphere
mkdir -p tmp
cd tmp
mkdir -p metadata releases #stemcells
cp $TILE_FILE metadata
cp $RELEASE_TARFILE releases
#if [ ! -e "stemcells/$BOSH_STEMCELL_FILE" ]; then
# curl -k $BOSH_STEMCELL_LOCATION/$BOSH_STEMCELL_FILE -o stemcells/$BOSH_STEMCELL_FILE
#fi
zip -r $TILE_NAME.pivotal metadata releases #stemcells
mv $TILE_NAME.pivotal ..
cd ..
rm -rf tmp
|
dflick-pivotal/sentimentr-release
|
createTileWithDevRelease.sh
|
Shell
|
apache-2.0
| 646 |
#!/bin/bash
echo "Enabled=0 AND Disabled=1"
/usr/sbin/sysctl net.ipv6.conf.all.disable_ipv6
/usr/sbin/sysctl net.ipv6.conf.lo.disable_ipv6
|
zhuwbigdata/hadoop-admin-utils
|
preinstall-utils/3ipv6_check/checkIPv6.sh
|
Shell
|
apache-2.0
| 140 |
./configure
make
sudo make install
|
bayvictor/distributed-polling-system
|
bin/do.sh
|
Shell
|
apache-2.0
| 36 |
#!/bin/bash
# distp from Mapr to HDP (root user)
# Make adjustments to the protocol_prefix.sh file to control source and dest. filesystems.
# Get the SOURCE and TARGET protocol prefix's
if [ `whoami` != "root" ]; then
echo "Should be run as root, since this is the 'control' superuser between the two clusters"
exit 1
fi
# Change to the shells directory.
cd "$(dirname "$0")"
if [ -f ../misc/protocol_prefix.sh ]; then
. ../misc/protocol_prefix.sh
else
echo "Couldn't find ../misc/protocol_prefix.sh. Needed to set cluster name information for transfers"
exit 1
fi
hadoop distcp -i -pugp -delete -update $SOURCE/user/root/validation/mapr $TARGET/user/root/validation/mapr
if [ -d mapr ]; then
rm -rf mapr
fi
hdfs dfs -get validation/mapr .
|
dstreev/mapr-migration
|
validation/get_Mapr_Results.sh
|
Shell
|
apache-2.0
| 764 |
#!/bin/sh -e
# compile.sh <source> <obj> <compiler> <flags>
`${NUMAKE} --enable-trace`
# Compile source to object file
mkdir -p $(dirname $2)
echo $3 -c $1 -o $2 $4 -MMD -MF $2.d
$3 -c $1 -o $2 $4 -MMD -MF $2.d
# Add headers dependency
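# The sed joins the backslash-continued lines of the .d depfile into one line;
# cut then drops the "object: source" prefix, leaving only the header paths.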
headers=$(sed -e :a -e '/\\$/N; s/\\\n//; ta' "$2.d" | cut -d' ' -f3-)
${NUMAKE} --depend ${headers}
|
bullno1/xveearr
|
cpp.nu/compile.sh
|
Shell
|
bsd-2-clause
| 341 |
#!/bin/sh -x
rm -rf *.o *.so *~ tmp.app build *_wrap.* *.pyc ext_gridloop.py
|
sniemi/SamPy
|
sandbox/src1/TCSE3-3rd-examples/src/py/mixed/Grid2D/C++/convertptr/clean.sh
|
Shell
|
bsd-2-clause
| 79 |
#! /bin/bash
lxc_start()
{
lxc-info -n "$1" 2>&1 > /dev/null | grep RUNNING > /dev/null || {
echo_info "Starting LXC container ..."
lxc-start -n $LXC_NAME -d --logfile $LXC_LOG_FILE || {
echo_error "Could not start LXC container."
exit 1
}
}
echo_info "Waiting for server to start ..."
while true; do
lxc-attach -n $LXC_NAME -- ping google.com -c2 2>&1 | do_spin
if [ "${PIPESTATUS[0]}" -eq 0 ]; then
echo_info "Network discovered ..."
sleep 5
break;
fi
sleep 5
done
}
lxc_install_mylin2()
{
lxc-attach -n $1 -- apt-get update 2>&1 | do_log;
lxc-attach -n $1 -- apt-get install -y git 2>&1 | do_log;
lxc-attach -n $1 -- git clone https://github.com/dragoscirjan/mylin2.git 2>&1 | do_log;
}
lxc_info()
{
lxc-info -n $1
lxc-attach -n $1 -- ifconfig
}
|
dragoscirjan/mylin2
|
sh/lxc/utils.sh
|
Shell
|
bsd-3-clause
| 799 |
#!/bin/bash
export EDITOR="emacs -nw"
export VISUAL="$EDITOR"
export PATH="$PATH:~/.emacs.d/bin"
|
ggazzi/linux-configs
|
dotfiles/emacs/.bashrc.d/10_emacs.sh
|
Shell
|
bsd-3-clause
| 98 |
#!/bin/bash
if [ $# -ne 1 ]; then
echo "Usage: $0 <petuum_ps_hostfile>"
echo ""
echo "Kills hung Matrix Factorization clients"
exit 1
fi
host_file=`readlink -f $1`
progname=rand_forest_main
ssh_options="-oStrictHostKeyChecking=no -oUserKnownHostsFile=/dev/null -oLogLevel=quiet"
# Parse hostfile
unique_host_list=`cat $host_file | awk '{ print $2 }' | uniq`
# Kill instances
echo "Killing previous instances of '$progname' on servers, please wait..."
for ip in $unique_host_list; do
ssh $ssh_options $ip \
killall -q $progname
done
echo "All done!"
|
daiwei89/wdai_petuum_public
|
apps/rand_forest/scripts/kill_rand_forest.sh
|
Shell
|
bsd-3-clause
| 565 |
#!/bin/sh
echo ZGG: Testing COMPLEX16 Nonsymmetric Generalized Eigenvalue Problem routines
exec ./test_eigz < $CBLAPACKDIR/test/input/zgg.in
|
juanjosegarciaripoll/cblapack
|
test/test_zgg.sh
|
Shell
|
bsd-3-clause
| 141 |
#!/bin/sh
PIDS=$(pgrep java -lf | grep pedantic | cut -d" " -f1);
if [ -n "$PIDS" ]
then
echo "Killing $PIDS";
echo $PIDS | xargs kill;
else
echo "No running instances found";
fi
|
mixradio/mr-pedantic
|
scripts/bin/stop.sh
|
Shell
|
bsd-3-clause
| 187 |
#!/usr/bin/env bash
TESTFILE1=$(mktemp -p .)
if [ -x "$(command -v python3)" ]; then
PYTHON=$(command -v python3)
else
PYTHON=$(command -v python)
fi
${PYTHON} << END
import random as rnd
import time as time
rnd.seed(time.time())
randnum = rnd.sample(range(1,101), 18)
f1 = open("${TESTFILE1}", "w+")
for m in randnum:
for n in randnum:
line = str(m) + '_' + str(n) + '_' \
+ str(m) + '_' + str(n) + '\n'
f1.write(line)
f1.close()
END
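# Each line of ${TESTFILE1} encodes one test case as M_N_LDI_LDO, parsed below with awk.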
UNARY_OP=1
for i in `cat ${TESTFILE1}`
do
M=`echo ${i} | awk -F"_" '{print $1}'`
N=`echo ${i} | awk -F"_" '{print $2}'`
LDI=`echo ${i} | awk -F"_" '{print $3}'`
LDO=`echo ${i} | awk -F"_" '{print $4}'`
echo ${M} ${N} 100 100
for BCAST_IN in 0 1 2 3
do
./eltwise_unary_simple ${UNARY_OP} ${BCAST_IN} 4 4 4 ${M} ${N} 100 100
done
done
rm ${TESTFILE1}
|
hfp/libxsmm
|
samples/eltwise/kernel_test/unary_copy_32b_gtld.sh
|
Shell
|
bsd-3-clause
| 847 |
#!/usr/bin/env bash
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd)/js"
cd $DIR
printf "Building protobuf file\n"
PROTO_INCLUDE=`realpath "$DIR/../../common_protos"`
PROTOS="$PROTO_INCLUDE"/*.proto
protoc --proto_path="$PROTO_INCLUDE" --js_out=library=vizProto,binary:"." $PROTOS
|
SCAII/SCAII
|
viz/gen_protos.sh
|
Shell
|
bsd-3-clause
| 291 |
#!/bin/bash
source src/benchmark.sh
ble/util/c2s.0() { :; }
table1=$' \x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F'
table1=$table1$'\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1A\x1B\x1C\x1D\x1E\x1F'
table1=$table1$'\x20\x21\x22\x23\x24\x25\x26\x27\x28\x29\x2A\x2B\x2C\x2D\x2E\x2F'
table1=$table1$'\x30\x31\x32\x33\x34\x35\x36\x37\x38\x39\x3A\x3B\x3C\x3D\x3E\x3F'
table1=$table1$'\x40\x41\x42\x43\x44\x45\x46\x47\x48\x49\x4A\x4B\x4C\x4D\x4E\x4F'
table1=$table1$'\x50\x51\x52\x53\x54\x55\x56\x57\x58\x59\x5A\x5B\x5C\x5D\x5E\x5F'
table1=$table1$'\x60\x61\x62\x63\x64\x65\x66\x67\x68\x69\x6A\x6B\x6C\x6D\x6E\x6F'
table1=$table1$'\x70\x71\x72\x73\x74\x75\x76\x77\x78\x79\x7A\x7B\x7C\x7D\x7E\x7F'
ble/util/c2s.1() {
if ((c<0x80)); then
ret=${table1:c:1}
else
ret=something
fi
}
for i in {0..127}; do
table2[i]=${table1:i:1}
done
ble/util/c2s.2() {
ret=${table2[c]}
if [[ ! $ret ]]; then
ret=something
fi
}
#------------------------------------------------------------------------------
set +f
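# Build the test input: od the hex dump text a second time as signed decimal bytes,
# so every value is an ASCII code of the dump text itself (all < 0x80).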
input=($(od -vAn -tx1 /dev/urandom | head -200 | od -vAn -td1))
function tester.loop {
local ret
for c; do
"$c2s" "$c"
done
}
function tester {
local c2s=$1
tester.loop "${input[@]}"
}
# ble-measure 'tester ble/util/c2s.0'
# ble-measure 'tester ble/util/c2s.1'
# ble-measure 'tester ble/util/c2s.2'
# bash-5.0
# 68272.500 usec/eval: tester ble/util/c2s.1 (x2)
# 87022.500 usec/eval: tester ble/util/c2s.2 (x2)
# bash-4.2
# 12300000.000 usec/eval: tester ble/util/c2s.1 (x1)
# 12323000.000 usec/eval: tester ble/util/c2s.2 (x1)
# bash-4.0
# 12262000.000 usec/eval: tester ble/util/c2s.1 (x1)
# 12323000.000 usec/eval: tester ble/util/c2s.2 (x1)
#------------------------------------------------------------------------------
# The slowness apparently comes not from looking up c2s in the table,
# but from calling a child function from a function holding many arguments.
# Next, try calling it with the arguments split into small batches.
# Looking at the results, in Bash-5.0 a single big call is faster,
# but in Bash-4.4 and below the split calls are faster.
# Measurements vary from run to run, but the minimum seems to be around a batch size of 150-170.
function tester.2/loop1 {
local N=$# i
for ((i=0;i+B<N;i+=B)); do
tester.loop "${@:i+1:B}"
done
((i<N)) && tester.loop "${@:i+1:N-i}"
}
function tester.2 {
local c2s=$1 B=${2:-100}
tester.2/loop1 "${input[@]}"
}
# ble-measure 'tester.2 ble/util/c2s.0 5'
# ble-measure 'tester.2 ble/util/c2s.0 10'
# ble-measure 'tester.2 ble/util/c2s.0 20'
# ble-measure 'tester.2 ble/util/c2s.0 50'
# ble-measure 'tester.2 ble/util/c2s.0 100'
# ble-measure 'tester.2 ble/util/c2s.0 200'
# ble-measure 'tester.2 ble/util/c2s.0 500'
# ble-measure 'tester.2 ble/util/c2s.0 1000'
# ble-measure 'tester.2 ble/util/c2s.0 2000'
# ble-measure 'tester.2 ble/util/c2s.0 5000'
# ble-measure 'tester.2 ble/util/c2s.0 10000'
# bash-4.0
# 21591000.000 usec/eval: tester.2 ble/util/c2s.0 2 (x1)
# 8790000.000 usec/eval: tester.2 ble/util/c2s.0 5 (x1)
# 4481000.000 usec/eval: tester.2 ble/util/c2s.0 10 (x1)
# 2276000.000 usec/eval: tester.2 ble/util/c2s.0 20 (x1)
# 982000.000 usec/eval: tester.2 ble/util/c2s.0 50 (x1)
# 603000.000 usec/eval: tester.2 ble/util/c2s.0 100 (x1)
# 513000.000 usec/eval: tester.2 ble/util/c2s.0 200 (x1)
# 747000.000 usec/eval: tester.2 ble/util/c2s.0 500 (x1)
# 1297000.000 usec/eval: tester.2 ble/util/c2s.0 1000 (x1)
# 2491000.000 usec/eval: tester.2 ble/util/c2s.0 2000 (x1)
# 6045000.000 usec/eval: tester.2 ble/util/c2s.0 5000 (x1)
# 12179000.000 usec/eval: tester.2 ble/util/c2s.0 10000 (x1)
# bash-5.0
# 4975379.200 usec/eval: tester.2 ble/util/c2s.0 5 (x1)
# 2594160.200 usec/eval: tester.2 ble/util/c2s.0 10 (x1)
# 1371359.200 usec/eval: tester.2 ble/util/c2s.0 20 (x1)
# 591166.200 usec/eval: tester.2 ble/util/c2s.0 50 (x1)
# 333587.200 usec/eval: tester.2 ble/util/c2s.0 100 (x1)
# 199348.200 usec/eval: tester.2 ble/util/c2s.0 200 (x1)
# 122687.200 usec/eval: tester.2 ble/util/c2s.0 500 (x1)
# 95477.700 usec/eval: tester.2 ble/util/c2s.0 1000 (x2)
# 82203.200 usec/eval: tester.2 ble/util/c2s.0 2000 (x2)
# 72446.200 usec/eval: tester.2 ble/util/c2s.0 5000 (x2)
# 68311.200 usec/eval: tester.2 ble/util/c2s.0 10000 (x2)
# bash-4.4
# 8066000.000 usec/eval: tester.2 ble/util/c2s.0 5 (x1)
# 4074000.000 usec/eval: tester.2 ble/util/c2s.0 10 (x1)
# 2070000.000 usec/eval: tester.2 ble/util/c2s.0 20 (x1)
# 904000.000 usec/eval: tester.2 ble/util/c2s.0 50 (x1)
# 528000.000 usec/eval: tester.2 ble/util/c2s.0 100 (x1)
# 438000.000 usec/eval: tester.2 ble/util/c2s.0 200 (x1)
# 705000.000 usec/eval: tester.2 ble/util/c2s.0 500 (x1)
# 1290000.000 usec/eval: tester.2 ble/util/c2s.0 1000 (x1)
# 2541000.000 usec/eval: tester.2 ble/util/c2s.0 2000 (x1)
# 6197000.000 usec/eval: tester.2 ble/util/c2s.0 5000 (x1)
# 12171000.000 usec/eval: tester.2 ble/util/c2s.0 10000 (x1)
# 428000.000 usec/eval: tester.2 ble/util/c2s.0 150 (x1)
# 435000.000 usec/eval: tester.2 ble/util/c2s.0 200 (x1)
# 463000.000 usec/eval: tester.2 ble/util/c2s.0 250 (x1)
# 494000.000 usec/eval: tester.2 ble/util/c2s.0 300 (x1)
# 544000.000 usec/eval: tester.2 ble/util/c2s.0 350 (x1)
# 586000.000 usec/eval: tester.2 ble/util/c2s.0 400 (x1)
# Re-measured: no significant difference observed.
# ble-measure 'tester.2 ble/util/c2s.0 160'
# ble-measure 'tester.2 ble/util/c2s.1 160'
# ble-measure 'tester.2 ble/util/c2s.2 160'
#------------------------------------------------------------------------------
# Now verify by actually storing the values into buff.
# Even so, there is no significant difference in computation time.
function tester.3/loop2 {
local ret
for c; do
"$c2s" "$c"
buff[b++]=$ret
done
}
function tester.3/loop1 {
local N=$# i b=0
local -a buff=()
for ((i=0;i+B<N;i+=B)); do
tester.3/loop2 "${@:i+1:B}"
done
((i<N)) && tester.3/loop2 "${@:i+1:N-i}"
}
function tester.3 {
local c2s=$1 B=${2:-160}
tester.3/loop1 "${input[@]}"
}
# ble-measure 'tester.3 ble/util/c2s.0'
# ble-measure 'tester.3 ble/util/c2s.1'
# ble-measure 'tester.3 ble/util/c2s.2'
|
akinomyoga/ble.sh
|
memo/D1302.benchmark-c2s-cached.sh
|
Shell
|
bsd-3-clause
| 6,223 |
#!/bin/bash
echo "================================================"
echo " Ubuntu 16.04 (PHP 7.0)"
echo "================================================"
echo -n "[1/4] Starting MariaDB 10.0 ..... "
# make sure mysql can create socket and lock
mkdir /var/run/mysqld && chmod 777 /var/run/mysqld
# run mysql server
nohup mysqld > /root/mysql.log 2>&1 &
# wait for mysql to become available
while ! mysqladmin ping -hlocalhost >/dev/null 2>&1; do
sleep 1
done
# create database and user on mysql
mysql -u root >/dev/null << 'EOF'
CREATE DATABASE `php-crud-api` CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci;
CREATE USER 'php-crud-api'@'localhost' IDENTIFIED BY 'php-crud-api';
GRANT ALL PRIVILEGES ON `php-crud-api`.* TO 'php-crud-api'@'localhost' WITH GRANT OPTION;
FLUSH PRIVILEGES;
EOF
echo "done"
echo -n "[2/4] Starting PostgreSQL 9.5 ... "
# run postgres server
nohup su - -c "/usr/lib/postgresql/9.5/bin/postgres -D /etc/postgresql/9.5/main" postgres > /root/postgres.log 2>&1 &
# wait for postgres to become available
until su - -c "psql -U postgres -c '\q'" postgres >/dev/null 2>&1; do
sleep 1;
done
# create database and user on postgres
su - -c "psql -U postgres >/dev/null" postgres << 'EOF'
CREATE USER "php-crud-api" WITH PASSWORD 'php-crud-api';
CREATE DATABASE "php-crud-api";
GRANT ALL PRIVILEGES ON DATABASE "php-crud-api" to "php-crud-api";
\c "php-crud-api";
CREATE EXTENSION IF NOT EXISTS postgis;
\q
EOF
echo "done"
echo -n "[3/4] Starting SQLServer 2017 ... "
# run sqlserver server
nohup /opt/mssql/bin/sqlservr --accept-eula > /root/mssql.log 2>&1 &
# create database and user on sqlserver
/opt/mssql-tools/bin/sqlcmd -l 30 -S localhost -U SA -P sapwd123! >/dev/null << 'EOF'
CREATE DATABASE [php-crud-api]
GO
CREATE LOGIN [php-crud-api] WITH PASSWORD=N'php-crud-api', DEFAULT_DATABASE=[php-crud-api], CHECK_EXPIRATION=OFF, CHECK_POLICY=OFF
GO
USE [php-crud-api]
GO
CREATE USER [php-crud-api] FOR LOGIN [php-crud-api] WITH DEFAULT_SCHEMA=[dbo]
exec sp_addrolemember 'db_owner', 'php-crud-api';
GO
exit
EOF
echo "done"
echo -n "[4/4] Cloning PHP-CRUD-API v2 ... "
# install software
if [ -d /php-crud-api ]; then
echo "skipped"
else
git clone --quiet https://github.com/mevdschee/php-crud-api.git
echo "done"
fi
echo "------------------------------------------------"
# run the tests
cd php-crud-api
php test.php
|
mevdschee/mysql-crud-api
|
docker/ubuntu16/run.sh
|
Shell
|
mit
| 2,358 |
#!/bin/bash
SRC=$(realpath $(cd -P "$(dirname "${BASH_SOURCE[0]}")" && pwd)/..)
pushd $SRC &> /dev/null
IMAGE=${IMAGE:-chromedp/headless-shell:latest}
set -e
(set -x;
CGO_ENABLED=0 go test -c
)
(set -x;
docker run \
--rm \
--volume=$PWD:/chromedp \
--entrypoint=/chromedp/chromedp.test \
--workdir=/chromedp \
--env=PATH=/headless-shell \
--env=HEADLESS_SHELL=1 \
$IMAGE -test.v -test.parallel=1 -test.timeout=3m
)
popd &> /dev/null
|
chromedp/chromedp
|
contrib/docker-test.sh
|
Shell
|
mit
| 472 |
wget https://shapenet.cs.stanford.edu/ericyi/shapenetcore_partanno_segmentation_benchmark_v0.zip
unzip shapenetcore_partanno_segmentation_benchmark_v0.zip
|
fxia22/pointGAN
|
download.sh
|
Shell
|
mit
| 155 |
#! /usr/bin/env bash
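# Sum the third ':'-separated field of every checksum line:
# paste -sd '+' joins the values with '+' and bc evaluates the expression.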
./checksum < $1 | cut -f 3 -d : | paste -sd '+' | bc
|
Jexah/comp2129-assignment1
|
balance.sh
|
Shell
|
mit
| 76 |
#!/bin/bash
<% for(var index in toAdd) { %>
docker node update <%- toAdd[index].server %> \
--label-add <%- toAdd[index].label %>=<%- toAdd[index].value %>
<% } %>
<% for(var index in toRemove) { %>
docker node update <%- toRemove[index].server %> \
--label-rm <%- toRemove[index].label %>
<% } %>
|
arunoda/meteor-up
|
src/plugins/docker/assets/swarm-labels.sh
|
Shell
|
mit
| 319 |
#!/bin/bash
#
##########################################################################
#Red Hat Enterprise Linux 6 - DISA STIG Compliance Remediation Content
#Copyright (C) 2013
#Vincent C. Passaro ([email protected])
#
##########################################################################
#
###################### Buddha Labs LLC ################################
# By Vincent C. Passaro #
# Buddha Labs LLC. #
# vince[@]buddhalabs[.]com #
# www.buddhalabs.com #
###################### Buddha Labs LLC ################################
#_________________________________________________________________________
# Version | Change Information | Author | Date
#-------------------------------------------------------------------------
# 1.0 | Initial Script Creation | Vincent Passaro | 1-JUNE-2013
#
#
#######################DISA INFORMATION##################################
# Group ID (Vulid): RHEL-06-000241
# Group Title: SRG-OS-000242
#
# Rule ID: RHEL-06-000241_rule
# Severity: low
# Rule Version (STIG-ID): RHEL-06-000241
# Rule Title: The SSH daemon must not permit user environment settings.
#
# Vulnerability Discussion: SSH environment options potentially allow
# users to bypass access restriction in some configurations.
#
# Responsibility:
# IAControls:
#
# Check Content:
#
# To ensure users are not able to present environment options to the SSH
# daemon, run the following command:
# grep PermitUserEnvironment /etc/ssh/sshd_config
# If properly configured, output should be:
# PermitUserEnvironment no
# If it is not, this is a finding.
#
# Fix Text:
#
# To ensure users are not able to present environment options to the SSH
# daemon, add or correct the following line in "/etc/ssh/sshd_config":
# PermitUserEnvironment no
#######################DISA INFORMATION##################################
#
# Global Variables
PDI=RHEL-06-000241
#
#BEGIN_CHECK
. /usr/libexec/aqueduct/functions
PKG_CONFIG=/etc/ssh/sshd_config
if ! grep -q "^PermitUserEnvironment no" $PKG_CONFIG; then
#END_CHECK
#BEGIN_REMEDY
edit_file $PKG_CONFIG $PDI 'PermitUserEnvironment no' 'PermitUserEnvironment'
fi
#END_REMEDY
|
atomicturtle/t-stig
|
aqueduct-0.4/compliance/Bash/STIG/rhel-6/prod/RHEL-06-000241.sh
|
Shell
|
mit
| 2,443 |
#!/bin/sh
echo "Preparing to watch styles.scss"
sass --watch css/sass/styles.scss:css/styles.css --style expanded
|
edsloan/sassbase
|
watch.sh
|
Shell
|
mit
| 115 |
#!/usr/bin/env bash
#
# Run this script from root source directory and provide path to Geany-Plugins
# root source tree as the only argument.
#
SRCDIR=`pwd`
DSTDIR=$1
cp -rv $SRCDIR/geany-plugins/* "$DSTDIR"
cp -v $SRCDIR/overview/*.[ch] "$DSTDIR/overview/overview/" || exit $?
cp -v $SRCDIR/data/prefs.ui "$DSTDIR/overview/data/" || exit $?
|
FPGL/overview-plugin
|
gp-update.sh
|
Shell
|
gpl-2.0
| 344 |
#!/bin/bash
# (c) 2014-2015 Sam Nazarko
# [email protected]
. ../common.sh
pull_source "http://www.freescale.com/lgfiles/NMG/MAD/YOCTO/imx-lib-3.10.17-1.0.0.tar.gz" "$(pwd)/src"
if [ $? != 0 ]; then echo -e "Error downloading" && exit 1; fi
rm -rf src/imx-lib*/hdmi-cec
pull_bin "http://www.freescale.com/lgfiles/NMG/MAD/YOCTO/firmware-imx-3.10.17-1.0.0.bin" "$(pwd)/src/firmware-imx.bin"
if [ $? != 0 ]; then echo -e "Error downloading" && exit 1; fi
pull_bin "http://www.freescale.com/lgfiles/NMG/MAD/YOCTO/imx-vpu-3.10.17-1.0.0.bin" "$(pwd)/src/imx-vpu.bin"
if [ $? != 0 ]; then echo -e "Error downloading" && exit 1; fi
pull_bin "http://www.freescale.com/lgfiles/NMG/MAD/YOCTO/gpu-viv-g2d-3.10.17-1.0.0.bin" "$(pwd)/src/viv-g2d.bin"
pull_bin "http://www.freescale.com/lgfiles/NMG/MAD/YOCTO/libfslvpuwrap-1.0.46.bin" "$(pwd)/src/libfslvpuwrap.bin"
if [ $? != 0 ]; then echo -e "Error downloading" && exit 1; fi
pull_bin "http://www.freescale.com/lgfiles/NMG/MAD/YOCTO/gpu-viv-bin-mx6q-3.10.17-1.0.0-hfp.bin" "$(pwd)/src/gpu-viv.bin"
if [ $? != 0 ]; then echo -e "Error downloading" && exit 1; fi
# Build in native environment
build_in_env "${1}" $(pwd) "vero-userland-osmc"
build_return=$?
if [ $build_return == 99 ]
then
echo -e "Building package vero-userland-osmc"
out=$(pwd)/files
make clean
update_sources
handle_dep "libtool-bin"
handle_dep "autoconf"
handle_dep "automake"
mkdir -p ${out}/opt/vero/lib
mkdir -p files-dev/opt/vero/include
pushd src
install_patch "../patches" "all"
cp -ar headers /
cp -ar headers/include ../files-dev/opt/vero
rm -rf headers > /dev/null 2>&1
pushd imx-lib*
sed -i */Makefile -e s/-O2/-O3/
$BUILD PLATFORM=IMX6Q C_INCLUDE_PATH=/headers/include/ all
make install PLATFORM=IMX6Q DEST_DIR=${out}
if [ $? != 0 ]; then echo "Error occurred during build" && exit 1; fi
popd
sh firmware-imx.bin --auto-accept
pushd firmware-imx*
rm -rf firmware/ar3k
rm -rf firmware/ath6k
rm firmware/LICENCE.atheros_firmware
rm firmware/README
rm firmware/Android.mk
mkdir -p ${out}/lib
cp -ar firmware ${out}/lib
popd
sh viv-g2d.bin --auto-accept
pushd gpu-viv-g2d*
cp -ar usr/include ../../files-dev/opt/vero
cp -ar usr/lib ${out}/opt/vero
popd
sh imx-vpu.bin --auto-accept
pushd imx-vpu*
sed -i */Makefile -e s/-O2/-O3/
$BUILD PLATFORM=IMX6Q
if [ $? != 0 ]; then echo "Error occurred during build" && exit 1; fi
make install PLATFORM=IMX6Q DEST_DIR=${out} # of course, Freescale likes to hop between DESTDIR and DEST_DIR
popd
sh libfslvpuwrap.bin --auto-accept
pushd libfslvpuwrap*
CFLAGS="-I../../files/usr/include -L../../files/usr/lib" ./autogen.sh --prefix=/opt/vero
$BUILD all
if [ $? != 0 ]; then echo "Error occurred during build" && exit 1; fi
make install DESTDIR=${out}
popd
sh gpu-viv.bin --auto-accept
# Remove samples
rm -rf gpu-viv-bin-mx6q*/opt
# Remove conflicting libraries
pushd gpu-viv-bin-mx6q*
pushd usr/lib
rm libGAL.so libVIVANTE.so libEGL.so *-wl.so* *wayland* *-dfb.so* *-x11.so*
ln -s libEGL-fb.so libEGL.so
ln -s libGAL-fb.so libGAL.so
ln -s libVIVANTE-fb.so libVIVANTE.so
popd
cp -ar usr/include ../../files-dev/opt/vero
cp -ar usr/lib ${out}/opt/vero
popd
strip_libs
popd
cp -ar ${out}/usr/include files-dev/opt/vero/ # Remnants
cp -ar ${out}/usr/lib ${out}/opt/vero/
cp -ar ${out}/opt/vero/include files-dev/opt/vero
rm -rf ${out}/usr/lib >/dev/null 2>&1
rm -rf ${out}/usr/include >/dev/null 2>&1
rm -rf ${out}/opt/vero/include >/dev/null 2>&1
mkdir -p ${out}/etc/ld.so.conf.d
echo "/opt/vero/lib" > files/etc/ld.so.conf.d/vero.conf
rm -rf ${out}/opt/vero/share
dpkg_build files/ vero-userland-osmc.deb
dpkg_build files-dev vero-userland-dev-osmc.deb
rm -rf /headers
build_return=$?
fi
teardown_env "${1}"
exit $build_return
|
DBMandrake/osmc
|
package/vero-userland-osmc/build.sh
|
Shell
|
gpl-2.0
| 3,759 |
#!/usr/bin/env bash
set -e
# Runs the program with the virtualenv
source /home/jason/Env/letsencrypt/bin/activate
python3 Emailer.py
deactivate
|
JasonYao/Phi-Kappa-Sigma-Server
|
letsEncrypt/run.sh
|
Shell
|
gpl-2.0
| 146 |
#!/sbin/busybox sh
(
PROFILE=`cat /data/.siyah/.active.profile`;
. /data/.siyah/${PROFILE}.profile;
if [ "$cron_drop_cache" == "on" ]; then
MEM_ALL=`free | grep Mem | awk '{ print $2 }'`;
MEM_USED=`free | grep Mem | awk '{ print $3 }'`;
MEM_USED_CALC=$(($MEM_USED*100/$MEM_ALL));
# clean the cache only if more than 50% of total memory is in use.
if [ "$MEM_USED_CALC" -gt 50 ]; then
# wait till CPU is idle.
while [ ! `cat /proc/loadavg | cut -c1-4` \< "3.50" ]; do
echo "Waiting For CPU to cool down";
sleep 30;
done;
sync;
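# vm.drop_caches: 1 = pagecache, 2 = dentries/inodes, 3 = both; sync first so dirty pages are flushed.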
sysctl -w vm.drop_caches=3
sync;
sysctl -w vm.drop_caches=1
sync;
date +%H:%M-%D-%Z > /data/crontab/cron-clear-ram-cache;
echo "Cache above 50%! Cleaned RAM Cache" >> /data/crontab/cron-clear-ram-cache;
fi;
fi;
)&
|
jthatch12/STi
|
initramfs32/res/crontab/cron-scripts/drop-cache-only.sh
|
Shell
|
gpl-2.0
| 789 |
#!/bin/sh
#
# This procedure execute Oracle statspack snap and report.
#
# Example:
# sh -x ./chcsv.sh -i TEST2 -l . -u system/manager -f oratbs
# $STATCMD{'ORACLE'} = join ( "\n",
# '_pwd_/getorasql.sh -i RTD -l _odir_ -u perfstat/perfstat -f oratab',
# '_pwd_/getorasql.sh -i RTD -l _odir_ -u perfstat/perfstat -f orases -t 300 -c 36',
LANG=C;export LANG
COLUMNS=160;export COLUMNS
#resize -s 100 160
CMDNAME=`basename $0`
USAGE="Usage: $CMDNAME [-l dir] [-e errfile] [-i sid] [-u userid/passwd] [-f src] [-t interval] [-c cnt] [-x]"
# Set default param
CWD=`dirname $0`
DIR=.
SID=RTD
CNT=1
INTERVAL=10
USER=perfstat/perfstat
FILE=
ERR=/dev/null
CHECK_PROCESS=YES
SCRIPT="ora10g"
# Get command option
OPT=
while getopts l:e:i:u:f:c:t:d:x OPT
do
case $OPT in
x) CHECK_PROCESS="NO"
;;
l) DIR=$OPTARG
;;
e) ERR=$OPTARG
;;
i) SID=$OPTARG
;;
u) USER=$OPTARG
;;
f) FILE=$OPTARG
;;
c) CNT=$OPTARG
;;
t) INTERVAL=$OPTARG
;;
d) SCRIPT=$OPTARG
;;
\?) echo "$USAGE" 1>&2
exit 1
;;
esac
done
shift `expr $OPTIND - 1`
echo $SID
# Set current Date & Time
WORK="${CWD}/../_wk"
if [ ! -d ${WORK} ]; then
/bin/mkdir -p ${WORK}
if [ $? -ne 0 ]; then
echo "Command failed."
exit 1
fi
fi
# --------- Set Oracle env --------------
if [ ! -f ${CWD}/${SCRIPT}/oracle_env ]; then
echo "File not fount: ${CWD}/${SCRIPT}/oracle_env"
exit 1
fi
. ${CWD}/${SCRIPT}/oracle_env
# Check Oracle process
if [ "YES" = "${CHECK_PROCESS}" ]; then
ORACLE_SID=${SID}; export ORACLE_SID
ORAPROC=`perl ${CWD}/hastat.pl`
if [ 0 != $? ]; then
echo "exec error : CHECK_PROCESS"
exit 1
fi
if [ "${ORACLE_SID}" != "${ORAPROC}" ]; then
echo "ORACLE(${ORACLE_SID}) not found."
exit 1
fi
fi
SQLPLUS="${ORACLE_HOME}/bin/sqlplus"
ORASQL="${CWD}/${SCRIPT}/${FILE}.sql"
if [ ! -x ${SQLPLUS} ]; then
echo "File not fount: ${SQLPLUS}"
exit 1
fi
if [ ! -f "${ORASQL}" ]; then
echo "File not found: ${ORASQL}"
exit 1
fi
ORARES="${WORK}/${FILE}_${SID}.$$"
ORAFILE="${DIR}/${FILE}__${SID}.txt"
if [ -f ${ORAFILE} ]; then
/bin/rm -f $ORAFILE
fi
ORACNT=1
while test ${ORACNT} -le ${CNT}
do
# Sleep Interval
if [ ${ORACNT} -ne ${CNT} ]; then
sleep ${INTERVAL} &
fi
# Run the SQL script via sqlplus.
/bin/date '+Date:%y/%m/%d %H:%M:%S' >> ${ORAFILE}
${SQLPLUS} -s ${USER} << EOF1 >> ${ERR} 2>&1
SET ECHO OFF
SET PAGESIZE 49999
SET HEADING ON
SET UNDERLINE OFF
SET LINESIZE 5000
SET FEEDBACK OFF
SET VERIFY OFF
SET TRIMSPOOL ON
SET COLSEP '|'
WHENEVER SQLERROR EXIT 1;
SPOOL ${ORARES}
@${ORASQL}
SPOOL OFF
EOF1
if [ 0 != $? ]; then
echo "ERROR[sqlplus] : ${ORASQL}"
/bin/rm -f ${WORK}/*.$$
exit 1
fi
cat ${ORAFILE} ${ORARES} >> chcsv_res.$$
mv chcsv_res.$$ ${ORAFILE}
wait
ORACNT=`expr ${ORACNT} + 1`
done
/bin/rm -f ${WORK}/*.$$
exit 0
|
getperf/getperf
|
lib/agent/Oracle/script/ora12c/chcsv.sh
|
Shell
|
gpl-2.0
| 3,087 |
#!/bin/bash
if [ -z $1 ]; then
DB_FILE=notes.db
else
DB_FILE=$1
fi
adb push $DB_FILE /data/data/com.github.simplenotes/databases
|
mattr-/SimpleNotes
|
SimpleNotes/scripts/push-db.sh
|
Shell
|
gpl-2.0
| 139 |
#!/bin/bash
DIR="Filtered";
if [[ ! -d "$DIR" ]]; then mkdir -v "$PWD/$DIR"; fi
for F in *.log ; do
awk '/Sub/{x = $NF - 2; y = $NF - 1; theta = $NF; print "x: " x "\ny: " y "\ntheta: " theta "\n---"}' $F | tr -cd '\11\12\15\40-\176' | sed 's/\[0m//g; $d' > $DIR/$F;
done
|
em-er-es/rollo
|
Logs/filter-logs.sh
|
Shell
|
gpl-2.0
| 273 |
#!/bin/sh
TEST_PURPOSE=regress
TEST_TYPE=umlXhost
TESTNAME=x509-pluto-06
XHOST_LIST="NIC NORTH EAST"
REF_CONSOLE_FIXUPS="kern-list-fixups.sed nocr.sed"
REF_CONSOLE_FIXUPS="$REF_CONSOLE_FIXUPS east-prompt-splitline.pl"
REF_CONSOLE_FIXUPS="$REF_CONSOLE_FIXUPS script-only.sed"
REF_CONSOLE_FIXUPS="$REF_CONSOLE_FIXUPS cutout.sed"
REF_CONSOLE_FIXUPS="$REF_CONSOLE_FIXUPS klips-debug-sanitize.sed"
REF_CONSOLE_FIXUPS="$REF_CONSOLE_FIXUPS ipsec-setup-sanitize.sed"
REF_CONSOLE_FIXUPS="$REF_CONSOLE_FIXUPS ipsec-lwdnsq-sanitize.pl"
REF_CONSOLE_FIXUPS="$REF_CONSOLE_FIXUPS pluto-whack-sanitize.sed"
THREEEIGHT=true
NORTH_PLAY=../../klips/inputs/08-pole-sunrise-ping.pcap
REF_EAST_OUTPUT=east-cleartext.txt
EASTHOST=east
EAST_INIT_SCRIPT=eastinit.sh
EAST_FINAL_SCRIPT=final.sh
REF_EAST_CONSOLE_OUTPUT=east-console.txt
REF26_EAST_CONSOLE_OUTPUT=east-console.txt
NORTHHOST=north
NORTH_INIT_SCRIPT=northinit.sh
NORTH_RUN_SCRIPT=northrun.sh
NORTH_FINAL_SCRIPT=final.sh
REF_NORTH_CONSOLE_OUTPUT=north-console.txt
REF26_NORTH_CONSOLE_OUTPUT=north-console.txt
NICHOST=nic
NETJIG_EXTRA=../x509-pluto-01/debugpublic.txt
|
ZHAW-INES/rioxo-uClinux-dist
|
openswan/testing/pluto/x509-pluto-06/testparams.sh
|
Shell
|
gpl-2.0
| 1,112 |
#! /bin/sh
$EXTRACTRC `find . -name '*.ui' -or -name '*.kcfg'` >> rc.cpp || exit 11
$XGETTEXT *.cpp -o $podir/libtemplateparser.pot
rm -f rc.cpp
|
chusopr/kdepim-ktimetracker-akonadi
|
templateparser/Messages.sh
|
Shell
|
gpl-2.0
| 145 |
#!/bin/bash
mkdir tmp_install
cd tmp_install
if [ ! -f "./mdk3-v6/mdk3" ]
then
echo
echo "Downloading mdk3..."
#wget http://homepages.tu-darmstadt.de/~p_larbig/wlan/mdk3-v6.tar.bz2
wget http://www.fruitywifi.com/downloads/mdk3-v6.tar.bz2
echo
echo "Extracting mdk3..."
bunzip2 mdk3-v6.tar.bz2
tar xvf mdk3-v6.tar
echo
echo "Compiling mdk3"
cd mdk3-v6/
make
cp mdk3 /usr/bin/
cd ../
fi
chmod 755 ./mdk3-v6/mdk3
cp ./mdk3-v6/mdk3 /usr/bin/
cd ..
#echo
#echo "Installing libssl-dev..."
#apt-get -y install libssl-dev
#echo
#echo "Downloading aircrack-ng..."
#wget http://download.aircrack-ng.org/aircrack-ng-1.2-beta1.tar.gz
#echo
#echo "Extracting aircrack-ng..."
#tar -zxvf aircrack-ng-1.2-beta1.tar.gz
#echo
#echo "Compiling aircrack-ng..."
#cd aircrack-ng-1.2-beta1
#make
#cd ../
echo
echo "..DONE.."
|
jesux/FruityWifi
|
FruityWifi/www/modules/mdk3/includes/install.sh
|
Shell
|
gpl-3.0
| 869 |
#!/bin/sh
# Copyright 2013 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# start jb-mr2-dev
# 700272 = JSR67
# 703372 = JSR71
# 704765 = JSR72
# 708191 = JSR74
# 711747 = JSR78
# 713896 = JSR78B
# 719009 = JSR82
# 725949 = JSR88
# 728843 = JSS01
# 730471 = JSS02B
# 740015 = JSS11F
# 741000 = JSS11I
# 741250 = JSS15
# 746990 = JSS15H
# 748502 = JSS15I
# 748593 = JSS15J
# 750418 = JSS15K
# end jb-mr2-dev
BRANCH=klp-dev
if test $BRANCH = klp-dev
then
ZIP=razorg-ota-937116
BUILD=kot49h
fi # klp-dev
ROOTDEVICE=deb
DEVICE=deb
MANUFACTURER=asus
for COMPANY in asus broadcom qcom
do
echo Processing files from $COMPANY
rm -rf tmp
FILEDIR=tmp/vendor/$COMPANY/$DEVICE/proprietary
mkdir -p $FILEDIR
mkdir -p tmp/vendor/$MANUFACTURER/$ROOTDEVICE
case $COMPANY in
asus)
TO_EXTRACT="\
system/etc/apns-conf.xml \
system/lib/libacdbdata.so \
system/lib/libAKM.so \
"
;;
broadcom)
TO_EXTRACT="\
system/vendor/firmware/bcm2079x-b5_firmware.ncd \
system/vendor/firmware/bcm2079x-b5_pre_firmware.ncd \
"
;;
qcom)
TO_EXTRACT="\
system/bin/ATFWD-daemon \
system/bin/bridgemgrd \
system/bin/btnvtool \
system/bin/diag_klog \
system/bin/diag_mdlog \
system/bin/ds_fmc_appd \
system/bin/efsks \
system/bin/hci_qcomm_init \
system/bin/irsc_util \
system/bin/ks \
system/bin/mm-qcamera-app \
system/bin/mm-qcamera-daemon \
system/bin/mm-qjpeg-enc-test \
system/bin/mm-qomx-ienc-test \
system/bin/mpdecision \
system/bin/netmgrd \
system/bin/nl_listener \
system/bin/port-bridge \
system/bin/qcks \
system/bin/qmuxd \
system/bin/qseecomd \
system/bin/radish \
system/bin/rmt_storage \
system/bin/sensors.qcom \
system/bin/thermald \
system/bin/usbhub \
system/bin/usbhub_init \
system/etc/firmware/vidc_1080p.fw \
system/etc/firmware/vidc.b00 \
system/etc/firmware/vidc.b01 \
system/etc/firmware/vidc.b02 \
system/etc/firmware/vidc.b03 \
system/etc/firmware/vidcfw.elf \
system/etc/firmware/vidc.mdt \
system/etc/gps.conf \
system/vendor/lib/egl/eglsubAndroid.so \
system/vendor/lib/egl/libEGL_adreno.so \
system/vendor/lib/egl/libGLESv1_CM_adreno.so \
system/vendor/lib/egl/libGLESv2_adreno.so \
system/vendor/lib/egl/libplayback_adreno.so \
system/vendor/lib/egl/libq3dtools_adreno.so \
system/lib/hw/flp.msm8960.so \
system/lib/hw/gps.msm8960.so \
system/lib/hw/sensors.msm8960.so \
system/lib/libacdbloader.so \
system/vendor/lib/libadreno_utils.so \
system/lib/libaudcal.so \
system/lib/libaudioalsa.so \
system/vendor/lib/libC2D2.so \
system/vendor/lib/libc2d30-a3xx.so \
system/vendor/lib/libc2d30.so \
system/vendor/lib/libCB.so \
system/lib/libchromatix_ov5693_common.so \
system/lib/libchromatix_ov5693_default_video.so \
system/lib/libchromatix_ov5693_preview.so \
system/lib/libCommandSvc.so \
system/lib/libconfigdb.so \
system/lib/libcsd-client.so \
system/lib/libdiag.so \
system/lib/libdrmdiag.so \
system/lib/libdrmfs.so \
system/lib/libdrmtime.so \
system/lib/libdsi_netctrl.so \
system/lib/libdsprofile.so \
system/lib/libdss.so \
system/lib/libdsucsd.so \
system/lib/libdsutils.so \
system/lib/libDxHdcp.so \
system/lib/libgps.utils.so \
system/vendor/lib/libgsl.so \
system/lib/libI420colorconvert.so \
system/lib/libidl.so \
system/vendor/lib/libllvm-qcom.so \
system/lib/libloc_core.so \
system/lib/libloc_eng.so \
system/lib/libmm-abl.so \
system/lib/libmmcamera2_stats_algorithm.so \
system/lib/libmmcamera_image_stab.so \
system/lib/libmmcamera_mi1040.so \
system/lib/libmmcamera_ov5693.so \
system/lib/libmm-color-convertor.so \
system/lib/libnetmgr.so \
system/lib/liboemcrypto.so \
system/lib/libqcci_legacy.so \
system/lib/libqdi.so \
system/lib/libqdp.so \
system/lib/libqmi_cci.so \
system/lib/libqmi_client_qmux.so \
system/lib/libqmi_common_so.so \
system/lib/libqmi_csi.so \
system/lib/libqmi_csvt_srvc.so \
system/lib/libqmi_encdec.so \
system/lib/libqmiservices.so \
system/lib/libqmi.so \
system/lib/libQSEEComAPI.so \
system/lib/libril-qc-qmi-1.so \
system/lib/libril-qcril-hook-oem.so \
system/vendor/lib/librs_adreno_sha1.so \
system/vendor/lib/librs_adreno.so \
system/vendor/lib/libRSDriver_adreno.so \
system/vendor/lib/libsc-a3xx.so \
system/lib/libsensor1.so \
system/lib/libsensor_reg.so \
system/lib/libsensor_user_cal.so \
system/lib/libstagefright_hdcp.so \
system/lib/libxml.so \
system/vendor/firmware/a300_pfp.fw \
system/vendor/firmware/a300_pm4.fw \
system/vendor/firmware/discretix/dxhdcp2.b00 \
system/vendor/firmware/discretix/dxhdcp2.b01 \
system/vendor/firmware/discretix/dxhdcp2.b02 \
system/vendor/firmware/discretix/dxhdcp2.b03 \
system/vendor/firmware/discretix/dxhdcp2.mdt \
system/vendor/firmware/dsps.b00 \
system/vendor/firmware/dsps.b01 \
system/vendor/firmware/dsps.b02 \
system/vendor/firmware/dsps.b03 \
system/vendor/firmware/dsps.b04 \
system/vendor/firmware/dsps.b05 \
system/vendor/firmware/dsps.mdt \
system/vendor/firmware/gss.b00 \
system/vendor/firmware/gss.b01 \
system/vendor/firmware/gss.b02 \
system/vendor/firmware/gss.b03 \
system/vendor/firmware/gss.b04 \
system/vendor/firmware/gss.b05 \
system/vendor/firmware/gss.b06 \
system/vendor/firmware/gss.b07 \
system/vendor/firmware/gss.b08 \
system/vendor/firmware/gss.b09 \
system/vendor/firmware/gss.b10 \
system/vendor/firmware/gss.b11 \
system/vendor/firmware/gss.mdt \
system/vendor/firmware/keymaster/keymaster.b00 \
system/vendor/firmware/keymaster/keymaster.b01 \
system/vendor/firmware/keymaster/keymaster.b02 \
system/vendor/firmware/keymaster/keymaster.b03 \
system/vendor/firmware/keymaster/keymaster.mdt \
system/vendor/firmware/q6.b00 \
system/vendor/firmware/q6.b01 \
system/vendor/firmware/q6.b03 \
system/vendor/firmware/q6.b04 \
system/vendor/firmware/q6.b05 \
system/vendor/firmware/q6.b06 \
system/vendor/firmware/q6.mdt \
system/vendor/firmware/tzapps.b00 \
system/vendor/firmware/tzapps.b01 \
system/vendor/firmware/tzapps.b02 \
system/vendor/firmware/tzapps.b03 \
system/vendor/firmware/tzapps.mdt \
system/vendor/firmware/wcnss.b00 \
system/vendor/firmware/wcnss.b01 \
system/vendor/firmware/wcnss.b02 \
system/vendor/firmware/wcnss.b04 \
system/vendor/firmware/wcnss.b05 \
system/vendor/firmware/wcnss.mdt \
system/vendor/lib/libdrmdecrypt.so \
system/vendor/lib/libgemini.so \
system/vendor/lib/libgeofence.so \
system/vendor/lib/libimage-jpeg-enc-omx-comp.so \
system/vendor/lib/libimage-omx-common.so \
system/vendor/lib/libizat_core.so \
system/vendor/lib/libloc_api_v02.so \
system/vendor/lib/libloc_ds_api.so \
system/vendor/lib/libmmcamera2_c2d_module.so \
system/vendor/lib/libmmcamera2_cpp_module.so \
system/vendor/lib/libmmcamera2_iface_modules.so \
system/vendor/lib/libmmcamera2_imglib_modules.so \
system/vendor/lib/libmmcamera2_isp_modules.so \
system/vendor/lib/libmmcamera2_pproc_modules.so \
system/vendor/lib/libmmcamera2_sensor_modules.so \
system/vendor/lib/libmmcamera2_stats_modules.so \
system/vendor/lib/libmmcamera2_vpe_module.so \
system/vendor/lib/libmmcamera2_wnr_module.so \
system/vendor/lib/libmmcamera_faceproc.so \
system/vendor/lib/libmmcamera_imglib.so \
system/vendor/lib/libmmipl.so \
system/vendor/lib/libmmjpeg.so \
system/vendor/lib/libmmqjpeg_codec.so \
system/vendor/lib/libmmstillomx.so \
system/vendor/lib/liboemcamera.so \
system/vendor/lib/libqomx_jpegenc.so \
"
;;
esac
echo \ \ Extracting files from OTA package
for ONE_FILE in $TO_EXTRACT
do
echo \ \ \ \ Extracting $ONE_FILE
unzip -j -o $ZIP $ONE_FILE -d $FILEDIR > /dev/null || echo \ \ \ \ Error extracting $ONE_FILE
if test $ONE_FILE = system/vendor/bin/gpsd -o $ONE_FILE = system/vendor/bin/pvrsrvinit -o $ONE_FILE = system/bin/fRom
then
chmod a+x $FILEDIR/$(basename $ONE_FILE) || echo \ \ \ \ Error chmoding $ONE_FILE
fi
done
echo \ \ Setting up $COMPANY-specific makefiles
cp -R $COMPANY/staging/* tmp/vendor/$COMPANY/$DEVICE || echo \ \ \ \ Error copying makefiles
echo \ \ Setting up shared makefiles
cp -R root/* tmp/vendor/$MANUFACTURER/$ROOTDEVICE || echo \ \ \ \ Error copying makefiles
echo \ \ Generating self-extracting script
SCRIPT=extract-$COMPANY-$DEVICE.sh
cat PROLOGUE > tmp/$SCRIPT || echo \ \ \ \ Error generating script
cat $COMPANY/COPYRIGHT >> tmp/$SCRIPT || echo \ \ \ \ Error generating script
cat PART1 >> tmp/$SCRIPT || echo \ \ \ \ Error generating script
cat $COMPANY/LICENSE >> tmp/$SCRIPT || echo \ \ \ \ Error generating script
cat PART2 >> tmp/$SCRIPT || echo \ \ \ \ Error generating script
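# Self-extraction trick: the next line appends `tail -n +N $0 | tar zxv`, where N is
# computed from the combined line count of the assembled parts, so tail skips the
# script text and streams the gzipped tarball that is concatenated onto it below.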
echo tail -n +$(expr 2 + $(cat PROLOGUE $COMPANY/COPYRIGHT PART1 $COMPANY/LICENSE PART2 PART3 | wc -l)) \$0 \| tar zxv >> tmp/$SCRIPT || echo \ \ \ \ Error generating script
cat PART3 >> tmp/$SCRIPT || echo \ \ \ \ Error generating script
(cd tmp ; tar zc --owner=root --group=root vendor/ >> $SCRIPT || echo \ \ \ \ Error generating embedded tgz)
chmod a+x tmp/$SCRIPT || echo \ \ \ \ Error generating script
ARCHIVE=$COMPANY-$DEVICE-$BUILD-$(md5sum < tmp/$SCRIPT | cut -b -8 | tr -d \\n).tgz
rm -f $ARCHIVE
echo \ \ Generating final archive
(cd tmp ; tar --owner=root --group=root -z -c -f ../$ARCHIVE $SCRIPT || echo \ \ \ \ Error archiving script)
rm -rf tmp
done
|
MTK6580/walkie-talkie
|
ALPS.L1.MP6.V2_HEXING6580_WE_L/alps/device/asus/deb/self-extractors/generate-packages.sh
|
Shell
|
gpl-3.0
| 11,872 |
##Script to get the allele frequency in the 1000 Genomes Project, using the file /scratch/inmegen/100g/references/1000g-phase_3-allele-frequency/1000GENOMES-phase_3.vcf.gz previously indexed by tabix.
##The script takes a list of variant coordinates in tab format: "chr coordinate rsdbSNP"
##The tabix program has to be in the $PATH
#1000G project file with AF
GP="/scratch/inmegen/100g/references/1000g-phase_3-allele-frequency/1000GENOMES-phase_3.vcf.gz"
#Defining the variables for the input -i and output -o files, parsed with getopts
usage() { echo "Usage: $0 -i [list of variants in tab format: \"chr coordinate rsdbSNP\"] -o [output file]" 1>&2; exit 1; }
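# Example invocation (hypothetical file names): sh AF-for-traits-in1000G.sh -i variants.tsv -o af_out.tsv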
while getopts ":i:o:" op; do
case "${op}" in
i)
i=${OPTARG}
;;
o)
out=${OPTARG}
;;
*)
usage
;;
esac
done
shift $((OPTIND-1))
if [ -z "${i}" ] || [ -z "${out}" ]; then
usage
fi
echo "input = ${i}"
echo "output wiil be writen into: ${out}"
#Getting the coordinates with tabix
IFS='
'
printf "chr\tpos\trs\tAMR\tEAS\tSAS\tEUR\tAFR\n" > $out
for q in $(cat $i)
do
chr=$(echo "$q" | cut -f 1)
pos=$(echo "$q" | cut -f 2)
rs=$(echo "$q" | cut -f 3)
t=$(tabix $GP ${chr}:${pos}-${pos})
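# Each awk call splits the INFO string on a population tag (e.g. "EAS_AF="),
# then cut trims everything after the next ';' to leave just that allele frequency.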
EAS=$(echo $t | awk 'BEGIN{FS="EAS_AF="} {print $2}' | cut -d ";" -f 1)
AFR=$(echo $t | awk 'BEGIN{FS="AFR_AF="} {print $2}' | cut -d ";" -f 1)
EUR=$(echo $t | awk 'BEGIN{FS="EUR_AF="} {print $2}' | cut -d ";" -f 1)
SAS=$(echo $t | awk 'BEGIN{FS="SAS_AF="} {print $2}' | cut -d ";" -f 1)
AMR=$(echo $t | awk 'BEGIN{FS="AMR_AF="} {print $2}' | cut -d ";" -f 1)
printf "$chr\t$pos\t$rs\t$AMR\t$EAS\t$SAS\t$EUR\t$AFR\n"
done >> $out
|
rgarcia-herrera/100g
|
scripts/gwas-catalog-analysis/AF-for-traits-in1000G.sh
|
Shell
|
gpl-3.0
| 1,665 |
#!/bin/bash
#This bash script checks to see if a file by the name "MON.txt" exists. If not, it creates the file.
#Example: If Sep.txt exists, nothing happens. If Sep.txt does not exist, then an empty file viz., Sep.txt
#is created
file=/home/dnaneet/Desktop/$(date +"%b").txt
if [ -e "$file" ]; then
echo " "
else
touch "$file"
fi
#touch $(date +"%b-%d-%y").csv
|
dnaneet/ELC
|
tracking/alpha/fcreate2.sh
|
Shell
|
gpl-3.0
| 386 |
#!/bin/sh
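# Verify code signatures recursively under "$1"; the sed filters out the two expected
# "bundle format unrecognized"/"not signed at all" messages and strips each remaining
# line down to its path.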
find "$1" -not -type l -exec codesign --verify --strict -v '{}' ';' 2>&1 | \
sed -E '/^.*: bundle format unrecognized, invalid, or unsuitable$/d;
/^.*: code object is not signed at all$/d;
s/^(.*): .*$/\1/' | uniq
|
dmarmor/epichrome
|
scripts/find_codesign.sh
|
Shell
|
gpl-3.0
| 229 |
#!/bin/bash
if [ $# -ne 2 ]
then
echo "Usage: `basename $0` <numDocuments> <cluedata=1,tweets=2,emails=3,ymusic=4,gnews=5,wiki=6,disease=7>"
exit 3
fi
############################################################
# Environment Variables Set
############################################################
if [ -z "${HADOOP_HOME}" ] || [ -z "${JAVA_VERSION}" ]
then
echo "ERROR: either HADOOP_HOME or JAVA_VERSION is not set."
exit 1
fi
############################################################
numdocs=$1
benchmark=$2
xmlconf=../../conf/partitioning/conf.xml
partjar=../../target/partitioning.jar
run_hadoop=${HADOOP_HOME}/bin/hadoop
############################################################
# Run Preprocessing
############################################################
cd ../preprocess
if [[ $benchmark -ne 0 && $numdocs -ne 0 ]]
then
if [ $benchmark -eq 1 ] ##Clueweb
then
bash runc.sh $numdocs
elif [ $benchmark -eq 2 ] ##Twitter
then
bash runt.sh $numdocs
elif [ $benchmark -eq 3 ] ## emails
then
bash rune.sh $numdocs
elif [ $benchmark -eq 4 ] ## ymusic
then
bash runym.sh $numdocs
elif [ $benchmark -eq 5 ] ## gnews
then
bash rung.sh $numdocs
elif [ $benchmark -eq 6 ] ## wiki
then
bash runw.sh $numdocs
else
bash rund.sh $numdocs ##disease
fi
fi
############################################################
# Run Partitioning
############################################################
cd ../partition
$run_hadoop jar $partjar -conf $xmlconf
|
ucsb-similarity/pss
|
build/partition/run.sh
|
Shell
|
apache-2.0
| 2,007 |
#!/bin/bash
set -o errexit
set -o pipefail
if ! which golint &>/dev/null; then
echo "Unable to detect 'golint' package"
echo "To install it, run: 'go get github.com/golang/lint/golint'"
exit 1
fi
GO_VERSION=($(go version))
echo "Detected go version: $(go version)"
if [[ ${GO_VERSION[2]} != "go1.2" && ${GO_VERSION[2]} != "go1.3.1" && ${GO_VERSION[2]} != "go1.3.3" ]]; then
echo "Unknown go version, skipping golint."
exit 0
fi
OS_ROOT=$(dirname "${BASH_SOURCE}")/..
source "${OS_ROOT}/hack/common.sh"
cd "${OS_ROOT}"
arg="${1:-""}"
bad_files=""
if [ "$arg" == "-m" ]; then
head=$(git rev-parse --short HEAD | xargs echo -n)
bad_files=$(git diff-tree --no-commit-id --name-only -r master..$head | \
grep "^pkg" | grep ".go$" | grep -v "bindata.go$" | grep -v "Godeps" | \
grep -v "third_party" | xargs golint)
else
find_files() {
find . -not \( \
\( \
-wholename './Godeps' \
-o -wholename './release' \
-o -wholename './target' \
-o -wholename './test' \
-o -wholename './pkg/assets/bindata.go' \
-o -wholename '*/Godeps/*' \
-o -wholename '*/third_party/*' \
-o -wholename '*/_output/*' \
\) -prune \
\) -name '*.go' | sort -u | sed 's|^\./||' | xargs -n1 printf "${GOPATH}/src/${OS_GO_PACKAGE}/%s\n"
}
bad_files=$(find_files | xargs -n1 golint)
fi
if [[ -n "${bad_files}" ]]; then
echo "golint detected following problems:"
echo "${bad_files}"
exit 1
fi
|
mrunalp/origin
|
hack/verify-golint.sh
|
Shell
|
apache-2.0
| 1,485 |
#!/bin/bash
cat > nginx-deployment.yaml << EOF
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: nginx-deployment
spec:
replicas: 3
selector:
matchLabels:
app: nginx
env: prod
template:
metadata:
name: nginx
labels:
app: nginx
env: prod
rev: "${BUILD_COMMIT}"
spec:
containers:
- name: nginx
image: ${DOCKER_REPO}:${IMAGE_TAG}
ports:
- name: http
containerPort: 80
- name: https
containerPort: 443
readinessProbe:
httpGet:
path: /
port: 443
scheme: HTTPS
initialDelaySeconds: 5
timeoutSeconds: 1
livenessProbe:
httpGet:
path: /
port: 443
scheme: HTTPS
initialDelaySeconds: 10
timeoutSeconds: 5
volumeMounts:
- name: nginx-config
mountPath: /etc/config
- name: nginx-nfs-pvc
mountPath: /srv/
volumes:
- name: nginx-config
configMap:
name: nginx-config
- name: nginx-nfs-pvc
persistentVolumeClaim:
claimName: nginx-nfs-pvc
EOF
|
metral/nginx
|
k8s/prod/deployment/create-nginx-deployment.yaml.sh
|
Shell
|
apache-2.0
| 1,348 |
#! /bin/bash
for i in `seq 1 5`;
do
DIR=$EQ_WORKING_DIR/zookeeper$i/
# if [ ! -d $DIR ];
# then
# mkdir $DIR
# echo $i > $DIR/myid
# fi
# CFG=$EQ_WORKING_DIR/quorumconf/$i/zoo.cfg
# TMPCFG=$CFG.tmp
# echo "CFG:" $CFG
# echo "TMPCFG:" $TMPCFG
# sed "s#dataDir=#dataDir=$DIR#" $CFG > $TMPCFG
# mv $TMPCFG $CFG
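# Start each quorum member instrumented for the Earthquake tester (the EQ_* vars
# identify the entity); the -javaagent flag loads a Byteman agent whose server.btm
# script injects faults into the server.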
EQ_MODE_DIRECT=1 EQ_ENV_ENTITY_ID=zksrv$i EQ_NO_INITIATION=1 SERVER_JVMFLAGS="-javaagent:$AGENT_CP=script:$EQ_MATERIALS_DIR/server.btm" ZOO_LOG_DIR=$DIR/logs/$i/ $EQ_MATERIALS_DIR/zookeeper/bin/zkServer.sh --config $EQ_WORKING_DIR/quorumconf/$i start
done
|
AkihiroSuda/earthquake
|
example/not-so-much-useful/zk.byteman.combination-of-operation/materials/1-9/quorumStart.sh
|
Shell
|
apache-2.0
| 603 |
# UBOOTWPDA config http://www.twpda.com/2013/08/uboot-code.html
# NOTE: This file's length should be under 988 bytes
# NOTE: This file's length should under 988 bytes
# bootargs(old): root=8:1 means sda1, sda2 8:2, sdb1 8:17, sdb2 8:18
# bootargs(new): root=/dev/sda2
# bootcmd: 0:1 first part, 0:2 2nd part
# usbcfg: 0:internal, 1:external
# boot settings for sh4twbox install disk
# * bootargs: root=8:1 means sda1, sda2 8:2, sdb1 8:17, sdb2 8:18
# * bootcmd: 0:1 first usb parition, 0:2 2nd usb parition
setenv bootargs 'console=ttyAS0,115200 rootdelay=0 rw rootflags=data=journal nwhwconf=device:eth0,hwaddr:10:08:E2:12:06:BD phyaddr:0,watchdog:5000 mem=256M bigphysarea=2048'
#setenv bootcmd 'usbcfg 0; usb start; usb info; usb part; ext2load usb 0:1 80000000 vmlinux.ub; bootm 80000000'
setenv bootcmd 'fatload usb 0:1 80000000 initrd.ub; bootm 80000000'
|
dlintw/sh4twbox-multiboot
|
example/boot/1_recovery/uboot.sh
|
Shell
|
apache-2.0
| 811 |
#!/bin/bash
#
# BASH OPTIONS
#
set -e # Exit when an error occurs
set -u # Exit when undefined variable
#set -x # Enable bash trace
#
# SCRIPT GLOBAL VARIABLES
#
NUM_FILES=10
#
# HELPER METHODS
#
get_args() {
output_path=$1
sleep_time=$2
}
check_args() {
echo "Received arguments:"
echo " - Output path: ${output_path}"
echo " - Sleep time: ${sleep_time}"
# Check output path
if [ ! -d "${output_path}" ]; then
echo "ERROR: Invalid output path ${output_path}"
exit 1
fi
# Check sleep time
if ! [[ "${sleep_time}" =~ ^[0-9]+$ ]]; then
echo "ERROR: Sleep time is not an integer"
exit 2
fi
}
write_files() {
for (( i=0; i<NUM_FILES; i++ )); do
# Write file
file_name=$(mktemp -p "${output_path}")
echo "WRITING FILE: ${file_name}"
cat > "${file_name}" << EOT
Test ${i}
EOT
# Sleep between generated files
sleep "${sleep_time}s"
done
}
#
# MAIN METHOD
#
main() {
# Retrieve arguments
get_args "$@"
# Check arguments
check_args
# Write files
write_files
}
#
# ENTRY POINT
#
main "$@"
|
mF2C/COMPSs
|
tests/sources/python/9_stream_non_native/src/scripts/write_files.sh
|
Shell
|
apache-2.0
| 1,077 |
#!/bin/sh
#-- @@@ START COPYRIGHT @@@
#--
#-- Licensed to the Apache Software Foundation (ASF) under one
#-- or more contributor license agreements. See the NOTICE file
#-- distributed with this work for additional information
#-- regarding copyright ownership. The ASF licenses this file
#-- to you under the Apache License, Version 2.0 (the
#-- "License"); you may not use this file except in compliance
#-- with the License. You may obtain a copy of the License at
#--
#-- http://www.apache.org/licenses/LICENSE-2.0
#--
#-- Unless required by applicable law or agreed to in writing,
#-- software distributed under the License is distributed on an
#-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
#-- KIND, either express or implied. See the License for the
#-- specific language governing permissions and limitations
#-- under the License.
#--
#-- @@@ END COPYRIGHT @@@
# process the parser through bison to get a list of shift/reduce and reduce/reduce conflicts
# in file sqlparser.output. Remove the directory name from the output.
topdir=$1
bisondir=${TOOLSDIR}/bison_3_linux/share/bison
bisonexedir=${TOOLSDIR}/bison_3_linux/bin
parserdir=$topdir/parser
toolsdir=$topdir/regress/tools
# m4 is a utility needed by bison
export M4=$bisonexedir/m4
export BISON_PKGDATADIR=$bisondir
$bisonexedir/bison -v $parserdir/sqlparser.y 2>&1 | sed -r 's/.+sqlparser\.y/sqlparser.y/' >LOGTOK;
# extract a list of conflicts from the sqlparser.output file
awk '/State [0-9]+ conflicts:/ { printf "%06d ", $2; print } ' sqlparser.output | grep State | sed -r 's/ State [0-9]+//' >LOGTOK_conflicts
# extract a list of parser states (state number and first descriptive line) from the parser output file
awk '/^State 0$/,/untilthelastline/ { print }' sqlparser.output | awk '/^State [0-9]+$/ { printf "%06d ", $2; getline; getline; print }' >LOGTOK_gramm
# join the two extracted files on the state number (first 6 digits)
join LOGTOK_conflicts LOGTOK_gramm >LOGTOK_join
# replace state numbers with nnnn, so unrelated parser changes don't cause this test to fail
echo " " >>LOGTOK
cat LOGTOK_join | sed -r 's/^[0-9]+ conflicts/nnnn conflicts/' | sed -r 's/reduce [0-9]+/reduce nnnn/' >>LOGTOK
# delete some of the larger output files produced (uncomment for debugging)
rm sqlparser.output sqlparser.tab.c;
|
apache/incubator-trafodion
|
core/sql/regress/compGeneral/TESTTOK.sh
|
Shell
|
apache-2.0
| 2,319 |
gcc -o aes_pthread aes_pthread.c -std=c99 -lpthread -lm
./aes_pthread -n 100 -t 16 -b 10
|
tancheng/iot-kernels-locus
|
testbench/decpt_dtw_aes/pthread/pthread_compile.sh
|
Shell
|
bsd-3-clause
| 89 |
#!/bin/sh
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
gen_build_yaml_dirs=" \
src/boringssl \
src/proto \
src/zlib \
test/core/bad_client \
test/core/bad_ssl \
test/core/end2end"
gen_build_files=""
for gen_build_yaml in $gen_build_yaml_dirs
do
output_file=`mktemp /tmp/genXXXXXX`
$gen_build_yaml/gen_build_yaml.py > $output_file
gen_build_files="$gen_build_files $output_file"
done
|
ananthonline/grpc
|
tools/buildgen/generate_build_additions.sh
|
Shell
|
bsd-3-clause
| 1,920 |
#!/bin/bash
rm -f arch/arm/boot/dts/*.dtb
rm -f arch/arm/boot/dt.img
rm -f cwm_flash_zip/boot.img
make -j10 zImage
make -j10 dtimage
make -j10 modules
rm -rf squid_install
mkdir -p squid_install
make -j10 modules_install INSTALL_MOD_PATH=squid_install INSTALL_MOD_STRIP=1
mkdir -p cwm_flash_zip/system/lib/modules/pronto
find squid_install/ -name '*.ko' -type f -exec cp '{}' cwm_flash_zip/system/lib/modules/ \;
mv cwm_flash_zip/system/lib/modules/wlan.ko cwm_flash_zip/system/lib/modules/pronto/pronto_wlan.ko
cp arch/arm/boot/zImage cwm_flash_zip/tools/
cp arch/arm/boot/dt.img cwm_flash_zip/tools/
rm -f arch/arm/boot/squid_kernel.zip
cd cwm_flash_zip
zip -r ../arch/arm/boot/squid_kernel.zip ./
|
boulzordev/android_kernel_motorola_msm8916
|
build_cwm_zip.sh
|
Shell
|
gpl-2.0
| 700 |
#!/bin/bash
#
# Copyright (C) 2014, 2015 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): Chris Lumens <[email protected]>
# This script runs the entire kickstart_tests suite. It is an interface
# between "make check" (which is why it takes environment variables instead
# of arguments) and livemedia-creator. Each test consists of a kickstart
# file that specifies most everything about the installation, and a shell
# script that does validation and specifies kernel boot parameters. lmc
# then fires up a VM and watches for tracebacks or stuck installs.
#
# A boot ISO is required, which should be specified with TEST_BOOT_ISO=.
#
# The number of jobs corresponds to the number of VMs that will be started
# simultaneously. Each one wants about 2 GB of memory. The default is
# two simultaneous jobs, but you can control this with TEST_JOBS=. Do not
# set more jobs than your available memory can support.
#
# You can control what logs are held onto after the test is complete via the
# KEEPIT= variable, explained below. By default, nothing is kept.
#
# Finally, you can run tests across multiple computers at the same time by
# putting all the hostnames into TEST_REMOTES= as a space separated list.
# Do not add localhost manually, as it will always be added for you. You
# must create a user named kstest on each remote system, allow that user to
# sudo to root for purposes of running livemedia-creator, and have ssh keys
# set up so that the user running this script can login to the remote systems
# as kstest without a password. TEST_JOBS= applies on a per-system basis.
# KEEPIT= controls how much will be kept on the master system (where "make
# check" is run). All results will be removed from the slave systems.
# The boot.iso location can come from one of two different places:
# (1) $TEST_BOOT_ISO, if this script is being called from "make check"
# (2) The command line, if this script is being called directly.
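# For example (paths here are illustrative):
#   TEST_BOOT_ISO=/path/to/boot.iso make check
#   ./run_kickstart_tests.sh /path/to/boot.iso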
IMAGE=""
if [[ "${TEST_BOOT_ISO}" != "" ]]; then
IMAGE=${TEST_BOOT_ISO}
elif [[ $# != 0 ]]; then
IMAGE=$1
shift
fi
if [[ ! -e "${IMAGE}" ]]; then
echo "Required boot.iso does not exist; skipping."
exit 77
fi
# Possible values for this parameter:
# 0 - Keep nothing (the default)
# 1 - Keep log files
# 2 - Keep log files and disk images (will take up a lot of space)
KEEPIT=${KEEPIT:-0}
# This is for environment variables that parallel needs to pass to
# remote systems. Put anything here that test cases care about or
# they won't work when run on some systems.
#
# NOTE: You will also need to add these to the list in /etc/sudoers
# if you are using env_reset there, or they will not get passed from
# this script to parallel.
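# An illustrative sudoers entry that keeps these variables alive:
#   Defaults env_keep += "TEST_OSTREE_REPO TEST_NFS_SERVER TEST_NFS_PATH"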
env_args="--env TEST_OSTREE_REPO --env TEST_NFS_SERVER --env TEST_NFS_PATH --env TEST_ADDON_NFS_REPO --env TEST_ADDON_HTTP_REPO"
# Round up all the kickstart tests we want to run, skipping those that are not
# executable as well as this file itself.
find kickstart_tests -name '*sh' -a -perm -o+x -a \! -wholename 'kickstart_tests/run_*.sh' | \
if [[ "$TEST_REMOTES" != "" ]]; then
_IMAGE=kickstart_tests/$(basename ${IMAGE})
# (1) Copy everything to the remote systems. We do this ourselves because
# parallel doesn't like globs, and we need to put the boot image somewhere
# that qemu on the remote systems can read.
for remote in ${TEST_REMOTES}; do
scp -r kickstart_tests kstest@${remote}:
scp ${IMAGE} kstest@${remote}:kickstart_tests/
done
# (1a) We also need to copy the provided image to under kickstart_tests/ on
# the local system too. This is because parallel will attempt to run the
# same command line on every system and that requires the image to also be
# in the same location.
cp ${IMAGE} ${_IMAGE}
# (2) Run parallel. We always add the local system to the list of machines
# being passed to parallel. Don't add it yourself.
remote_args="--sshlogin :"
for remote in ${TEST_REMOTES}; do
remote_args="${remote_args} --sshlogin kstest@${remote}"
done
parallel --no-notice ${remote_args} \
${env_args} --jobs ${TEST_JOBS:-2} \
sudo kickstart_tests/run_one_ks.sh -i ${_IMAGE} -k ${KEEPIT} {}
rc=$?
# (3) Get all the results back from the remote systems, which will have already
# applied the KEEPIT setting. However if KEEPIT is 0 (meaning, don't save
# anything) there's no point in trying. We do this ourselves because, again,
# parallel doesn't like globs.
#
# We also need to clean up the stuff we copied over in step 1, and then clean up
# the results from the remotes too. We don't want to keep things scattered all
# over the place.
for remote in ${TEST_REMOTES}; do
if [[ ${KEEPIT} -gt 0 ]]; then
scp -r kstest@${remote}:/var/tmp/kstest-\* /var/tmp/
fi
ssh kstest@${remote} sudo rm -rf kickstart_tests /var/tmp/kstest-\*
done
# (3a) And then also remove the copy of the image we made earlier.
rm ${_IMAGE}
# (4) Exit the subshell defined by "find ... | " way up at the top. The exit
# code will be caught outside and converted into the overall exit code.
exit ${rc}
else
parallel --no-notice ${env_args} --jobs ${TEST_JOBS:-2} \
sudo kickstart_tests/run_one_ks.sh -i ${IMAGE} -k ${KEEPIT} {}
# For future expansion - any cleanup code can go in between the variable
# setting and the exit, like in the other branch of the if-else above.
rc=$?
exit ${rc}
fi
# Catch the exit code of the subshell and return it. This is structured for
# future expansion, too. Any extra global cleanup code can go in between the
# variable setting and the exit.
rc=$?
exit ${rc}
|
vpodzime/anaconda
|
tests/kickstart_tests/run_kickstart_tests.sh
|
Shell
|
gpl-2.0
| 6,635 |
#!/bin/bash
. `dirname $0`/functions.sh
check_one() {
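# check_one LOG TEXT FILE...: assert every FILE appears in LOG on a line
# beginning with TEXT, and that LOG has no extra lines beginning with TEXT.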
cnt=0
log=$1
echo -n . >> quick3.log
text="$2"
shift 2
while [ $# -gt 0 ]; do
grep -q "^$text .*$1" $log || exit 40
cnt=$((++cnt))
shift
done
[ `grep "^$text " $log | wc -l` = $cnt ] || exit 41
}
check_log() {
log=$1
echo -n "Checking $1 " >> quick3.log
check_one $log 'Checking executable' $CHECKE
check_one $log 'Checking shared library' $CHECKL
check_one $log 'Assuming prelinked' $ASSUME
check_one $log 'Prelinking' $PREL
echo >> quick3.log
}
PRELINK=`echo $PRELINK \
| sed -e 's, \./\(prelink\.\(cache\|conf\)\), quick3.tree/etc/\1,g' \
-e 's,path=\.,path=quick3.tree/lib:quick3.tree/usr/lib,' \
-e 's,linker=\./,linker=quick3.tree/lib/,'`
CCLINK=`echo $CCLINK \
| sed -e 's,linker=\./,linker=quick3.tree/lib/,'`
rm -rf quick3.tree
rm -f quick3.log
mkdir -p quick3.tree/{lib,etc,usr/lib,usr/bin}
$CC -shared -O2 -fpic -o quick3.tree/usr/lib/lib1.so $srcdir/reloc1lib1.c
$CC -shared -O2 -fpic -o quick3.tree/usr/lib/lib2.so $srcdir/reloc1lib2.c \
-L quick3.tree/usr/lib -l1 -Wl,-soname,lib2.so
for lib in `cat syslib.list`; do
cp -p $lib.orig quick3.tree/lib/$lib
cp -p $lib.orig quick3.tree/lib/$lib.orig
done
for lib in `cat syslnk.list`; do
cp -dp $lib quick3.tree/lib
done
$CCLINK -o quick3.tree/usr/bin/bin1 $srcdir/reloc1.c \
-Wl,--rpath-link,quick3.tree/usr/lib -L quick3.tree/usr/lib -l2 -lc -l1
cat > quick3.tree/etc/prelink.conf <<EOF
quick3.tree/usr/bin
quick3.tree/lib
quick3.tree/usr/lib
EOF
LIBS="quick3.tree/usr/lib/lib1.so quick3.tree/usr/lib/lib2.so"
LIBS="$LIBS `sed 's|^|quick3.tree/lib/|' syslib.list`"
BINS="quick3.tree/usr/bin/bin1"
savelibs
chmod 644 `ls $BINS | sed 's|$|.orig|'`
# Make sure prelinked binaries and libraries will have different ctimes
# than mtimes
sleep 3s
echo $PRELINK ${PRELINK_OPTS--vm} -avvvvv > quick3.log
$PRELINK ${PRELINK_OPTS--vm} -avvvvv > quick3.tree/etc/log1 2>&1 || exit 1
cat quick3.tree/etc/log1 >> quick3.log
echo $PRELINK ${PRELINK_OPTS--vm} -aqvvvvv >> quick3.log
$PRELINK ${PRELINK_OPTS--vm} -aqvvvvv > quick3.tree/etc/log2 2>&1 || exit 2
cat quick3.tree/etc/log2 >> quick3.log
$CC -shared -O2 -fpic -o quick3.tree/usr/lib/lib2.so.0 $srcdir/reloc1lib2.c \
-L quick3.tree/usr/lib -l1 -Wl,-soname,lib2.so
rm -f quick3.tree/usr/lib/lib2.so{,.orig}
cp -p quick3.tree/usr/lib/lib2.so.0{,.orig}
ln -sf lib2.so.0 quick3.tree/usr/lib/lib2.so
sleep 3s
echo $PRELINK ${PRELINK_OPTS--vm} -aqvvvvv >> quick3.log
$PRELINK ${PRELINK_OPTS--vm} -aqvvvvv > quick3.tree/etc/log3 2>&1 || exit 3
cat quick3.tree/etc/log3 >> quick3.log
LD_LIBRARY_PATH=quick3.tree/lib:quick3.tree/usr/lib quick3.tree/usr/bin/bin1 || exit 4
LIBS="quick3.tree/usr/lib/lib1.so quick3.tree/usr/lib/lib2.so.0"
echo $PRELINK ${PRELINK_OPTS--vm} -aqvvvvv >> quick3.log
$PRELINK ${PRELINK_OPTS--vm} -aqvvvvv > quick3.tree/etc/log4 2>&1 || exit 5
cat quick3.tree/etc/log4 >> quick3.log
comparelibs >> quick3.log 2>&1 || exit 6
[ -L quick3.tree/usr/lib/lib2.so ] || exit 7
L=quick3.tree/usr/lib/lib
L1=${L}1.so; L2=${L}2.so; L3=${L}2.so.0
B1=quick3.tree/usr/bin/bin1
SL=`grep -f syslib.list quick3.tree/etc/log1 \
| sed -n '/^Prelinking/s|^.*\(quick3.tree/lib/\)|\1|p'`
CHECKE="$B1"; CHECKL="$SL $L1 $L2" PREL="$CHECKE $CHECKL"; ASSUME=""
check_log quick3.tree/etc/log1
CHECKE=""; CHECKL=""; PREL=""; ASSUME="$B1 $L1 $L2 $SL"
check_log quick3.tree/etc/log2
CHECKE="$B1"; CHECKL="$SL $L1 $L3"; PREL="$B1 $L3"; ASSUME=""
check_log quick3.tree/etc/log3
CHECKE=""; CHECKL=""; PREL=""; ASSUME="$B1 $L1 $L3 $SL"
check_log quick3.tree/etc/log4
for i in $B1 $SL $L1 $L3; do
cp -p $i $i.prelinked
done
for i in $B1; do
chmod 644 $i.prelinked
done
echo $PRELINK -uavvvvvv >> quick3.log
$PRELINK -uavvvvvv >> quick3.log 2>&1 || exit 31
for i in $B1 $SL $L1 $L3; do
cmp -s $i.orig $i || exit 32
mv -f $i.prelinked $i
done
chmod 755 $BINS
exit 0
|
leeduhem/prelink
|
testsuite/quick3.sh
|
Shell
|
gpl-2.0
| 3,897 |
#
# Copyright (C) 2010 OpenWrt.org
#
PART_NAME=firmware
REQUIRE_IMAGE_METADATA=1
RAMFS_COPY_BIN='fw_printenv fw_setenv'
RAMFS_COPY_DATA='/etc/fw_env.config /var/lock/fw_printenv.lock'
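# /etc/fw_env.config maps the U-Boot environment; its lines have the form
# "device offset env-size [sector-size]", e.g. (illustrative):
#   /dev/mtd1 0x0 0x1000 0x10000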
platform_check_image() {
return 0
}
platform_do_upgrade() {
local board=$(board_name)
case "$board" in
alfa-network,quad-e4g)
[ "$(fw_printenv -n dual_image 2>/dev/null)" = "1" ] &&\
[ -n "$(find_mtd_part backup)" ] && {
PART_NAME=backup
if [ "$(fw_printenv -n bootactive 2>/dev/null)" = "1" ]; then
fw_setenv bootactive 2 || exit 1
else
fw_setenv bootactive 1 || exit 1
fi
}
;;
ampedwireless,ally-00x19k|\
ampedwireless,ally-r1900k)
if [ "$(fw_printenv --lock / -n bootImage 2>/dev/null)" != "0" ]; then
fw_setenv --lock / bootImage 0 || exit 1
fi
;;
mikrotik,routerboard-750gr3|\
mikrotik,routerboard-760igs|\
mikrotik,routerboard-m11g|\
mikrotik,routerboard-m33g)
[ "$(rootfs_type)" = "tmpfs" ] && mtd erase firmware
;;
asus,rt-ac65p|\
asus,rt-ac85p)
echo "Backing up firmware"
dd if=/dev/mtd4 bs=1024 count=4096 > /tmp/backup_firmware.bin
dd if=/dev/mtd5 bs=1024 count=52224 >> /tmp/backup_firmware.bin
mtd -e firmware2 write /tmp/backup_firmware.bin firmware2
;;
esac
case "$board" in
ampedwireless,ally-00x19k|\
ampedwireless,ally-r1900k|\
asus,rt-ac65p|\
asus,rt-ac85p|\
dlink,dir-1960-a1|\
dlink,dir-2640-a1|\
dlink,dir-2660-a1|\
dlink,dir-853-a3|\
hiwifi,hc5962|\
jcg,q20|\
linksys,e5600|\
linksys,ea7300-v1|\
linksys,ea7300-v2|\
linksys,ea7500-v2|\
linksys,ea8100-v1|\
linksys,ea8100-v2|\
netgear,r6220|\
netgear,r6260|\
netgear,r6350|\
netgear,r6700-v2|\
netgear,r6800|\
netgear,r6850|\
netgear,wac104|\
netgear,wac124|\
netis,wf2881|\
sercomm,na502|\
xiaomi,mi-router-3g|\
xiaomi,mi-router-3-pro|\
xiaomi,mi-router-4|\
xiaomi,mi-router-ac2100|\
xiaomi,redmi-router-ac2100)
nand_do_upgrade "$1"
;;
iodata,wn-ax1167gr2|\
iodata,wn-ax2033gr|\
iodata,wn-dx1167r)
iodata_mstc_upgrade_prepare "0xfe75"
nand_do_upgrade "$1"
;;
iodata,wn-dx1200gr)
iodata_mstc_upgrade_prepare "0x1fe75"
nand_do_upgrade "$1"
;;
ubnt,edgerouter-x|\
ubnt,edgerouter-x-sfp)
platform_upgrade_ubnt_erx "$1"
;;
zyxel,nr7101)
fw_setenv CheckBypass 0
fw_setenv Image1Stable 0
CI_KERNPART="Kernel"
nand_do_upgrade "$1"
;;
zyxel,wap6805)
local kernel2_mtd="$(find_mtd_part Kernel2)"
[ "$(hexdump -n 4 -e '"%x"' $kernel2_mtd)" = "56190527" ] &&\
[ "$(hexdump -n 4 -s 104 -e '"%x"' $kernel2_mtd)" != "0" ] &&\
dd bs=4 count=1 seek=26 conv=notrunc if=/dev/zero of=$kernel2_mtd 2>/dev/null &&\
echo "Kernel2 sequence number was reset to 0"
CI_KERNPART="Kernel"
nand_do_upgrade "$1"
;;
*)
default_do_upgrade "$1"
;;
esac
}
|
teslamint/openwrt
|
target/linux/ramips/mt7621/base-files/lib/upgrade/platform.sh
|
Shell
|
gpl-2.0
| 2,741 |
AppVersionStrMajor=`./Version.sh | awk -F " " '{print $1}'`
AppVersion=`./Version.sh | awk -F " " '{print $2}'`
AppVersionStr=`./Version.sh | awk -F " " '{print $3}'`
DevChannel=`./Version.sh | awk -F " " '{print $4}'`
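# Version.sh is expected to print the four fields parsed above on one line,
# e.g. (illustrative): "0.9 9015 0.9.15 0"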
DevPostfix=''
DevParam=''
if [ "$DevChannel" != "0" ]; then
DevPostfix='.dev'
DevParam='-dev'
fi
if [ -d "./../Linux/Release/deploy/$AppVersionStrMajor/$AppVersionStr.dev" ]; then
echo "Deploy folder for version $AppVersionStr.dev already exists!"
exit 1
fi
if [ -d "./../Linux/Release/deploy/$AppVersionStrMajor/$AppVersionStr" ]; then
echo "Deploy folder for version $AppVersionStr already exists!"
exit 1
fi
if [ -f "./../Linux/Release/tlinuxupd$AppVersion" ]; then
echo "Update file for version $AppVersion already exists!"
exit 1
fi
if [ ! -f "./../Linux/Release/Telegram" ]; then
echo "Telegram not found!"
exit 1
fi
if [ ! -f "./../Linux/Release/Updater" ]; then
echo "Updater not found!"
exit 1
fi
echo "Preparing version $AppVersionStr$DevPostfix, executing Packer.."
cd ./../Linux/Release && ./Packer -path Telegram -path Updater -version $AppVersion $DevParam && cd ./../../Telegram
echo "Packer done!"
if [ ! -d "./../Linux/Release/deploy" ]; then
mkdir "./../Linux/Release/deploy"
fi
if [ ! -d "./../Linux/Release/deploy/$AppVersionStrMajor" ]; then
mkdir "./../Linux/Release/deploy/$AppVersionStrMajor"
fi
echo "Copying Telegram, Updater and tlinuxupd$AppVersion to deploy/$AppVersionStr$DevPostfix..";
mkdir "./../Linux/Release/deploy/$AppVersionStrMajor/$AppVersionStr$DevPostfix"
mkdir "./../Linux/Release/deploy/$AppVersionStrMajor/$AppVersionStr$DevPostfix/Telegram"
mv ./../Linux/Release/Telegram ./../Linux/Release/deploy/$AppVersionStrMajor/$AppVersionStr$DevPostfix/Telegram/
mv ./../Linux/Release/Updater ./../Linux/Release/deploy/$AppVersionStrMajor/$AppVersionStr$DevPostfix/Telegram/
mv ./../Linux/Release/tlinuxupd$AppVersion ./../Linux/Release/deploy/$AppVersionStrMajor/$AppVersionStr$DevPostfix/
cd ./../Linux/Release/deploy/$AppVersionStrMajor/$AppVersionStr$DevPostfix && tar -cJvf tsetup.$AppVersionStr$DevPostfix.tar.xz Telegram/ && cd ./../../../../../Telegram
echo "Version $AppVersionStr$DevPostfix prepared!";
|
Icenowy/tdesktop-vanilla
|
Telegram/PrepareLinux.sh
|
Shell
|
gpl-3.0
| 2,206 |
#!/usr/bin/env bash
set -o errexit
VERSION=1.27.0
DOWNLOAD=https://github.com/coreos/rkt/releases/download/v${VERSION}/rkt-v${VERSION}.tar.gz
function install_rkt() {
if [[ -e /usr/local/bin/rkt ]] ; then
if [ "rkt Version: ${VERSION}" == "$(rkt version | head -n1)" ] ; then
return
fi
fi
wget -q -O /tmp/rkt.tar.gz "${DOWNLOAD}"
tar -C /tmp -xvf /tmp/rkt.tar.gz
mv /tmp/rkt-v${VERSION}/rkt /usr/local/bin
mv /tmp/rkt-v${VERSION}/*.aci /usr/local/bin
}
function configure_rkt_networking() {
if [[ -e /etc/rkt/net.d/99-network.conf ]] ; then
return
fi
mkdir -p /etc/rkt/net.d
cat <<EOT > /etc/rkt/net.d/99-network.conf
{
"name": "default",
"type": "ptp",
"ipMasq": false,
"ipam": {
"type": "host-local",
"subnet": "172.16.28.0/24",
"routes": [
{
"dst": "0.0.0.0/0"
}
]
}
}
EOT
}
install_rkt
configure_rkt_networking
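# Quick sanity check (illustrative): the install is idempotent, so re-running
# this script is safe; "rkt version | head -n1" should now report "rkt Version: 1.27.0".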
|
Ashald/nomad
|
scripts/vagrant-linux-priv-rkt.sh
|
Shell
|
mpl-2.0
| 889 |
#!/usr/bin/env bash
set -e
###############################################################################
#
# edx-all-tests.sh
#
# Execute all tests for edx-platform.
#
# This script can be called from a Jenkins
# multiconfiguration job that defines these environment
# variables:
#
# `TEST_SUITE` defines which kind of test to run.
# Possible values are:
#
# - "quality": Run the quality (pep8/pylint) checks
# - "unit": Run the JavaScript and Python unit tests
# (also tests building the Sphinx documentation,
# because we couldn't think of a better place to put it)
# - "lms-acceptance": Run the acceptance (Selenium) tests for the LMS
# - "cms-acceptance": Run the acceptance (Selenium) tests for Studio
# - "bok-choy": Run acceptance tests that use the bok-choy framework
#
# `SHARD` is a number (1, 2, or 3) indicating which subset of the tests
# to build. Currently, "lms-acceptance" and "bok-choy" each have two
# shards (1 and 2), "cms-acceptance" has three shards (1, 2, and 3),
# and all the other test suites have one shard.
#
# For the "bok-choy", the tests are put into shard groups using the nose
# 'attr' decorator (e.g. "@attr('shard_1')"). Currently, anything with
# the 'shard_1' attribute will run in the first shard. All other bok-choy
# tests will run in shard 2.
#
# For the lettuce acceptance tests, ("lms-" and "cms-acceptance") they
# are decorated with "@shard_{}" (e.g. @shard_1 for the first shard).
# The lettuce tests must have a shard specified to be run in jenkins,
# as there is no shard that runs unspecified tests.
#
#
# Jenkins configuration:
#
# - The edx-platform git repository is checked out by the Jenkins git plugin.
#
# - Jenkins logs in as user "jenkins"
#
# - The Jenkins file system root is "/home/jenkins"
#
# - An init script creates a virtualenv at "/home/jenkins/edx-venv"
# with some requirements pre-installed (such as scipy)
#
# Jenkins worker setup:
# See the edx/configuration repo for Jenkins worker provisioning scripts.
# The provisioning scripts install requirements that this script depends on!
#
###############################################################################
# Clean up previous builds
git clean -qxfd
source scripts/jenkins-common.sh
# Violations thresholds for failing the build
PYLINT_THRESHOLD=6400
# If the environment variable 'SHARD' is not set, default to 'all'.
# This could happen if you are trying to use this script from
# jenkins and do not define 'SHARD' in your multi-config project.
# Note that you will still need to pass a value for 'TEST_SUITE'
# or else no tests will be executed.
SHARD=${SHARD:="all"}
case "$TEST_SUITE" in
"quality")
echo "Finding fixme's and storing report..."
paver find_fixme > fixme.log || { cat fixme.log; EXIT=1; }
echo "Finding pep8 violations and storing report..."
paver run_pep8 > pep8.log || { cat pep8.log; EXIT=1; }
echo "Finding pylint violations and storing in report..."
paver run_pylint -l $PYLINT_THRESHOLD > pylint.log || { cat pylint.log; EXIT=1; }
# Run quality task. Pass in the 'fail-under' percentage to diff-quality
paver run_quality -p 100
mkdir -p reports
paver run_complexity > reports/code_complexity.log || echo "Unable to calculate code complexity. Ignoring error."
# Need to create an empty test result so the post-build
# action doesn't fail the build.
cat > reports/quality.xml <<END
<?xml version="1.0" encoding="UTF-8"?>
<testsuite name="quality" tests="1" errors="0" failures="0" skip="0">
<testcase classname="quality" name="quality" time="0.604"></testcase>
</testsuite>
END
exit $EXIT
;;
"unit")
case "$SHARD" in
"lms")
paver test_system -s lms --extra_args="--with-flaky" || { EXIT=1; }
paver coverage
;;
"cms-js-commonlib")
paver test_system -s cms --extra_args="--with-flaky" || { EXIT=1; }
paver test_js --coverage --skip_clean || { EXIT=1; }
paver test_lib --skip_clean --extra_args="--with-flaky" || { EXIT=1; }
paver coverage
;;
*)
paver test --extra_args="--with-flaky"
paver coverage
;;
esac
exit $EXIT
;;
"lms-acceptance")
case "$SHARD" in
"all")
paver test_acceptance -s lms --extra_args="-v 3"
;;
"2")
mkdir -p reports
mkdir -p reports/acceptance
cat > reports/acceptance/xunit.xml <<END
<?xml version="1.0" encoding="UTF-8"?>
<testsuite name="nosetests" tests="1" errors="0" failures="0" skip="0">
<testcase classname="lettuce.tests" name="shard_placeholder" time="0.001"></testcase>
</testsuite>
END
;;
*)
paver test_acceptance -s lms --extra_args="-v 3"
;;
esac
;;
"cms-acceptance")
case "$SHARD" in
"all"|"1")
paver test_acceptance -s cms --extra_args="-v 3"
;;
"2"|"3")
mkdir -p reports/acceptance
cat > reports/acceptance/xunit.xml <<END
<?xml version="1.0" encoding="UTF-8"?>
<testsuite name="nosetests" tests="1" errors="0" failures="0" skip="0">
<testcase classname="lettuce.tests" name="shard_placeholder" time="0.001"></testcase>
</testsuite>
END
;;
esac
;;
"bok-choy")
case "$SHARD" in
"all")
paver test_bokchoy || { EXIT=1; }
;;
"1")
paver test_bokchoy --extra_args="-a shard_1 --with-flaky" || { EXIT=1; }
;;
"2")
paver test_bokchoy --extra_args="-a 'shard_2' --with-flaky" || { EXIT=1; }
;;
"3")
paver test_bokchoy --extra_args="-a 'shard_3' --with-flaky" || { EXIT=1; }
;;
"4")
paver test_bokchoy --extra_args="-a 'shard_4' --with-flaky" || { EXIT=1; }
;;
"5")
paver test_bokchoy --extra_args="-a 'shard_5' --with-flaky" || { EXIT=1; }
;;
"6")
paver test_bokchoy --extra_args="-a shard_1=False,shard_2=False,shard_3=False,shard_4=False,shard_5=False --with-flaky" || { EXIT=1; }
;;
# Default case because if we later define another bok-choy shard on Jenkins
# (e.g. Shard 7) in the multi-config project and expand this file
# with an additional case condition, old branches without that commit
# would not execute any tests on the worker assigned to that shard
# and thus their build would fail.
# This way they will just report 1 test executed and passed.
*)
# Need to create an empty test result so the post-build
# action doesn't fail the build.
# May be unnecessary if we changed the "Skip if there are no test files"
# option to True in the jenkins job definitions.
mkdir -p reports/bok_choy
cat > reports/bok_choy/xunit.xml <<END
<?xml version="1.0" encoding="UTF-8"?>
<testsuite name="nosetests" tests="1" errors="0" failures="0" skip="0">
<testcase classname="acceptance.tests" name="shard_placeholder" time="0.001"></testcase>
</testsuite>
END
;;
esac
# Move the reports to a directory that is unique to the shard
# so that when they are 'slurped' to the main flow job, they
# do not conflict with and overwrite reports from other shards.
mv reports/ reports_tmp/
mkdir -p reports/${TEST_SUITE}/${SHARD}
mv reports_tmp/* reports/${TEST_SUITE}/${SHARD}
rm -r reports_tmp/
exit $EXIT
;;
esac
|
dkarakats/edx-platform
|
scripts/all-tests.sh
|
Shell
|
agpl-3.0
| 8,161 |
export DISTRO_NAME=ubuntu
export DIB_RELEASE=${DIB_RELEASE:-trusty}
export DIB_DEBIAN_COMPONENTS=${DIB_DEBIAN_COMPONENTS:-main,restricted,universe}
export DIB_DISTRIBUTION_MIRROR=${DIB_DISTRIBUTION_MIRROR:-http://archive.ubuntu.com/ubuntu}
|
takahashinobuyuki/diskimage-builder
|
elements/ubuntu-minimal/environment.d/10-ubuntu-distro-name.bash
|
Shell
|
apache-2.0
| 240 |
#!/bin/bash
pkill -f script/server
|
varshavaradarajan/functional-tests
|
gadget_renderer/stop_server.sh
|
Shell
|
apache-2.0
| 35 |
#!/bin/bash
FN="BSgenome.Hsapiens.UCSC.hg17.masked_1.3.99.tar.gz"
URLS=(
"https://bioconductor.org/packages/3.12/data/annotation/src/contrib/BSgenome.Hsapiens.UCSC.hg17.masked_1.3.99.tar.gz"
"https://bioarchive.galaxyproject.org/BSgenome.Hsapiens.UCSC.hg17.masked_1.3.99.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-bsgenome.hsapiens.ucsc.hg17.masked/bioconductor-bsgenome.hsapiens.ucsc.hg17.masked_1.3.99_src_all.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-bsgenome.hsapiens.ucsc.hg17.masked/bioconductor-bsgenome.hsapiens.ucsc.hg17.masked_1.3.99_src_all.tar.gz"
)
MD5="ff6ee5196f234c5a2a3bcdd052c3c08e"
# Use a staging area in the conda dir rather than temp dirs, both to avoid
# permission issues as well as to have things downloaded in a predictable
# manner.
STAGING=$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM
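# e.g. STAGING resolves to something like (illustrative expansion):
#   $PREFIX/share/bioconductor-bsgenome.hsapiens.ucsc.hg17.masked-1.3.99-0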
mkdir -p $STAGING
TARBALL=$STAGING/$FN
SUCCESS=0
for URL in ${URLS[@]}; do
curl $URL > $TARBALL
[[ $? == 0 ]] || continue
# Platform-specific md5sum checks.
if [[ $(uname -s) == "Linux" ]]; then
if md5sum -c <<<"$MD5  $TARBALL"; then
SUCCESS=1
break
fi
elif [[ $(uname -s) == "Darwin" ]]; then
if [[ $(md5 $TARBALL | cut -f4 -d " ") == "$MD5" ]]; then
SUCCESS=1
break
fi
fi
done
if [[ $SUCCESS != 1 ]]; then
echo "ERROR: post-link.sh was unable to download any of the following URLs with the md5sum $MD5:"
printf '%s\n' "${URLS[@]}"
exit 1
fi
# Install and clean up
R CMD INSTALL --library=$PREFIX/lib/R/library $TARBALL
rm $TARBALL
rmdir $STAGING
|
ostrokach/bioconda-recipes
|
recipes/bioconductor-bsgenome.hsapiens.ucsc.hg17.masked/post-link.sh
|
Shell
|
mit
| 1,585 |
#!/bin/bash
FN="pd.genomewidesnp.6_3.14.1.tar.gz"
URLS=(
"https://bioconductor.org/packages/3.10/data/annotation/src/contrib/pd.genomewidesnp.6_3.14.1.tar.gz"
"https://bioarchive.galaxyproject.org/pd.genomewidesnp.6_3.14.1.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-pd.genomewidesnp.6/bioconductor-pd.genomewidesnp.6_3.14.1_src_all.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-pd.genomewidesnp.6/bioconductor-pd.genomewidesnp.6_3.14.1_src_all.tar.gz"
)
MD5="6e5369234e251c763f4f6c0220fbcb0c"
# Use a staging area in the conda dir rather than temp dirs, both to avoid
# permission issues as well as to have things downloaded in a predictable
# manner.
STAGING=$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM
mkdir -p $STAGING
TARBALL=$STAGING/$FN
SUCCESS=0
for URL in ${URLS[@]}; do
curl $URL > $TARBALL
[[ $? == 0 ]] || continue
# Platform-specific md5sum checks.
if [[ $(uname -s) == "Linux" ]]; then
if md5sum -c <<<"$MD5  $TARBALL"; then
SUCCESS=1
break
fi
elif [[ $(uname -s) == "Darwin" ]]; then
if [[ $(md5 $TARBALL | cut -f4 -d " ") == "$MD5" ]]; then
SUCCESS=1
break
fi
fi
done
if [[ $SUCCESS != 1 ]]; then
echo "ERROR: post-link.sh was unable to download any of the following URLs with the md5sum $MD5:"
printf '%s\n' "${URLS[@]}"
exit 1
fi
# Install and clean up
R CMD INSTALL --library=$PREFIX/lib/R/library $TARBALL
rm $TARBALL
rmdir $STAGING
|
Luobiny/bioconda-recipes
|
recipes/bioconductor-pd.genomewidesnp.6/post-link.sh
|
Shell
|
mit
| 1,473 |
#!/bin/ash
# jamesbond 2011, 2014
# updated Fatdog 700 - simplify, remove control, leave with "mixer" functionality only
# 131130 L18L internationalisation
# std localisation stanza
export TEXTDOMAIN=fatdog
. gettext.sh
# performance tweak - use "C" if there is no localisation
! [ -e $TEXTDOMAINDIR/${LANG%.*}/LC_MESSAGES/$TEXTDOMAIN.mo ] &&
! [ -e $TEXTDOMAINDIR/${LANG%_*}/LC_MESSAGES/$TEXTDOMAIN.mo ] && LANG=C
### configuration
APPNAME="$(gettext 'Alsa Equaliser')"
SPOT_HOME=$(awk -F: '$1=="spot" {print $6}' /etc/passwd)
ASOUNDRC=${ASOUNDRC:-/etc/asound.conf} # This is for Puppy
if grep -q ctl.equal $ASOUNDRC 2> /dev/null; then
alsamixer -D equal
else
Xdialog --title "$APPNAME" --infobox "$(eval_gettext '$APPNAME turned off, it must be turned on for this to work.')" 0 0 10000
fi
|
peabee/woof-CE
|
woof-code/rootfs-skeleton/usr/sbin/alsaequal.sh
|
Shell
|
gpl-2.0
| 797 |
# ib_csm_csv.sh - test for xtrabackup
# Copyright (C) 2009-2011 Percona Inc.
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
. inc/common.sh
init
run_mysqld --innodb_file_per_table
run_cmd ${MYSQL} ${MYSQL_ARGS} -e "create database csv"
run_cmd ${MYSQL} ${MYSQL_ARGS} -e "create table csm (a int NOT NULL ) ENGINE=CSV" csv
# Adding initial rows
vlog "Adding initial rows to database..."
numrow=100
count=0
while [ "$numrow" -gt "$count" ]
do
${MYSQL} ${MYSQL_ARGS} -e "insert into csm values ($count);" csv
let "count=count+1"
done
vlog "Initial rows added"
# Full backup
# backup root directory
mkdir -p $topdir/backup
vlog "Starting backup"
run_cmd ${IB_BIN} --user=root --socket=$mysql_socket $topdir/backup > $OUTFILE 2>&1
full_backup_dir=`grep "innobackupex: Backup created in directory" $OUTFILE | awk -F\' '{print $2}'`
vlog "Full backup done to directory $full_backup_dir"
# Saving the checksum of original table
checksum_a=`${MYSQL} ${MYSQL_ARGS} -Ns -e "checksum table csm;" csv | awk '{print $2}'`
vlog "Table checksum is $checksum_a"
vlog "Preparing backup"
# Prepare backup
echo "###########" >> $OUTFILE
echo "# PREPARE #" >> $OUTFILE
echo "###########" >> $OUTFILE
run_cmd ${IB_BIN} --apply-log $full_backup_dir >> $OUTFILE 2>&1
vlog "Data prepared for restore"
# Destroying mysql data
stop_mysqld
rm -rf $mysql_datadir/*
vlog "Data destroyed"
# Restore backup
vlog "Copying files"
echo "###########" >> $OUTFILE
echo "# RESTORE #" >> $OUTFILE
echo "###########" >> $OUTFILE
run_cmd ${IB_BIN} --copy-back $full_backup_dir >> $OUTFILE 2>&1
vlog "Data restored"
run_mysqld --innodb_file_per_table
vlog "Checking checksums"
checksum_b=`${MYSQL} ${MYSQL_ARGS} -Ns -e "checksum table csm;" csv | awk '{print $2}'`
if [ $checksum_a -ne $checksum_b ]
then
vlog "Checksums are not equal"
exit -1
fi
vlog "Checksums are OK"
stop_mysqld
clean
|
0xffea/drizzle
|
plugin/innobase/xtrabackup/test/t/ib_csm_csv.sh
|
Shell
|
gpl-2.0
| 2,536 |
# ib_incremental.sh - test for xtrabackup
# Copyright (C) 2009-2011 Percona Inc.
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
. inc/common.sh
init
run_mysqld --innodb_file_per_table
load_dbase_schema incremental_sample
# Adding initial rows
vlog "Adding initial rows to database..."
numrow=100
count=0
while [ "$numrow" -gt "$count" ]
do
${MYSQL} ${MYSQL_ARGS} -e "insert into test values ($count, $numrow);" incremental_sample
let "count=count+1"
done
vlog "Initial rows added"
# Full backup
# backup root directory
mkdir -p $topdir/backup
vlog "Starting backup"
run_cmd ${IB_BIN} --user=root --socket=$mysql_socket $topdir/backup > $OUTFILE 2>&1
full_backup_dir=`grep "innobackupex: Backup created in directory" $OUTFILE | awk -F\' '{print $2}'`
vlog "Full backup done to directory $full_backup_dir"
# Changing data
vlog "Making changes to database"
let "count=numrow+1"
let "numrow=500"
while [ "$numrow" -gt "$count" ]
do
${MYSQL} ${MYSQL_ARGS} -e "insert into test values ($count, $numrow);" incremental_sample
let "count=count+1"
done
vlog "Changes done"
# Saving the checksum of original table
checksum_a=`${MYSQL} ${MYSQL_ARGS} -Ns -e "checksum table test;" incremental_sample | awk '{print $2}'`
vlog "Table checksum is $checksum_a"
vlog "Making incremental backup"
echo "###############" >> $OUTFILE
echo "# INCREMENTAL #" >> $OUTFILE
echo "###############" >> $OUTFILE
# Incremental backup
run_cmd ${IB_BIN} --socket=$mysql_socket --incremental --incremental-basedir=$full_backup_dir $topdir/backup >> $OUTFILE 2>&1
inc_backup_dir=`grep "innobackupex: Backup created in directory" $OUTFILE | tail -n 1 | awk -F\' '{print $2}'`
vlog "Incremental backup done to directory $inc_backup_dir"
vlog "Preparing backup"
# Prepare backup
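# Standard three-pass xtrabackup prepare: --redo-only replays the redo log
# without rolling back uncommitted transactions, the second pass merges the
# incremental delta on top, and the final --apply-log performs the rollback
# so the restored datadir is consistent.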
echo "##############" >> $OUTFILE
echo "# PREPARE #1 #" >> $OUTFILE
echo "##############" >> $OUTFILE
run_cmd ${IB_BIN} --apply-log --redo-only $full_backup_dir >> $OUTFILE 2>&1
vlog "Log applied to full backup"
echo "##############" >> $OUTFILE
echo "# PREPARE #2 #" >> $OUTFILE
echo "##############" >> $OUTFILE
run_cmd ${IB_BIN} --apply-log --redo-only --incremental-dir=$inc_backup_dir $full_backup_dir >> $OUTFILE 2>&1
vlog "Delta applied to full backup"
echo "##############" >> $OUTFILE
echo "# PREPARE #3 #" >> $OUTFILE
echo "##############" >> $OUTFILE
run_cmd ${IB_BIN} --apply-log $full_backup_dir >> $OUTFILE 2>&1
vlog "Data prepared for restore"
# Destroying mysql data
stop_mysqld
rm -rf $mysql_datadir/*
vlog "Data destroyed"
# Restore backup
vlog "Copying files"
echo "###########" >> $OUTFILE
echo "# RESTORE #" >> $OUTFILE
echo "###########" >> $OUTFILE
run_cmd ${IB_BIN} --copy-back $full_backup_dir >> $OUTFILE 2>&1
vlog "Data restored"
run_mysqld --innodb_file_per_table
vlog "Checking checksums"
checksum_b=`${MYSQL} ${MYSQL_ARGS} -Ns -e "checksum table test;" incremental_sample | awk '{print $2}'`
if [ $checksum_a -ne $checksum_b ]
then
vlog "Checksums are not equal"
exit -1
fi
vlog "Checksums are OK"
stop_mysqld
clean
|
0xffea/drizzle
|
plugin/innobase/xtrabackup/test/t/ib_incremental.sh
|
Shell
|
gpl-2.0
| 3,676 |
#!/bin/sh
# Created on: Jan 16, 2009
# Author: catalin.pop
errorMsg(){
echo $1
exit 1
}
PID_FILE=/tmp/SystemManager.pid
[ ! -f $PID_FILE ] && errorMsg "Pid file not found at $PID_FILE. SystemManager not started."
PID=`cat $PID_FILE`
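# SystemManager is assumed to install a SIGUSR1 handler that reopens/refreshes
# its log file (assumption inferred from this script's name).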
kill -USR1 $PID
echo "Signal for refresh sent to SystemManager with PID=$PID"
|
TheSkorm/ISA100.11a-Gateway
|
AccessNode/config/FW_mesh_HW_vr900/release_isa/SysManager/sm_refreshLog.sh
|
Shell
|
gpl-3.0
| 324 |
#!/bin/sh
#
# Copyright (C) 2011, 2012, 2014, 2016 Internet Systems Consortium, Inc. ("ISC")
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
if $PERL -e 'use Net::DNS;' 2>/dev/null
then
if $PERL -e 'use Net::DNS; die if ($Net::DNS::VERSION >= 0.69 && $Net::DNS::VERSION <= 0.74);' 2>/dev/null
then
:
else
echo "I:Net::DNS versions 0.69 to 0.74 have bugs that cause this test to fail: please update." >&2
exit 1
fi
else
echo "I:This test requires the Net::DNS library." >&2
exit 1
fi
|
pecharmin/bind9
|
bin/tests/system/xfer/prereq.sh
|
Shell
|
mpl-2.0
| 686 |
#!/bin/bash
if [ $# -ne 1 ]; then
echo "Usage: $0 DATABASE_PATH"
echo " e.g.: $0 ~/database/db"
exit 1
fi
readonly DATABASE_PATH="$1"
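# groonga command output is [header, body]; jq -r extracts the raw (unquoted)
# table and column names from the body so they can be passed to object_inspect.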
groonga "${DATABASE_PATH}" \
schema | \
jq ".[1].tables[].name" | \
while read table; do \
groonga \
"${DATABASE_PATH}" \
object_inspect \
"${table}" \
--output_pretty yes
done
groonga "${DATABASE_PATH}" \
schema | \
jq ".[1].tables[].columns[].full_name" | \
while read column; do \
groonga \
"${DATABASE_PATH}" \
object_inspect \
"${column}" \
--output_pretty yes
done
|
groonga/groonga
|
tools/object-inspect-all.sh
|
Shell
|
lgpl-2.1
| 599 |
corebuild -pkg ctypes.foreign ncurses.inferred.mli
cp _build/ncurses.inferred.mli .
|
yminsky/examples
|
code/ffi/infer_ncurses.sh
|
Shell
|
unlicense
| 84 |
#!/bin/bash
ciao_bin="$HOME/local"
ciao_gobin="$GOPATH"/bin
event_counter=0
#Utility functions
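# exitOnError EXIT_CODE MESSAGE - abort the whole run when EXIT_CODE is non-zero,
# e.g.: exitOnError $? "Unable to launch VMs"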
function exitOnError {
local exit_code="$1"
local error_string="$2"
if [ $1 -ne 0 ]
then
echo "FATAL ERROR exiting: " "$error_string" "$exit_code"
exit 1
fi
}
#Checks that no network artifacts are left behind
function checkForNetworkArtifacts() {
#Verify that there are no ciao related artifacts left behind
ciao_networks=`sudo docker network ls --filter driver=ciao -q | wc -l`
if [ $ciao_networks -ne 0 ]
then
echo "FATAL ERROR: ciao docker networks not cleaned up"
sudo docker network ls --filter driver=ciao
exit 1
fi
#The only ciao interfaces left behind should be CNCI VNICs
#Once we can delete tenants we should not even have them around
cnci_vnics=`ip -d link | grep alias | grep cnci | wc -l`
ciao_vnics=`ip -d link | grep alias | wc -l`
if [ $cnci_vnics -ne $ciao_vnics ]
then
echo "FATAL ERROR: ciao network interfaces not cleaned up"
ip -d link | grep alias
exit 1
fi
}
function rebootCNCI {
ssh -T -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i "$CIAO_SSH_KEY" cloud-admin@"$ssh_ip" <<-EOF
sudo reboot now
EOF
#Now wait for it to come back up
ping -w 90 -c 3 $ssh_ip
exitOnError $? "Unable to ping CNCI after restart"
#Dump the tables for visual verification
ssh -T -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i "$CIAO_SSH_KEY" cloud-admin@"$ssh_ip" <<-EOF
sudo iptables-save
sudo ip l
sudo ip a
EOF
echo "Rebooted the CNCI"
}
function checkExtIPConnectivity {
#We checked the event before calling this, so the mapping should exist
testip=`"$ciao_gobin"/ciao-cli external-ip list -f '{{with index . 0}}{{.ExternalIP}}{{end}}'`
test_instance=`"$ciao_gobin"/ciao-cli instance list -f '{{with index . 0}}{{.ID}}{{end}}'`
sudo ip route add 203.0.113.0/24 dev ciaovlan
ping -w 90 -c 3 $testip
ping_result=$?
#Make sure we are able to reach the VM
test_hostname=`ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i "$CIAO_SSH_KEY" demouser@"$testip" hostname -s`
sudo ip route del 203.0.113.0/24 dev ciaovlan
exitOnError $ping_result "Unable to ping external IP"
if [ "$test_hostname" == "$test_instance" ]
then
echo "SSH connectivity using external IP verified"
else
echo "FATAL ERROR: Unable to ssh via external IP"
exit 1
fi
}
#There are too many failsafes in the CNCI, so just disable the iptables binary to trigger a failure.
#This also ensures that the CNCI is always left in a consistent state (sans the permission change).
# this function acts on the CNCI over ssh
function triggerIPTablesFailure {
ssh -T -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i "$CIAO_SSH_KEY" cloud-admin@"$ssh_ip" <<-EOF
sudo chmod -x /usr/bin/iptables
EOF
}
#Restore the iptables so that the cluster is usable
# this function to be run on the CNCI
function restoreIPTables {
ssh -T -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i "$CIAO_SSH_KEY" cloud-admin@"$ssh_ip" <<-EOF
sudo chmod +x /usr/bin/iptables
EOF
}
function clearAllEvents() {
#Clear out all prior events. Currently this is an admin only operation.
ciao_user=$CIAO_USERNAME
ciao_passwd=$CIAO_PASSWORD
export CIAO_USERNAME=$CIAO_ADMIN_USERNAME
export CIAO_PASSWORD=$CIAO_ADMIN_PASSWORD
"$ciao_gobin"/ciao-cli event delete
#Wait for the event count to drop to 0
retry=0
ciao_events=0
until [ $retry -ge 6 ]
do
ciao_events=`"$ciao_gobin"/ciao-cli event list -f '{{len .}}'`
if [ $ciao_events -eq 0 ]
then
break
fi
let retry=retry+1
sleep 1
done
export CIAO_USERNAME=$ciao_user
export CIAO_PASSWORD=$ciao_passwd
exitOnError $ciao_events "ciao events not deleted properly"
}
function checkEventStatus {
local event_index="$1"
local event_code="$2"
local retry=0
local ciao_events=0
local total_events=0
local code=""
#We only need to wait for as many events as the index
total_events=$((event_index + 1))
until [ $retry -ge 6 ]
do
ciao_events=`"$ciao_gobin"/ciao-cli event list -f '{{len .}}'`
if [ $ciao_events -eq $total_events ]
then
break
fi
let retry=retry+1
sleep 1
done
if [ $ciao_events -ne $total_events ]
then
echo "FATAL ERROR: ciao event not reported. Events seen =" $ciao_events
"$ciao_gobin"/ciao-cli event list
exit 1
fi
code=$("$ciao_gobin"/ciao-cli event list -f "{{(index . ${event_index}).Message}}" | cut -d ' ' -f 1)
if [ "$event_code" != "$code" ]
then
echo "FATAL ERROR: Unknown event $code. Looking for $event_code"
"$ciao_gobin"/ciao-cli event list
exit 1
fi
"$ciao_gobin"/ciao-cli event list
}
function createExternalIPPool() {
# first create a new external IP pool and add a subnet to it.
# this is an admin only operation, so make sure our env variables
# are set accordingly. Since user admin might belong to more than one
# tenant, make sure to specify that we are logging in as part of the
# "admin" tenant/project.
ciao_user=$CIAO_USERNAME
ciao_passwd=$CIAO_PASSWORD
export CIAO_USERNAME=$CIAO_ADMIN_USERNAME
export CIAO_PASSWORD=$CIAO_ADMIN_PASSWORD
"$ciao_gobin"/ciao-cli -tenant-name admin pool create -name test
"$ciao_gobin"/ciao-cli -tenant-name admin pool add -subnet 203.0.113.0/24 -name test
export CIAO_USERNAME=$ciao_user
export CIAO_PASSWORD=$ciao_passwd
}
function deleteExternalIPPool() {
#Cleanup the pool
export CIAO_USERNAME=$CIAO_ADMIN_USERNAME
export CIAO_PASSWORD=$CIAO_ADMIN_PASSWORD
"$ciao_gobin"/ciao-cli -tenant-name admin pool delete -name test
exitOnError $? "Unable to delete pool"
export CIAO_USERNAME=$ciao_user
export CIAO_PASSWORD=$ciao_passwd
}
# Read cluster env variables
. $ciao_bin/demo.sh
vm_wlid=$("$ciao_gobin"/ciao-cli workload list -f='{{if gt (len .) 0}}{{(index . 0).ID}}{{end}}')
exitOnError $? "Unable to list workloads"
"$ciao_gobin"/ciao-cli instance add --workload=$vm_wlid --instances=2
exitOnError $? "Unable to launch VMs"
"$ciao_gobin"/ciao-cli instance list
exitOnError $? "Unable to list instances"
#Launch containers
#Pre-cache the image to reduce the start latency
sudo docker pull debian
debian_wlid=$("$ciao_gobin"/ciao-cli workload list -f='{{$x := filter . "Name" "Debian latest test container"}}{{if gt (len $x) 0}}{{(index $x 0).ID}}{{end}}')
echo "Starting workload $debian_wlid"
"$ciao_gobin"/ciao-cli instance add --workload=$debian_wlid --instances=1
exitOnError $? "Unable to launch containers"
sleep 5
"$ciao_gobin"/ciao-cli instance list
exitOnError $? "Unable to list instances"
container_1=`sudo docker ps -q -l`
container_1_ip=`sudo docker inspect --format='{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' $container_1`
"$ciao_gobin"/ciao-cli instance add --workload=$debian_wlid --instances=1
exitOnError $? "Unable to launch containers"
sleep 5
"$ciao_gobin"/ciao-cli instance list
exitOnError $? "Unable to list instances"
container_2=`sudo docker ps -q -l`
#Check SSH connectivity
"$ciao_gobin"/ciao-cli instance list
#The VM takes time to boot as you are running on two
#layers of virtualization. Hence wait a bit
retry=0
until [ $retry -ge 6 ]
do
ssh_ip=""
ssh_ip=$("$ciao_gobin"/ciao-cli instance list --workload=$vm_wlid -f='{{if gt (len .) 0}}{{(index . 0).SSHIP}}{{end}}')
if [ "$ssh_ip" == "" ]
then
echo "Waiting for instance to boot"
let retry=retry+1
sleep 30
continue
fi
ssh_check=$(head -1 < /dev/tcp/"$ssh_ip"/33002)
echo "Attempting to ssh to: $ssh_ip"
if [[ "$ssh_check" == *SSH-2.0-OpenSSH* ]]
then
echo "SSH connectivity verified"
break
else
let retry=retry+1
echo "Retrying ssh connection $retry"
fi
sleep 30
done
if [ $retry -ge 6 ]
then
echo "Unable check ssh connectivity into VM"
exit 1
fi
#Check docker networking
echo "Checking Docker Networking"
sudo docker exec $container_2 /bin/ping -w 90 -c 3 $container_1_ip
exitOnError $? "Unable to ping across containers"
echo "Container connectivity verified"
#Clear out all prior events
clearAllEvents
#Test External IP Assignment support
#Pick the first instance which is a VM, as we can even SSH into it
#We have already checked that the VM is up.
createExternalIPPool
testinstance=`"$ciao_gobin"/ciao-cli instance list -f '{{with index . 0}}{{.ID}}{{end}}'`
"$ciao_gobin"/ciao-cli external-ip map -instance $testinstance -pool test
#Wait for the CNCI to report successful map
checkEventStatus $event_counter "Mapped"
"$ciao_gobin"/ciao-cli event list
"$ciao_gobin"/ciao-cli external-ip list
checkExtIPConnectivity
#Check that the CNCI retains state after reboot
#If state has been restored, the Ext IP should be reachable
rebootCNCI
checkExtIPConnectivity
"$ciao_gobin"/ciao-cli external-ip unmap -address $testip
#Wait for the CNCI to report successful unmap
event_counter=$((event_counter+1))
checkEventStatus $event_counter "Unmapped"
"$ciao_gobin"/ciao-cli external-ip list
#Test for External IP Failures
#Map failure
triggerIPTablesFailure
"$ciao_gobin"/ciao-cli external-ip map -instance $testinstance -pool test
#Wait for the CNCI to report unsuccessful map
event_counter=$((event_counter+1))
checkEventStatus $event_counter "Failed"
restoreIPTables
#Unmap failure
"$ciao_gobin"/ciao-cli external-ip map -instance $testinstance -pool test
event_counter=$((event_counter+1))
checkEventStatus $event_counter "Mapped"
triggerIPTablesFailure
"$ciao_gobin"/ciao-cli external-ip unmap -address $testip
event_counter=$((event_counter+1))
checkEventStatus $event_counter "Failed"
restoreIPTables
#Cleanup
"$ciao_gobin"/ciao-cli external-ip unmap -address $testip
event_counter=$((event_counter+1))
checkEventStatus $event_counter "Unmapped"
#Cleanup pools
deleteExternalIPPool
#Now delete all instances
"$ciao_gobin"/ciao-cli instance delete --all
exitOnError $? "Unable to delete instances"
"$ciao_gobin"/ciao-cli instance list
#Wait for all the instance deletions to be reported back
event_counter=$((event_counter+4))
checkEventStatus $event_counter "Deleted"
#Verify that there are no ciao related artifacts left behind
checkForNetworkArtifacts
echo "###########################################"
echo "-----------All checks passed!--------------"
echo "###########################################"
|
erick0z/ciao
|
testutil/singlevm/verify.sh
|
Shell
|
apache-2.0
| 10,240 |
mvn clean site site:stage -Preporting scm-publish:publish-scm $@
|
emsouza/archiva
|
archiva-modules/deploySite.sh
|
Shell
|
apache-2.0
| 65 |
#!/bin/bash
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
if [[ $UID -ne 0 ]] ; then
echo You must run this as root.
exit 1
fi
# hopefully we were passed some command-line params
while [[ -n "$1" ]]; do
case "$1" in
-i)
shift
CMD_IFACE=$1
;;
-p)
shift
CMD_PERSONA=$1
;;
-o)
shift
CMD_OCTET=$1
;;
esac
shift
done
PERSONA=${CMD_PERSONA}
OCTET=${CMD_OCTET:-42}
IFACE=${CMD_IFACE:-eth0}
if [[ ( $PERSONA -lt 1 ) || ( $PERSONA -gt 8 ) ]] ; then
echo invalid screen number $PERSONA
echo please choose a number 1..8
exit 2
fi
echo lg${PERSONA} > /etc/hostname
rm -f /etc/network/if-up.d/*-lg_alias
cat >/etc/network/if-up.d/${OCTET}-lg_alias <<EOF
#!/bin/sh
PATH=/sbin:/bin:/usr/sbin:/usr/bin
# This file created automatically by $0
# to define an alias where lg systems can communicate
ifconfig ${IFACE}:${OCTET} 10.42.${OCTET}.${PERSONA} netmask 255.255.255.0
# end of file
EOF
chmod 0755 /etc/network/if-up.d/${OCTET}-lg_alias
# we start counting FRAMES at "0"
FRAME=`expr $PERSONA - 1`
echo $FRAME > /lg/frame
# we start counting screens per-frame at 1
#echo $DHCP_LG_SCREENS > /lg/screen
if [ "${OCTET}" -ne 42 ]; then
sed -i -e "s:10\.42\.42\.:10.42.${OCTET}.:g" /etc/hosts
sed -i -e "s:10\.42\.42\.:10.42.${OCTET}.:g" /etc/hosts.squid
sed -i -e "s:10\.42\.42\.:10.42.${OCTET}.:g" /etc/iptables.conf
sed -i -e "s:10\.42\.42\.:10.42.${OCTET}.:g" /etc/ssh/ssh_known_hosts
fi
service hostname start
/etc/network/if-pre-up.d/iptables
/etc/network/if-up.d/${OCTET}-lg_alias
service rsyslog restart &
echo "You should have a persona configured now.
Adjusted the following files (using sed):
/etc/hosts
/etc/hosts.squid
/etc/iptables.conf
/etc/ssh/ssh_known_hosts
if you selected a special third octet."
## we need to think about signaling the whole setup when the persona changes
#service lxdm restart
initctl emit --no-wait persona-ok
|
oak11/liquid-galaxy.lg-root-fs
|
home/lg/bin/personality.sh
|
Shell
|
apache-2.0
| 2,548 |