code (string, 2–1.05M chars) | repo_name (string, 5–110 chars) | path (string, 3–922 chars) | language (stringclasses: 1 value) | license (stringclasses: 15 values) | size (int64, 2–1.05M)
---|---|---|---|---|---
#!/bin/bash
/usr/sbin/sshd -D &
bash /usr/local/bin/wrapdocker
| orius123/dind-ssh | start_sshd.sh | Shell | apache-2.0 | 68 |
# -----------------------------------------------------------------------------
#
# Package : sorted-object
# Version : 2.0.1
# Source repo : https://github.com/domenic/sorted-object
# Tested on : RHEL 8.3
# Script License: Apache License, Version 2 or later
# Maintainer : BulkPackageSearch Automation <[email protected]>
#
# Disclaimer: This script has been tested in root mode on given
# ========== platform using the mentioned version of the package.
# It may not work as expected with newer versions of the
# package and/or distribution. In such case, please
# contact "Maintainer" of this script.
#
# ----------------------------------------------------------------------------
PACKAGE_NAME=sorted-object
PACKAGE_VERSION=2.0.1
PACKAGE_URL=https://github.com/domenic/sorted-object
yum -y update && yum install -y yum-utils nodejs nodejs-devel nodejs-packaging npm python38 python38-devel ncurses git gcc gcc-c++ libffi libffi-devel jq make cmake
yum-config-manager --add-repo http://rhn.pbm.ihost.com/rhn/latest/8.3Server/ppc64le/appstream/
yum-config-manager --add-repo http://rhn.pbm.ihost.com/rhn/latest/8.3Server/ppc64le/baseos/
yum-config-manager --add-repo http://rhn.pbm.ihost.com/rhn/latest/7Server/ppc64le/optional/
yum install -y firefox liberation-fonts xdg-utils && npm install n -g && n latest && npm install -g npm@latest && export PATH="$PATH" && npm install --global yarn grunt-bump xo testem acorn
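# The python3 one-liner below extracts PRETTY_NAME from /etc/os-release; on the
# tested platform it yields a value like "Red Hat Enterprise Linux 8.3 (Ootpa)"
# (illustrative output).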
OS_NAME=`python3 -c "os_file_data=open('/etc/os-release').readlines();os_info = [i.replace('PRETTY_NAME=','').strip() for i in os_file_data if i.startswith('PRETTY_NAME')];print(os_info[0])"`
HOME_DIR=`pwd`
if ! git clone $PACKAGE_URL $PACKAGE_NAME; then
echo "------------------$PACKAGE_NAME:clone_fails---------------------------------------"
echo "$PACKAGE_URL $PACKAGE_NAME" > /home/tester/output/clone_fails
echo "$PACKAGE_NAME | $PACKAGE_URL | $PACKAGE_VERSION | $OS_NAME | GitHub | Fail | Clone_Fails" > /home/tester/output/version_tracker
exit 0
fi
cd $HOME_DIR/$PACKAGE_NAME
git checkout $PACKAGE_VERSION
PACKAGE_VERSION=$(jq -r ".version" package.json)
# install dependencies, then run the test command from test.sh below
if ! (npm install && npm audit fix && npm audit fix --force); then
echo "------------------$PACKAGE_NAME:install_fails-------------------------------------"
echo "$PACKAGE_URL $PACKAGE_NAME"
echo "$PACKAGE_NAME | $PACKAGE_URL | $PACKAGE_VERSION | $OS_NAME | GitHub | Fail | Install_Fails"
exit 0
fi
cd $HOME_DIR/$PACKAGE_NAME
if ! npm test; then
echo "------------------$PACKAGE_NAME:install_success_but_test_fails---------------------"
echo "$PACKAGE_URL $PACKAGE_NAME"
echo "$PACKAGE_NAME | $PACKAGE_URL | $PACKAGE_VERSION | $OS_NAME | GitHub | Fail | Install_success_but_test_Fails"
exit 0
else
echo "------------------$PACKAGE_NAME:install_&_test_both_success-------------------------"
echo "$PACKAGE_URL $PACKAGE_NAME"
echo "$PACKAGE_NAME | $PACKAGE_URL | $PACKAGE_VERSION | $OS_NAME | GitHub | Pass | Both_Install_and_Test_Success"
exit 0
fi
| ppc64le/build-scripts | s/sorted-object/sorted-object_rhel_8.3.sh | Shell | apache-2.0 | 3,070 |
echo 'on'
echo '============================================================='
echo '$                                                           $'
echo '$                      Nepxion Thunder                      $'
echo '$                                                           $'
echo '$                                                           $'
echo '$                                                           $'
echo '$             Nepxion Studio All Right Reserved             $'
echo '$                  Copyright (C) 2017-2050                  $'
echo '$                                                           $'
echo '============================================================='
echo '.'
echo 'off'
title="Nepxion Thunder"
color=0a
mvn clean install -DskipTests
| Nepxion/Thunder | install.sh | Shell | apache-2.0 | 782 |
info "1.3 - Controller Manager"
check_1_3_1="1.3.1 - Ensure that the --terminated-pod-gc-threshold argument is set as appropriate"
if check_argument "$CIS_MANAGER_CMD" '--terminated-pod-gc-threshold' >/dev/null 2>&1; then
threshold=$(get_argument_value "$CIS_MANAGER_CMD" '--terminated-pod-gc-threshold')
pass "$check_1_3_1"
pass " * terminated-pod-gc-threshold: $threshold"
else
warn "$check_1_3_1"
fi
check_1_3_2="1.3.2 - Ensure that the --profiling argument is set to false"
if check_argument "$CIS_MANAGER_CMD" '--profiling=false' >/dev/null 2>&1; then
pass "$check_1_3_2"
else
warn "$check_1_3_2"
fi
check_1_3_3="1.3.3 - Ensure that the --insecure-experimental-approve-all-kubelet-csrs-for-group argument is not set"
if check_argument "$CIS_MANAGER_CMD" '--insecure-experimental-approve-all-kubelet-csrs-for-group' >/dev/null 2>&1; then
warn "$check_1_3_3"
else
pass "$check_1_3_3"
fi
check_1_3_4="1.3.4 - Ensure that the --use-service-account-credentials argument is set to true"
if check_argument "$CIS_MANAGER_CMD" '--use-service-account-credentials' >/dev/null 2>&1; then
pass "$check_1_3_4"
else
warn "$check_1_3_4"
fi
check_1_3_5="1.3.5 - Ensure that the --service-account-private-key-file argument is set as appropriate"
if check_argument "$CIS_MANAGER_CMD" '--service-account-private-key-file' >/dev/null 2>&1; then
keyfile=$(get_argument_value "$CIS_MANAGER_CMD" '--service-account-private-key-file')
pass "$check_1_3_5"
pass " * service-account-private-key-file: $keyfile"
else
warn "$check_1_3_5"
fi
check_1_3_6="1.3.6 - Ensure that the --root-ca-file argument is set as appropriate"
if check_argument "$CIS_MANAGER_CMD" '--root-ca-file' >/dev/null 2>&1; then
cafile=$(get_argument_value "$CIS_MANAGER_CMD" '--root-ca-file')
pass "$check_1_3_6"
pass " * root-ca-file: $cafile"
else
warn "$check_1_3_6"
fi
| neuvector/kubernetes-cis-benchmark | 1.0.0/master/master_3_contoller_manager.sh | Shell | apache-2.0 | 1,923 |
export JAVA_HOME=/usr/lib/jvm/java-8-oracle/
| spennihana/h2o-3 | docker/hadoop/hdp/conf/hadoop-env.sh | Shell | apache-2.0 | 45 |
#!/bin/bash
QUERY=$(cat <<EOF
{
"query": {
"bool": {
"must": [
{
"query_string": {
"query": "*"
}
}
]
}
},
"aggs" : {
"environment" : {
"terms" : { "field" : "environment", "size": 0 },
"aggs" : {
"cluster" : {
"terms" : { "field" : "cluster", "size": 0 },
"aggs" : {
"host" : {
"terms" : { "field" : "host", "size": 0 },
"aggs" : {
"service" : {
"terms" : { "field" : "service", "size": 0 },
"aggs" : {
"event_source" : {
"terms" : { "field" : "event_source", "size": 0}
}
}
}
}
}
}
}
}
}
},
"size": 0
}
EOF
)
echo "$QUERY" | curl --silent -d @- -XGET "http://api.meta.logsearch.io:9200/.component-status/_search?pretty"
| logsearch/logsearch-monitoring-prototype | component-api/GET-hierarchy-environment_cluster_host_service_event_source.sh | Shell | apache-2.0 | 1,320 |
#!/usr/bin/env bash
echo "re-creating minikube then running the system test"
APP="minikube"
${APP} delete
sleep 5
gofabric8 start --open-console=false
sleep 30
gofabric8 wait-for --all --timeout=60m
./systest-local.sh
| fabric8io/fabric8-forge | systest-minikube.sh | Shell | apache-2.0 | 224 |
#!/bin/bash
if [ -z "${SANDBOX_ENV_PATH}" ];
then
echo '[ERROR] SANDBOX_ENV_PATH is not defined'
exit 1
else
source ${SANDBOX_ENV_PATH%%/}/lib/build-libs.sh
fi
git_latest() {
docker build -t "ownport/git:latest" \
--no-cache \
$(get_default_args) \
${SANDBOX_ENV_PATH%%/}/dockerfiles/git
}
"$@"
| ownport/docker-env | sandbox/dockerfiles/git/build.sh | Shell | apache-2.0 | 326 |
#!/bin/bash
this is \
bad continuation as it does not match 1st arg or %4
a \
bad indent
if foo; then
testing a \
bad indent, indented
fi
| openstack-dev/bashate | bashate/tests/samples/E003_bad.sh | Shell | apache-2.0 | 154 |
#!/bin/sh
#
# Make a temporary Apache instance for testing.
if [ " $#" -eq 0 ]; then
echo "Usage: $0 /path/to/instance"
exit 1
fi
target=$1
echo "Creating instance in $target"
mkdir -p $target
cd $target
mkdir alias
mkdir bin
mkdir conf
mkdir conf.d
mkdir logs
mkdir run
mkdir content
mkdir cgi-bin
mkdir lib
# Create the content
mkdir content/rc4_cipher
mkdir content/openssl_rc4_cipher
mkdir content/openssl_aes_cipher
mkdir content/acl
mkdir content/protocolssl2
mkdir content/protocolssl3
mkdir content/protocoltls1
mkdir content/protocoltls11
mkdir content/protocoltls12
cat > content/index.html << EOF
<html>
Basic index page
</html>
EOF
cp content/index.html content/acl/aclS01.html
cp content/index.html content/acl/aclS02.html
cp content/index.html content/acl/aclS03.html
cp content/index.html content/secret-test.html
cp content/index.html content/protocolssl2/index.html
cp content/index.html content/protocolssl3/index.html
cp content/index.html content/protocoltls1/index.html
cp content/index.html content/protocoltls11/index.html
cp content/index.html content/protocoltls12/index.html
ln -s /etc/httpd/modules modules
dn="E=alpha@`hostname`,CN=Frank Alpha,UID=alpha,OU=People,O=example.com,C=US"
cat > conf/htpasswd << EOF
/${dn}:xxj31ZMTZzkVA
EOF
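# (the htpasswd entry above is keyed by the client-certificate DN rather than a
# plain username, with a pre-hashed crypt password)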
# Create start/stop scripts
cat << EOF > start
#!/bin/sh
HTTPD=/usr/sbin/httpd
\$HTTPD -k start -d . -f ./conf/httpd.conf
EOF
cat << EOF > stop
#!/bin/sh
HTTPD=/usr/sbin/httpd
\$HTTPD -k stop -d . -f ./conf/httpd.conf
EOF
chmod 0755 start stop
| shawnwhit/httpd_modnss | test/createinstance.sh | Shell | apache-2.0 | 1,527 |
#!/bin/bash
print_help() {
echo "Usage:"
echo " rolling-upgrade.sh -p PARAMETERS_FILE"
echo ""
echo "Options:"
echo " -p, --param-file: Path to the parameters file"
}
get_replicas_count() {
local REPLICAS=$(oc get dc $1 -n $NAMESPACE -o yaml | grep -w availableReplicas: | sed -n 's/.*availableReplicas: \([0-9]*\)/\1/p')
echo "$REPLICAS"
}
shutdown() {
echo "Shuting down $1"
oc scale dc $1 -n $NAMESPACE --replicas=0
echo "Waiting for $1 to be down"
while true; do
local REPLICAS=$(get_replicas_count $1)
if [ "$REPLICAS" == 0 ]; then
echo "$1 is down"
return 0
else
echo "."
sleep 10
fi
done
}
deploy() {
echo "Deploying $1"
oc scale dc $1 -n $NAMESPACE --replicas=1
echo "Waiting for $1 to be up"
while true; do
local REPLICAS=$(get_replicas_count $1)
if [ "$REPLICAS" == 1 ]; then
echo "$1 is up"
return 0
else
echo "."
sleep 10
fi
done
}
PARAMETERS_FILE=""
while [ "$1" != "" ]; do
case $1 in
-p | --param-file )
shift
PARAMETERS_FILE=$1
esac
shift
done
if [ "$PARAMETERS_FILE" == "" ]; then
print_help
exit 1
fi
if [ ! -f $PARAMETERS_FILE ]; then
echo "File $PARAMETERS_FILE not found"
exit 2
fi
while IFS='' read -r line || [[ -n "$line" ]]; do
export "$line"
done < "$PARAMETERS_FILE"
# Update image stream
echo "Upgrading image stream"
printenv | oc process artifactory-imagestream-template --ignore-unknown-parameters --param-file=- | oc replace -f -
# Update primary node
shutdown "$NAME-primary"
echo "Upgrading $NAME-primary"
printenv | oc process artifactory-nfs-primary-deployment-template --ignore-unknown-parameters --param-file=- | oc replace -f -
deploy "$NAME-primary"
# Update secondary node
shutdown "$NAME-secondary"
echo "Upgrading $NAME-secondary"
printenv | oc process artifactory-nfs-secondary-deployment-template --ignore-unknown-parameters --param-file=- | oc replace -f -
deploy "$NAME-secondary"
echo "$NAME upgraded successfully"
| JFrogDev/artifactory-docker-examples | openshift/artifactory/artifactory-ha-nfs/rolling-upgrade.sh | Shell | apache-2.0 | 2,177 |
#!/usr/bin/env bash
#
# Cookbook Name:: db_percona
#
# Copyright RightScale, Inc. All rights reserved.
# All access and use subject to the RightScale Terms of Service available at
# http://www.rightscale.com/terms.php and, if applicable, other agreements
# such as a RightScale Master Subscription Agreement.
CONFIG_FILE=/etc/my.cnf
string="
\n
# IMPORTANT: Additional settings that can override those from this file!\n
# The files must end with '.cnf', otherwise they'll be ignored.\n
#\n
!includedir /etc/mysql/conf.d/"
# if /etc/my.cnf already exists
# check if includedir line is present in the file, else append the line
# if /etc/my.cnf does not exist
# create it with just the includedir line in it
if [ -e $CONFIG_FILE ]
then
if ! grep -Eq "\s*\!includedir\s*/etc/mysql/conf\.d" $CONFIG_FILE
then
echo -e "$string" >> $CONFIG_FILE
fi
else
echo -e "$string" > $CONFIG_FILE
fi
| rs-services/cookbooks_internal | cookbooks/db_percona/files/default/setup_my_cnf.sh | Shell | apache-2.0 | 898 |
#!/bin/bash
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright Clairvoyant 2016
PATH=/usr/bin:/usr/sbin:/bin:/sbin:/usr/local/bin
# Function to discover basic OS details.
discover_os() {
if command -v lsb_release >/dev/null; then
# CentOS, Ubuntu, RedHatEnterpriseServer, Debian, SUSE LINUX
# shellcheck disable=SC2034
OS=$(lsb_release -is)
# CentOS= 6.10, 7.2.1511, Ubuntu= 14.04, RHEL= 6.10, 7.5, SLES= 11
# shellcheck disable=SC2034
OSVER=$(lsb_release -rs)
# 7, 14
# shellcheck disable=SC2034
OSREL=$(echo "$OSVER" | awk -F. '{print $1}')
# Ubuntu= trusty, wheezy, CentOS= Final, RHEL= Santiago, Maipo, SLES= n/a
# shellcheck disable=SC2034
OSNAME=$(lsb_release -cs)
else
if [ -f /etc/redhat-release ]; then
if [ -f /etc/centos-release ]; then
# shellcheck disable=SC2034
OS=CentOS
# 7.5.1804.4.el7.centos, 6.10.el6.centos.12.3
# shellcheck disable=SC2034
OSVER=$(rpm -qf /etc/centos-release --qf='%{VERSION}.%{RELEASE}\n' | awk -F. '{print $1"."$2}')
# shellcheck disable=SC2034
OSREL=$(rpm -qf /etc/centos-release --qf='%{VERSION}\n')
else
# shellcheck disable=SC2034
OS=RedHatEnterpriseServer
# 7.5, 6Server
# shellcheck disable=SC2034
OSVER=$(rpm -qf /etc/redhat-release --qf='%{VERSION}\n')
if [ "$OSVER" == "6Server" ]; then
# shellcheck disable=SC2034
OSVER=$(rpm -qf /etc/redhat-release --qf='%{RELEASE}\n' | awk -F. '{print $1"."$2}')
# shellcheck disable=SC2034
OSNAME=Santiago
else
# shellcheck disable=SC2034
OSNAME=Maipo
fi
# shellcheck disable=SC2034
OSREL=$(echo "$OSVER" | awk -F. '{print $1}')
fi
elif [ -f /etc/SuSE-release ]; then
if grep -q "^SUSE Linux Enterprise Server" /etc/SuSE-release; then
# shellcheck disable=SC2034
OS="SUSE LINUX"
fi
# shellcheck disable=SC2034
OSVER=$(rpm -qf /etc/SuSE-release --qf='%{VERSION}\n' | awk -F. '{print $1}')
# shellcheck disable=SC2034
OSREL=$(rpm -qf /etc/SuSE-release --qf='%{VERSION}\n' | awk -F. '{print $1}')
# shellcheck disable=SC2034
OSNAME="n/a"
fi
fi
}
echo "********************************************************************************"
echo "*** $(basename "$0")"
echo "********************************************************************************"
# Check to see if we are on a supported OS.
discover_os
if [ "$OS" != RedHatEnterpriseServer ] && [ "$OS" != CentOS ] && [ "$OS" != Debian ] && [ "$OS" != Ubuntu ]; then
echo "ERROR: Unsupported OS."
exit 3
fi
if [ "$OS" == RedHatEnterpriseServer ] || [ "$OS" == CentOS ]; then
yum -y -e1 -d1 install jq ksh curl
elif [ "$OS" == Debian ] || [ "$OS" == Ubuntu ]; then
export DEBIAN_FRONTEND=noninteractive
apt-get -y -q install jq ksh curl
fi
#cp -p {start,stop}_cluster_all.ksh /usr/local/sbin/
#chown 0:0 /usr/local/sbin/{start,stop}_cluster_all.ksh
#chmod 700 /usr/local/sbin/{start,stop}_cluster_all.ksh
install -o root -g root -m 0755 "$(dirname "$0")/start_cluster_all.ksh" /usr/local/sbin/start_cluster_all.ksh
install -o root -g root -m 0755 "$(dirname "$0")/stop_cluster_all.ksh" /usr/local/sbin/stop_cluster_all.ksh
#rm -f /tmp/$$
#crontab -l | egrep -v 'stop_cluster_all.ksh|start_cluster_all.ksh' >/tmp/$$
#echo '00 08 * * * /usr/local/sbin/start_cluster_all.ksh >/dev/null'>>/tmp/$$
#echo '00 18 * * * /usr/local/sbin/stop_cluster_all.ksh >/dev/null'>>/tmp/$$
#crontab /tmp/$$
#rm -f /tmp/$$
| teamclairvoyant/hadoop-deployment-bash | api/install_startstop_cluster.sh | Shell | apache-2.0 | 4,103 |
#!/bin/bash
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
set -e
set -x
source tensorflow/tools/ci_build/release/common.sh
install_ubuntu_16_python_pip_deps python3.7
# Update bazel
install_bazelisk
# Export required variables for running pip.sh
export OS_TYPE="UBUNTU"
export CONTAINER_TYPE="GPU"
export TF_PYTHON_VERSION='python3.7'
# Run configure.
export PYTHON_BIN_PATH=$(which ${TF_PYTHON_VERSION})
yes "" | "$PYTHON_BIN_PATH" configure.py
# Get the default test targets for bazel.
source tensorflow/tools/ci_build/build_scripts/DEFAULT_TEST_TARGETS.sh
# Export optional variables for running pip.sh
export TF_TEST_FILTER_TAGS='gpu,requires-gpu,-no_gpu,-no_oss,-oss_serial,-no_oss_py37,-no_cuda11'
export TF_BUILD_FLAGS="--config=release_gpu_linux_cuda_11_2 "
export TF_TEST_FLAGS="--test_tag_filters=${TF_TEST_FILTER_TAGS} --build_tag_filters=${TF_TEST_FILTER_TAGS} \
--distinct_host_configuration=false \
--action_env=TF_CUDA_VERSION=11.2 --action_env=TF_CUDNN_VERSION=8.1 --test_env=TF2_BEHAVIOR=1 \
--config=cuda --test_output=errors --local_test_jobs=4 --test_lang_filters=py \
--verbose_failures=true --keep_going --define=no_tensorflow_py_deps=true \
--run_under=//tensorflow/tools/ci_build/gpu_build:parallel_gpu_execute "
export TF_TEST_TARGETS="${DEFAULT_BAZEL_TARGETS} -//tensorflow/lite/... "
export TF_PIP_TESTS="test_pip_virtualenv_non_clean test_pip_virtualenv_clean"
#export IS_NIGHTLY=0 # Not nightly; uncomment if building from tf repo.
export TF_PROJECT_NAME="tensorflow" # single pip package!
export TF_PIP_TEST_ROOT="pip_test"
# To build both tensorflow and tensorflow-gpu pip packages
export TF_BUILD_BOTH_GPU_PACKAGES=1
./tensorflow/tools/ci_build/builds/pip_new.sh
| annarev/tensorflow | tensorflow/tools/ci_build/rel/ubuntu_cuda11_2/gpu_py37_pip.sh | Shell | apache-2.0 | 2,347 |
#!/bin/sh
APPNAME="plcrtd"
STRIP="none" # or "ppi"
LINKTYPE="static" # or "allow-dynamic"
BIN_DIR="bin"
RC_FILE=${HOME}/.staticperlrc
SP_FILE=${HOME}/staticperl
BOOT_FILE="../src/main.pl"
if [ -r ${RC_FILE} ]; then
. ${RC_FILE}
else
echo "${RC_FILE}: not found"
exit 1
fi
[ -d ${BIN_DIR} ] || mkdir ${BIN_DIR} || exit 1
${SP_FILE} mkapp ${BIN_DIR}/$APPNAME --boot ${BOOT_FILE} \
-MConfig \
-MConfig_heavy.pl \
-Msort.pm \
-Mfeature.pm \
-Mvars \
-Mutf8 \
-Mutf8_heavy.pl \
-MErrno \
-MFcntl \
-MPOSIX \
-MSocket \
-MCarp \
-MEncode \
-Mcommon::sense \
-MEnv \
-MEV \
-MGuard \
-MAnyEvent \
-MAnyEvent::Handle \
-MAnyEvent::Socket \
-MAnyEvent::Impl::EV \
-MAnyEvent::Util \
-MAnyEvent::Log \
-MGetopt::Long \
-MFile::Spec::Functions \
-MJSON::XS \
-MSys::Syslog \
-MFeersum \
-MHTTP::Body \
-MHTML::Entities \
-MFile::Path \
-MData::Dumper \
-MTemplate \
-MTemplate::Filters \
-MTemplate::Stash::XS \
-MDBI \
-MDBD::SQLite \
-MIO::FDPass \
-MProc::FastSpawn \
-MAnyEvent::Fork \
-MAnyEvent::Fork::RPC \
-MAnyEvent::Fork::Pool \
--strip ${STRIP} \
--${LINKTYPE} \
--usepacklists \
--add "../src/app/feersum.pl app/feersum.pl" \
--add "../src/backend/feersum.pl backend/feersum.pl" \
--add "../src/modules/Local/Server/Hooks.pm Local/Server/Hooks.pm" \
--add "../src/modules/Local/Server/Settings.pm Local/Server/Settings.pm" \
--add "../src/modules/Local/DB.pm Local/DB.pm" \
--add "../src/modules/Local/DB/SQLite.pm Local/DB/SQLite.pm" \
--add "../src/modules/Local/OpenSSL/Command.pm Local/OpenSSL/Command.pm" \
--add "../src/modules/Local/Data/JSON.pm Local/Data/JSON.pm" \
--add "../src/modules/Local/Run.pm Local/Run.pm" \
--add "../src/modules/Local/OpenSSL/Conf.pm Local/OpenSSL/Conf.pm" \
--add "../src/modules/Local/OpenSSL/Script/Revoke.pm Local/OpenSSL/Script/Revoke.pm" \
"$@"
| gh0stwizard/plcrtd | misc/static.sh | Shell | artistic-2.0 | 1,784 |
#!/usr/bin/env ./tester
#
include var
include assert
testcase_begin "$@"
unset undefined
defined='content'
empty=
test_var_is_set_succeeds_if_variable_is_defined() {
assert that var_is_set defined returns 0
}
test_var_is_set_fails_if_variable_is_undefined() {
assert that var_is_set undefined returns 1
}
test_var_not_set_fails_if_variable_is_defined() {
assert that var_not_set defined returns 1
}
test_var_not_set_succeeds_if_variable_is_undefined() {
assert that var_not_set undefined returns 0
}
test_var_is_empty_succeeds_if_variable_is_empty() {
assert that var_is_empty empty returns 0
}
test_var_is_empty_succeeds_if_variable_is_undefined() {
assert that var_is_empty undefined returns 0
}
test_var_is_empty_fails_if_variable_is_not_empty() {
assert that var_is_empty defined returns 1
}
test_var_not_empty_fails_if_variable_is_empty() {
assert that var_not_empty empty returns 1
}
test_var_not_empty_fails_if_variable_is_undefined() {
assert that var_not_empty undefined returns 1
}
test_var_not_empty_succeeds_if_variable_is_not_empty() {
assert that var_not_empty defined returns 0
}
teststage_proceed
test_var_escape() {
assert that var_escape '"$\' writes '"\"\$\\"'
}
teststage_proceed
test_var_print() {
assert that var_print defined writes 'defined="content"'
}
test_var_print_prints_nothing_for_undefined_variable() {
assert that var_print undefined writes ''
}
test_var_print_monkeytest() {
assert that var_print 'defined|q 09)|834-3+' writes ''
}
test_var_print_escapes_correctly() {
needs_escaping='"$
\'
assert that var_print needs_escaping writes 'needs_escaping="\"\$
\\"' #"
}
teststage_proceed
test_var_expand() {
assert that var_expand defined writes content
}
test_var_expand_array_variable() {
array=( some text )
assert that var_expand array[*] writes "some text"
}
test_var_expand_multiple_arguments() {
other=more
assert that var_expand other defined writes "more content"
}
test_var_expand_advanced_expansion() {
assert that var_expand "defined/c/k" writes "kontent"
}
test_var_expand_indirect_expansion() {
pointer=defined
assert that var_expand !pointer writes "content"
}
teststage_proceed
test_@() {
assert that @ defined writes "content"
}
teststage_proceed
test_var() {
var var=value
assert that var contains value
}
test_var_set_if_undefined_sets_value() {
var @{var-value}
assert that var contains "value"
}
test_var_set_if_undefined_keeps_value() {
var @{defined-"bad content"}
assert that defined contains "content"
}
teststage_proceed
test_var_sets_an_array_element() {
var var[10]=10
assert that var[0] contains ''
assert that var[10] contains 10
}
test_var_resets_an_undefined_array_element() {
var @{var[10]-10}
assert that var[0] contains ''
assert that var[10] contains 10
}
test_var_not_resets_an_defined_array_element() {
var[10]=10
var @{var[10]-0}
assert that var[0] contains ''
assert that var[10] contains 10
}
test_var_fails_for_invalid_varname() {
assert that var @=0 returns $err_INVAL
}
test_var_reset_fails_with_empty_varname() {
assert that var @{-fails} returns $err_INVAL
}
test_var_multiple_arguments(){
var var[0]=10 @{var[10]-10} @{var[10]-0} var[0]=0
assert that var[@] contains "0 10"
}
teststage_proceed
test_var_init() {
var_init undefined "content"
assert that undefined contains content
}
test_var_init_does_not_resets_predefined() {
var_init defined "bad content"
assert that defined contains content
}
test_var_init_resets_empty() {
empty=
var_init empty "content"
assert that empty contains content
}
test_var_init_does_not_resets_empty_if_3rd_parameter_is_set() {
empty=
var_init empty "content" ''
assert that empty contains ''
}
teststage_proceed
test_var_script_init_prefixes_variable_name() {
SCRIPT_PREFIX=script
var_script_init var content
assert that var contains ''
assert that script_var contains content
}
test_var_script_prefixes_variable_name() {
SCRIPT_PREFIX=script
var_script var1=content1 var2=content2 @{var3-content3}
assert that var1 contains ''
assert that var2 contains ''
assert that script_var1 contains content1
assert that script_var2 contains content2
assert that script_var3 contains content3
}
testcase_end "$@"
| git-e/bash_include | test/test-var.sh | Shell | bsd-2-clause | 4,204 |
#!/bin/bash
OCAML_RELEASE=3.11
OCAML_VERSION=${OCAML_RELEASE}.2
OCAMLDIST=ocaml-${OCAML_VERSION}
cat <<EOF
This script will download, build, and install the following
components:
1. OCaml version ${OCAML_VERSION}
EOF
echo "Getting OCaml ${OCAML_VERSION}..."
if test -f ${OCAMLDIST}.tar.gz ; then
echo " (already present)"
else
wget -q http://caml.inria.fr/pub/distrib/ocaml-${OCAML_RELEASE}/${OCAMLDIST}.tar.gz
fi
echo "Unpacking OCaml ${OCAML_VERSION} into ${OCAMLDIST}..."
if test -d ${OCAMLDIST} ; then
echo " (already unpacked)"
else
tar xzf ${OCAMLDIST}.tar.gz
fi
echo "Building OCaml ${OCAML_VERSION}..."
if test -f ${OCAMLDIST}/.built ; then
echo " (already built)"
else
cd ${OCAMLDIST}
if test -f .configured ; then
echo " (already configured)"
else
./configure -no-shared-libs
touch .configured
fi
# cat >ocamldoc/remove_DEBUG <<\EOF
# #!/bin/sh
# echo "# 1 \"$1\""
# LC_ALL=C sed -e '/DEBUG/c\
# (* DEBUG statement removed *)' "$1"
# EOF
make world.opt
make install
touch .built
fi
cat <<EOF
*******************************************************************************
OCaml ${OCAML_VERSION} has been successfully installed
*******************************************************************************
EOF
| cartazio/tlaps | tools/installer/install-ocaml-3.11.2.sh | Shell | bsd-2-clause | 1,254 |
#!/bin/bash
conda install -y conda=4.0.6
conda-env remove -y -n pydata_exp3
conda create -y -n pydata_exp3 python=3.5.*
conda install -c conda-forge -n pydata_exp3 -y \
'bokeh=0.11' \
'cartopy=0.14*' \
'cython=0.23.*' \
'dask=0.8*' \
'fiona=1.6*' \
'gdal=1.11*' \
'ipython=4.2.*' \
'ipywidgets=4.1*' \
'jupyter=1.*' \
'krb5' \
'libgdal=1.11*' \
'networkx=1.11' \
'numpy=1.10*' \
'matplotlib=1.5*' \
'pandas=0.18*' \
'pip' \
'psycopg2' \
'pyproj=1.9.4' \
'pytables=3.2*' \
'rasterio=0.32*' \
'seaborn=0.7*' \
'shapely=1.5*' \
'six' \
'scipy=0.17' \
'scikit-learn=0.17*' \
'sqlalchemy=1.0.12' \
'statsmodels=0.6*' \
'xlrd=0.9*' \
'xlsxwriter=0.8.*'
# rpy2 as in http://stackoverflow.com/questions/24987932/installing-rpy2-on-mac-osx-with-anaconda-python-3-4-and-r-3-1-installed-via-macp
conda install -n _build -y --no-update-deps patchelf
conda skeleton pypi rpy2 --version 2.8.0
conda build rpy2
conda install -n pydata_exp3 -y --use-local rpy2
rm -r rpy2
conda install -c anaconda-nb-extensions -n pydata_exp3 -y \
'nbpresent=2.*' \
'nbbrowserpdf'
source activate pydata_exp3
pip install -U pip==8.1.*
pip install -U --no-deps geopy descartes mplleaflet brewer2mpl
pip install -U --no-deps git+https://github.com/quantopian/[email protected]
pip install -U --no-deps git+https://github.com/pysal/pysal.git@dev
pip install --process-dependency-links git+https://github.com/pymc-devs/pymc3
pip install -U --no-deps git+git://github.com/geopandas/geopandas.git@master
rm -f pydata_test.html
jupyter nbconvert --to notebook --nbformat 3 pydata_check.ipynb
jupyter nbconvert --to notebook --execute --allow-errors pydata_check.v3.ipynb --output pydata_test.ipynb
jupyter nbconvert --to html pydata_test.ipynb
rm pydata_test.ipynb pydata_check.v3.ipynb
| darribas/envs | pydata_exp3.sh | Shell | bsd-2-clause | 1,882 |
#!/bin/bash
rm -f *.so
python setup.py build_ext --inplace
export LD_LIBRARY_PATH="$HOME/usr/lib"
python main.py
| teoliphant/numpy-refactor | experiment/do.sh | Shell | bsd-3-clause | 114 |
#!/bin/sh
# https://docs.docker.com/engine/reference/builder/#exec-form-entrypoint-example
trap "echo TRAPed signal" HUP INT QUIT TERM
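# Run the backend in the background and the HTTP server in the foreground; when
# 4s-httpd exits (or a trapped signal interrupts it), fall through and stop the backend.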
/4store/bin/4s-backend -D default &
backend_pid=$(pidof 4s-backend)
/4store/bin/4s-httpd -D default
echo "killing backend"
kill -TERM $backend_pid
echo "exited"
| jaredjennings/docker-4store | backend-and-httpd.sh | Shell | bsd-3-clause | 301 |
#!/bin/bash
# Raw motifs
curl --output jaspar.zip 'http://jaspar.genereg.net/download/CORE/JASPAR2020_CORE_vertebrates_non-redundant_pfms_jaspar.zip'
unzip jaspar.zip
rm jaspar.zip
cat *.jaspar | gzip -c > jaspar.gz
rm *.jaspar
# Clustered motifs
curl --output JASPAR_2020_matrix_clustering_vertebrates_cluster_root_motifs.tf http://folk.uio.no/jamondra/JASPAR_2020_clusters/vertebrates/interactive_trees/JASPAR_2020_matrix_clustering_vertebrates_cluster_root_motifs.tf
python ./convert.py -f JASPAR_2020_matrix_clustering_vertebrates_cluster_root_motifs.tf > jaspar.cluster
rm JASPAR_2020_matrix_clustering_vertebrates_cluster_root_motifs.tf
gzip jaspar.cluster
| tobiasrausch/alfred | motif/downloadMotifs.sh | Shell | bsd-3-clause | 666 |
#!/bin/bash
#
# before_install
#
cwd=$(pwd)
wget -q https://www.aps.anl.gov/epics/download/base/baseR3.14.12.6.tar.gz \
-O /tmp/base.tar.gz
tar xvf /tmp/base.tar.gz -C $HOME
cd $HOME/base-3.14.12.6
make -j2
cd ${cwd}
| archman/phantasy | ci/build_epics.sh | Shell | bsd-3-clause | 223 |
#! /bin/csh
foreach filename (`ls -d *.BHZ.sac`)
#echo $filename |cut -d'.' -f2 | read station
#echo $filename |cut -d'.' -f3 | read component
set station = `echo $filename | awk '{split($0,a,"."); print a[1]}'`
echo $station
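# SAC batch below: remove the mean, deconvolve the instrument response from the
# pole-zero files, scale by 100, rotate the horizontals to the great-circle path,
# integrate, band-pass 0.02-0.05 Hz, and resample to a 1 s interval.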
sac << sacend
read $station.BHE.sac
rmean
transfer from polezero s ../../zp/${station}.BHE.zp
mul 100
write tmpe
read $station.BHN.sac
rmean
transfer from polezero s ../../zp/${station}.BHN.zp
mul 100
write tmpn
read $station.BHZ.sac
rmean
transfer from polezero s ../../zp/${station}.BHZ.zp
mul 100
write tmpz
read tmpe tmpn
rot to gcp
read more tmpz
int
bp co 0.02 0.05 p 2
interpolate delta 1.0
write tmp2 tmp1 tmp3
quit
sacend
sac2helm out={$station}.data
end
| qingkaikong/useful_script | Shell/01_use_SAC_in_shell.sh | Shell | bsd-3-clause | 697 |
#!/bin/bash
# Created by the VLSCI job script generator for SLURM on x86
# Thu Oct 17 2013 16:48:06 GMT+1100 (EST)
# Partition for the job:
#SBATCH -p main
# The name of the job:
#SBATCH --job-name="illum"
# Maximum number of CPU cores used by the job:
#SBATCH --ntasks=1
# The amount of memory in megabytes per process in the job:
#SBATCH --mem-per-cpu=1024
# Send yourself an email when the job:
# aborts abnormally (fails)
#SBATCH --mail-type=FAIL
# Use this email address:
#SBATCH [email protected]
# The maximum running time of the job in days-hours:mins:sec
#SBATCH --time=0-12:0:00
# Run the job from the directory where it was launched (default):
# The job command(s):
export PATH=${HOME}/anaconda/envs/husc/bin:$PATH
husc illum "$@"
| jni/vlsci-jobs | husc-illum.sh | Shell | bsd-3-clause | 765 |
#!/usr/bin/env bash
# Copyright Contributors to the Open Shading Language project.
# SPDX-License-Identifier: BSD-3-Clause
# https://github.com/AcademySoftwareFoundation/OpenShadingLanguage
set -ex
#dpkg --list
sudo add-apt-repository ppa:ubuntu-toolchain-r/test
time sudo apt-get update
time sudo apt-get -q install -y \
git \
cmake \
ninja-build \
g++ \
ccache \
libboost-dev libboost-thread-dev \
libboost-filesystem-dev libboost-regex-dev \
libtiff-dev \
libilmbase-dev libopenexr-dev \
python-dev python-numpy \
libgif-dev \
libpng-dev \
flex bison libbison-dev \
opencolorio-tools \
libsquish-dev \
libpugixml-dev \
qt5-default
export CMAKE_PREFIX_PATH=/usr/lib/x86_64-linux-gnu:$CMAKE_PREFIX_PATH
if [[ "$CXX" == "g++-4.8" ]] ; then
time sudo apt-get install -y g++-4.8
elif [[ "$CXX" == "g++-6" ]] ; then
time sudo apt-get install -y g++-6
elif [[ "$CXX" == "g++-7" ]] ; then
time sudo apt-get install -y g++-7
elif [[ "$CXX" == "g++-8" ]] ; then
time sudo apt-get install -y g++-8
elif [[ "$CXX" == "g++-9" ]] ; then
time sudo apt-get install -y g++-9
elif [[ "$CXX" == "g++-10" ]] ; then
time sudo apt-get install -y g++-10
fi
# time sudo apt-get install -y clang
# time sudo apt-get install -y llvm
#time sudo apt-get install -y libopenjpeg-dev
#time sudo apt-get install -y libjpeg-turbo8-dev
#dpkg --list
# Build or download LLVM
source src/build-scripts/build_llvm.bash
# Build pybind11
CXX="ccache $CXX" source src/build-scripts/build_pybind11.bash
source src/build-scripts/build_pugixml.bash
if [[ "$OPENEXR_VERSION" != "" ]] ; then
CXX="ccache $CXX" source src/build-scripts/build_openexr.bash
fi
if [[ "$OPENCOLORIO_VERSION" != "" ]] ; then
# Temporary (?) fix: GH ninja having problems, fall back to make
CMAKE_GENERATOR="Unix Makefiles" \
source src/build-scripts/build_opencolorio.bash
fi
if [[ "$OPENIMAGEIO_VERSION" != "" ]] ; then
# There are many parts of OIIO we don't need to build
export ENABLE_iinfo=0 ENABLE_iv=0 ENABLE_igrep=0
export ENABLE_iconvert=0 ENABLE_testtex=0
export ENABLE_BMP=0 ENABLE_cineon=0 ENABLE_DDS=0 ENABLE_DPX=0 ENABLE_FITS=0
export ENABLE_ICO=0 ENABLE_iff=0 ENABLE_jpeg2000=0 ENABLE_PNM=0 ENABLE_PSD=0
export ENABLE_RLA=0 ENABLE_SGI=0 ENABLE_SOCKET=0 ENABLE_SOFTIMAGE=0
export ENABLE_TARGA=0 ENABLE_WEBP=0
export OPENIMAGEIO_MAKEFLAGS="OIIO_BUILD_TESTS=0 USE_OPENGL=0"
source src/build-scripts/build_openimageio.bash
fi
| imageworks/OpenShadingLanguage | src/build-scripts/gh-installdeps.bash | Shell | bsd-3-clause | 2,533 |
#
# NON-COARRAY
#
export SWALS_SRC='../../../src'
source ${SWALS_SRC}/test_run_commands
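# (test_run_commands is expected to define OMP_RUN_COMMAND, used to launch the model below)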
# Clean existing binary
rm -f ./model
rm -rf ./OUTPUTS
# Build the code
make -B -f make_model > build_outfile.log
# Run the code
eval "$OMP_RUN_COMMAND ./model '' > outfile.log"
# Plot and report tests
Rscript plot_results.R
| GeoscienceAustralia/ptha | propagation/SWALS/examples/nthmp/Submerged_Island_Lab/run_model.sh | Shell | bsd-3-clause | 311 |
#!/usr/bin/env bash
# Install composer dependencies
composer install
mysql -e 'CREATE DATABASE IF NOT EXISTS test;'
cp tests/app/config/db.mysql.php.dist tests/app/config/db.php
php tests/app/yii migrate --interactive=0
php tests/app/yii fixture/load "*" --interactive=0
| yiisolutions/yii2-user-module | ci-scripts/before_script.sh | Shell | bsd-3-clause | 273 |
###########
# How to use docker.sh
# $ export AUTO_TEST=true # false will prompt you to a shell inside the container
# $ export DOCKER_IMAGE="hadim/py3-env" # or hadim/py2-env
# $ export BRANCH="master" # branch to build
###########
# If AUTO_TEST=true, scikit-tracker is built and tested automatically
# If AUTO_TEST=false you will be prompted to a shell inside the container
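# Example invocation (illustrative values):
#   AUTO_TEST=true DOCKER_IMAGE="hadim/py2-env" BRANCH="master" ./docker.sh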
AUTO_TEST=${AUTO_TEST:-true}
BRANCH=${BRANCH:-master}
DOCKER_IMAGE=${DOCKER_IMAGE:-hadim/py3-env}
DOCKER_CONTAINER="py-env"
TMP_BUILDER="/tmp/docker_builder_script"
CURRENT_DIR=$(pwd)
mkdir -p $TMP_BUILDER
builder="export PATH=/miniconda/bin:$PATH;
export BRANCH="$BRANCH";
cd /
git clone https://github.com/bnoi/scikit-tracker.git;
cd scikit-tracker/;
git checkout $BRANCH;
make init;
python setup.py build_ext --inplace;
python setup.py install;
python setup.py bdist_wheel;
nosetests sktracker --with-coverage --cover-package=sktracker -v;
make doc;"
printf "$builder" > $TMP_BUILDER/builder.sh
docker pull $DOCKER_IMAGE
docker rm -vf $DOCKER_CONTAINER 2> /dev/null
if [ "$AUTO_TEST" = true ] ; then
docker run --name $DOCKER_CONTAINER -v $TMP_BUILDER:/builder:ro $DOCKER_IMAGE sh /builder/builder.sh
fi
echo "You are now prompted to an interactive shell inside a container."
echo "You can launch ./sh/builder/builder.sh to build scikit-tracker"
docker run -i -t --name $DOCKER_CONTAINER -v $TMP_BUILDER:/builder:ro $DOCKER_IMAGE /bin/bash
| bnoi/scikit-tracker | docker.sh | Shell | bsd-3-clause | 1,442 |
#!/bin/sh
#
# Script to write BTOR from Verilog design
#
if [ "$#" -ne 3 ]; then
echo "Usage: $0 input.v output.btor top-module-name" >&2
exit 1
fi
if ! [ -e "$1" ]; then
echo "$1 not found" >&2
exit 1
fi
FULL_PATH=$(readlink -f "$1")
DIR=$(dirname "$FULL_PATH")
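# Yosys flow: elaborate the hierarchy, lower processes and memories, flatten,
# then emit BTOR for the requested top module.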
./yosys -q -p "
read_verilog -sv $1;
hierarchy -top $3;
hierarchy -libdir $DIR;
hierarchy -check;
proc;
opt; opt_expr -mux_undef; opt;
rename -hide;;;
#techmap -map +/pmux2mux.v;;
splice; opt;
memory_dff -wr_only;
memory_collect;;
flatten;;
memory_unpack;
splitnets -driver;
setundef -zero -undriven;
opt;;;
write_btor $2;"
| swallat/yosys | backends/btor/verilog2btor.sh | Shell | isc | 596 |
#!/bin/bash
FN="PCHiCdata_1.12.0.tar.gz"
URLS=(
"https://bioconductor.org/packages/3.9/data/experiment/src/contrib/PCHiCdata_1.12.0.tar.gz"
"https://bioarchive.galaxyproject.org/PCHiCdata_1.12.0.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-pchicdata/bioconductor-pchicdata_1.12.0_src_all.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-pchicdata/bioconductor-pchicdata_1.12.0_src_all.tar.gz"
)
MD5="18936bf2c0bf4b12b969b6bf8141773a"
# Use a staging area in the conda dir rather than temp dirs, both to avoid
# permission issues as well as to have things downloaded in a predictable
# manner.
STAGING=$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM
mkdir -p $STAGING
TARBALL=$STAGING/$FN
SUCCESS=0
for URL in ${URLS[@]}; do
curl $URL > $TARBALL
[[ $? == 0 ]] || continue
# Platform-specific md5sum checks.
if [[ $(uname -s) == "Linux" ]]; then
if md5sum -c <<<"$MD5  $TARBALL"; then
SUCCESS=1
break
fi
elif [[ $(uname -s) == "Darwin" ]]; then
if [[ $(md5 $TARBALL | cut -f4 -d " ") == "$MD5" ]]; then
SUCCESS=1
break
fi
fi
done
if [[ $SUCCESS != 1 ]]; then
echo "ERROR: post-link.sh was unable to download any of the following URLs with the md5sum $MD5:"
printf '%s\n' "${URLS[@]}"
exit 1
fi
# Install and clean up
R CMD INSTALL --library=$PREFIX/lib/R/library $TARBALL
rm $TARBALL
rmdir $STAGING
| jfallmann/bioconda-recipes | recipes/bioconductor-pchicdata/post-link.sh | Shell | mit | 1,409 |
#!/usr/bin/env bash
protoc --go_out=plugins=grpc:. keyvalue/keyvalue.proto
| agtorre/go-cookbook | chapter7/grpcjson/grpc.sh | Shell | mit | 75 |
#!/usr/bin/env bash
#
# Copyright (c) 2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
export LC_ALL=C.UTF-8
cd "build/pivx-$HOST" || (echo "could not enter distdir build/pivx-$HOST"; exit 1)
if [ "$RUN_UNIT_TESTS" = "true" ]; then
BEGIN_FOLD unit-tests
DOCKER_EXEC LD_LIBRARY_PATH=$TRAVIS_BUILD_DIR/depends/$HOST/lib make $MAKEJOBS check VERBOSE=1
END_FOLD
fi
if [ "$RUN_FUNCTIONAL_TESTS" = "true" ]; then
BEGIN_FOLD functional-tests
DOCKER_EXEC test/functional/test_runner.py --combinedlogslen=4000 ${TEST_RUNNER_EXTRA}
END_FOLD
fi
cd ${TRAVIS_BUILD_DIR} || (echo "could not enter travis build dir $TRAVIS_BUILD_DIR"; exit 1)
| Mrs-X/Darknet | .travis/test_06_script_b.sh | Shell | mit | 766 |
#!/bin/bash
export BEAVER_MODE=connect
## Individual files to send
#export BEAVER_FILES=/var/log/syslog
## Send all files under path
#export BEAVER_PATH=/var/log
## Redis
#export BEAVER_TRANSPORT=redis
#export REDIS_NAMESPACE='logstash'
#export REDIS_URL='redis://redis:6379/0'
## ZeroMQ
#export ZEROMQ_ADDRESS='tcp://indexer:5556'
## RabbitMQ
#export RABBITMQ_HOST=rabbit
#export RABBITMQ_PORT=5672
#export RABBITMQ_VHOST='/'
#export RABBITMQ_USERNAME='guest'
#export RABBITMQ_PASSWORD='guest'
#export RABBITMQ_QUEUE='logstash-queue'
#export RABBITMQ_KEY='logstash-key'
#export RABBITMQ_EXCHANGE='logstash-exchange'
AFTER_CRASH_WAIT=20
{
while true
do
/usr/local/bin/beaver
## If you would prefer to use a config file, use this line instead
#exec /usr/local/bin/beaver -c /etc/beaver.conf
echo "$0: Waiting for $AFTER_CRASH_WAIT seconds before retrying."
sleep $AFTER_CRASH_WAIT
done
} > /var/log/beaver.log 2>&1
| Appdynamics/beaver | contrib/beaver-bash.sh | Shell | mit | 929 |
brew install the_silver_searcher
| redhotvengeance/dotfiles | ag/install.sh | Shell | mit | 33 |
#!/usr/bin/env bash
echo "Installing System Tools..."
echo " Update package..."
sudo apt-get update -y >/dev/null 2>&1
echo " Upgrade package..."
sudo apt-get upgrade -y >/dev/null 2>&1
echo " Install curl..."
sudo apt-get install -y curl >/dev/null 2>&1
echo " Install unzip..."
sudo apt-get install -y unzip >/dev/null 2>&1
echo " Install udftools..."
sudo apt-get install -y udftools
echo " Install libc6-i386 lib32stdc++6 lib32gcc1 lib32ncurses5 lib32z1..."
sudo apt-get install -y libc6-i386 lib32stdc++6 lib32gcc1 lib32ncurses5 lib32z1 >/dev/null 2>&1
echo " Install apt-file..."
sudo apt-get update -y >/dev/null 2>&1
sudo apt-get install -y apt-file && apt-file update
echo " Install python-software-properties..."
sudo apt-get install -y python-software-properties >/dev/null 2>&1
echo " Install french langue"
sudo apt-get install -y language-pack-fr language-pack-fr-base language-pack-gnome-fr language-pack-gnome-fr-base >/dev/null 2>&1
sudo apt-get install -y language-support-fr >/dev/null 2>&1
# http://askubuntu.com/questions/147400/problems-with-eclipse-and-android-sdk
echo " Install ia32-libs..."
sudo apt-get install -y ia32-libs >/dev/null 2>&1
# Install a desktop for the Android graphical tooling, e.g. Eclipse
#echo "What is your preferred Ubuntu desktop?"
#echo "1) Unity desktop (Ubuntu default)"
#echo "2) Gnome desktop"
#echo "3) Gnome Classic desktop"
#echo "4) xfce (lightweight desktop)"
#echo "5) KDE desktop"
#echo "6) Do not install a desktop (use the command line interface only)"
#read case;
#case $case in
# 1) echo "Installing Unity desktop..." | sudo aptitude install -y --without-recommends ubuntu-desktop >/dev/null 2>&1;;
# 2) echo "Installing Gnome desktop..." | sudo apt-get install -y ubuntu-desktop >/dev/null 2>&1;;
# 3) echo "Installing Gnome Classic desktop..." | sudo apt-get install -y gnome-panel >/dev/null 2>&1;;
# 4) echo "Installing xfce lightweight desktop..." | sudo apt-get install -y xubuntu-desktop >/dev/null 2>&1;;
# 5) echo "Installing KDE desktop..." | sudo apt-get install -y kubuntu-desktop >/dev/null 2>&1;;
# 6) exit
#esac
echo "Installing Ubuntu Unity Desktop..."
sudo aptitude install -y --without-recommends ubuntu-desktop >/dev/null 2>&1
# Or, the following desktop...
#echo "Installing Ubuntu Gnome Desktop..."
#sudo apt-get install -y ubuntu-desktop >/dev/null 2>&1
# Or, the following desktop...
#echo "Installing Ubuntu xfce lightweight desktop..."
#sudo apt-get install -y xubuntu-desktop >/dev/null 2>&1
# Or, the following desktop...
#echo "Installing Ubuntu KDE Desktop..."
#sudo apt-get install -y kubuntu-desktop >/dev/null 2>&1
echo "Installing Android ADT Bundle with SDK and Eclipse..."
cd /tmp
sudo curl -O https://dl.google.com/android/adt/adt-bundle-linux-x86_64-20140702.zip
sudo unzip /tmp/adt-bundle-linux-x86_64-20140702.zip >/dev/null 2>&1
sudo mv /tmp/adt-bundle-linux-x86_64-20140702 /usr/local/android/
sudo rm -rf /tmp/adt-bundle-linux-x86_64-20140702.zip
#echo "Installing Android NDK..."
#cd /tmp
#sudo curl -O http://dl.google.com/android/ndk/android-ndk-r9-linux-x86_64.tar.bz2
#sudo tar -jxf /tmp/android-ndk-r9-linux-x86_64.tar.bz2 >/dev/null 2>&1
#sudo mv /tmp/android-ndk-r9 /usr/local/android/ndk
#sudo rm -rf /tmp/android-ndk-r9-linux-x86_64.tar.bz2
sudo mkdir /usr/local/android/sdk/add-ons
sudo chmod -R 755 /usr/local/android
sudo ln -s /usr/local/android/sdk/tools/android /usr/bin/android
sudo ln -s /usr/local/android/sdk/platform-tools/adb /usr/bin/adb
echo "Updating ANDROID_HOME..."
cd ~/
# quote the heredoc delimiter so $ANDROID_HOME expands when .profile is sourced, not now
cat << 'End' >> .profile
export ANDROID_HOME="/usr/local/android/sdk"
export PATH=$ANDROID_HOME/tools:$ANDROID_HOME/platform-tools:$PATH
End
echo "Updating and installing android SDK"
android list sdk -a
printf 'y\n' | sudo android update sdk --no-ui -t tools,platform-tools,doc-19,android-19,source-19
echo "Adding USB device driver information..."
echo "For more detail see http://developer.android.com/tools/device.html"
sudo cp /vagrant/51-android.rules /etc/udev/rules.d
sudo chmod a+r /etc/udev/rules.d/51-android.rules
sudo service udev restart
sudo android update adb
sudo adb kill-server
sudo adb start-server
echo "Installing QEO SDK"
cd /tmp
sudo curl -L "https://dl.bintray.com/elendrim/generic/qeo-sdk-1.1.0-20150529.113620-83.zip" -o qeo-sdk-1.1.0-20150529.113620-83.zip
sudo unzip /tmp/qeo-sdk-1.1.0-20150529.113620-83.zip >/dev/null 2>&1
sudo mv /tmp/QeoSDK-1.1.0 /usr/local/QeoSDK/
sudo rm -rf /tmp/QeoSDK-1.1.0
echo " "
echo " "
echo " "
echo "[ Next Steps ]================================================================"
echo " "
echo "1. Manually setup a USB connection for your Android device to the new VM"
echo " "
echo " If using VMware Fusion (for example, will be similar for VirtualBox):"
echo " 1. Plug your android device hardware into the computers USB port"
echo " 2. Open the 'Virtual Machine Library'"
echo " 3. Select the VM, e.g. 'android-vm: default', right-click and choose"
echo " 'Settings...'"
echo " 4. Select 'USB & Bluetooth', check the box next to your device and set"
echo " the 'Plug In Action' to 'Connect to Linux'"
echo " 5. Plug the device into the USB port and verify that it appears when "
echo " you run 'lsusb' from the command line"
echo " "
echo "2. Your device should appear when running 'lsusb' enabling you to use adb, e.g."
echo " "
echo " $ adb devices"
echo " ex. output,"
echo " List of devices attached"
echo " 007jbmi6 device"
echo " "
echo " $ adb shell"
echo " i.e. to log into the device (be sure to enable USB debugging on the device)"
echo " "
echo "See the included README.md for more detail on how to run and work with this VM."
echo " "
echo "[ Start your Ubuntu VM ]======================================================"
echo " "
echo "To start the VM, "
echo " To use with VirtualBox (free),"
echo " "
echo " $ vagrant up"
echo " "
echo " To use with VMware Fusion (OS X) (requires paid plug-in),"
echo " "
echo " $ vagrant up --provider=vmware_fusion"
echo " "
echo " To use VMware Workstation (Windows, Linux) (requires paid plug-in),"
echo " "
echo " $ vagrant up --provider=vmware_workstation"
echo " "
echo " "
echo "See the included README.md for more detail on how to run and work with this VM."
| elendrim/qeo-vm | provision.sh | Shell | mit | 6,348 |
#!/bin/bash
FN="pd.aragene.1.0.st_3.12.0.tar.gz"
URLS=(
"https://bioconductor.org/packages/3.11/data/annotation/src/contrib/pd.aragene.1.0.st_3.12.0.tar.gz"
"https://bioarchive.galaxyproject.org/pd.aragene.1.0.st_3.12.0.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-pd.aragene.1.0.st/bioconductor-pd.aragene.1.0.st_3.12.0_src_all.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-pd.aragene.1.0.st/bioconductor-pd.aragene.1.0.st_3.12.0_src_all.tar.gz"
)
MD5="ff87a0793fd4b713c4a45b6c1d4a4977"
# Use a staging area in the conda dir rather than temp dirs, both to avoid
# permission issues as well as to have things downloaded in a predictable
# manner.
STAGING=$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM
mkdir -p $STAGING
TARBALL=$STAGING/$FN
SUCCESS=0
for URL in ${URLS[@]}; do
curl $URL > $TARBALL
[[ $? == 0 ]] || continue
# Platform-specific md5sum checks.
if [[ $(uname -s) == "Linux" ]]; then
if md5sum -c <<<"$MD5  $TARBALL"; then
SUCCESS=1
break
fi
elif [[ $(uname -s) == "Darwin" ]]; then
if [[ $(md5 $TARBALL | cut -f4 -d " ") == "$MD5" ]]; then
SUCCESS=1
break
fi
fi
done
if [[ $SUCCESS != 1 ]]; then
echo "ERROR: post-link.sh was unable to download any of the following URLs with the md5sum $MD5:"
printf '%s\n' "${URLS[@]}"
exit 1
fi
# Install and clean up
R CMD INSTALL --library=$PREFIX/lib/R/library $TARBALL
rm $TARBALL
rmdir $STAGING
| roryk/recipes | recipes/bioconductor-pd.aragene.1.0.st/post-link.sh | Shell | mit | 1,466 |
#!/bin/sh
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
set -e
ROOTDIR=dist
BUNDLE="${ROOTDIR}/Goldcoin-Qt.app"
CODESIGN=codesign
TEMPDIR=sign.temp
TEMPLIST=${TEMPDIR}/signatures.txt
OUT=signature.tar.gz
OUTROOT=osx
if [ -z "$1" ]; then
echo "usage: $0 <codesign args>"
echo "example: $0 -s MyIdentity"
exit 1
fi
rm -rf ${TEMPDIR} ${TEMPLIST}
mkdir -p ${TEMPDIR}
${CODESIGN} -f --file-list ${TEMPLIST} "$@" "${BUNDLE}"
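# codesign's --file-list records every file it signed; Mach-O binaries have
# their detached signatures carved out below with dd, while CodeResources
# files are copied verbatim.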
grep -v CodeResources < "${TEMPLIST}" | while read i; do
TARGETFILE="${BUNDLE}/`echo "${i}" | sed "s|.*${BUNDLE}/||"`"
SIZE=`pagestuff "$i" -p | tail -2 | grep size | sed 's/[^0-9]*//g'`
OFFSET=`pagestuff "$i" -p | tail -2 | grep offset | sed 's/[^0-9]*//g'`
SIGNFILE="${TEMPDIR}/${OUTROOT}/${TARGETFILE}.sign"
DIRNAME="`dirname "${SIGNFILE}"`"
mkdir -p "${DIRNAME}"
echo "Adding detached signature for: ${TARGETFILE}. Size: ${SIZE}. Offset: ${OFFSET}"
dd if="$i" of="${SIGNFILE}" bs=1 skip=${OFFSET} count=${SIZE} 2>/dev/null
done
grep CodeResources < "${TEMPLIST}" | while read i; do
TARGETFILE="${BUNDLE}/`echo "${i}" | sed "s|.*${BUNDLE}/||"`"
RESOURCE="${TEMPDIR}/${OUTROOT}/${TARGETFILE}"
DIRNAME="`dirname "${RESOURCE}"`"
mkdir -p "${DIRNAME}"
echo "Adding resource for: "${TARGETFILE}""
cp "${i}" "${RESOURCE}"
done
rm ${TEMPLIST}
tar -C "${TEMPDIR}" -czf "${OUT}" .
rm -rf "${TEMPDIR}"
echo "Created ${OUT}"
| goldcoin/Goldcoin-GLD | contrib/macdeploy/detached-sig-create.sh | Shell | mit | 1,526 |
#sudo apt-get update
sudo apt-get install nodejs octave octave-audio octave-control octave-general octave-signal sox vorbis-tools mplayer qarecord ffmpeg
mkdir data tmp
mkdir data/classifiers data/database data/dataset data/processed_records data/stream data/training
cp Nothing/* data/classifiers
cp Nothing/Nothing.dat data/dataset
coffee --compile --output dist src
| ResEnv/TidmarShazam | install.sh | Shell | mit | 373 |
#!/bin/bash
set -eo pipefail
# if command starts with an option, prepend mysqld
if [ "${1:0:1}" = '-' ]; then
set -- mysqld "$@"
fi
if [ "$1" = 'mysqld' ]; then
# Get config
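# (mysqld prints its effective settings under --verbose --help; awk pulls the datadir value)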
DATADIR="$("$@" --verbose --help --log-bin-index=`mktemp -u` 2>/dev/null | awk '$1 == "datadir" { print $2; exit }')"
if [ ! -d "$DATADIR/mysql" ]; then
if [ -z "$MYSQL_ROOT_PASSWORD" -a -z "$MYSQL_ALLOW_EMPTY_PASSWORD" -a -z "$MYSQL_RANDOM_ROOT_PASSWORD" ]; then
echo >&2 'error: database is uninitialized and password option is not specified '
echo >&2 ' You need to specify one of MYSQL_ROOT_PASSWORD, MYSQL_ALLOW_EMPTY_PASSWORD and MYSQL_RANDOM_ROOT_PASSWORD'
exit 1
fi
mkdir -p "$DATADIR"
chown -R mysql:mysql "$DATADIR"
echo 'Initializing database'
mysql_install_db --user=mysql --datadir="$DATADIR" --rpm
echo 'Database initialized'
"$@" --skip-networking &
pid="$!"
mysql=( mysql --protocol=socket -uroot )
for i in {30..0}; do
if echo 'SELECT 1' | "${mysql[@]}" &> /dev/null; then
break
fi
echo 'MySQL init process in progress...'
sleep 1
done
if [ "$i" = 0 ]; then
echo >&2 'MySQL init process failed.'
exit 1
fi
if [ -z "$MYSQL_INITDB_SKIP_TZINFO" ]; then
# sed is for https://bugs.mysql.com/bug.php?id=20545
mysql_tzinfo_to_sql /usr/share/zoneinfo | sed 's/Local time zone must be set--see zic manual page/FCTY/' | "${mysql[@]}" mysql
fi
if [ ! -z "$MYSQL_RANDOM_ROOT_PASSWORD" ]; then
MYSQL_ROOT_PASSWORD="$(pwgen -1 32)"
echo "GENERATED ROOT PASSWORD: $MYSQL_ROOT_PASSWORD"
fi
"${mysql[@]}" <<-EOSQL
-- What's done in this file shouldn't be replicated
-- or products like mysql-fabric won't work
SET @@SESSION.SQL_LOG_BIN=0;
DELETE FROM mysql.user ;
CREATE USER 'root'@'%' IDENTIFIED BY '${MYSQL_ROOT_PASSWORD}' ;
GRANT ALL ON *.* TO 'root'@'%' WITH GRANT OPTION ;
DROP DATABASE IF EXISTS test ;
FLUSH PRIVILEGES ;
EOSQL
if [ ! -z "$MYSQL_ROOT_PASSWORD" ]; then
mysql+=( -p"${MYSQL_ROOT_PASSWORD}" )
fi
if [ "$MYSQL_DATABASE" ]; then
echo "CREATE DATABASE IF NOT EXISTS \`$MYSQL_DATABASE\` ;" | "${mysql[@]}"
mysql+=( "$MYSQL_DATABASE" )
fi
if [ "$MYSQL_USER" -a "$MYSQL_PASSWORD" ]; then
echo "CREATE USER '$MYSQL_USER'@'%' IDENTIFIED BY '$MYSQL_PASSWORD' ;" | "${mysql[@]}"
if [ "$MYSQL_DATABASE" ]; then
echo "GRANT ALL ON \`$MYSQL_DATABASE\`.* TO '$MYSQL_USER'@'%' ;" | "${mysql[@]}"
fi
echo 'FLUSH PRIVILEGES ;' | "${mysql[@]}"
fi
echo
for f in /docker-entrypoint-initdb.d/*; do
case "$f" in
*.sh) echo "$0: running $f"; . "$f" ;;
*.sql) echo "$0: running $f"; "${mysql[@]}" < "$f"; echo ;;
*.sql.gz) echo "$0: running $f"; gunzip -c "$f" | "${mysql[@]}"; echo ;;
*) echo "$0: ignoring $f" ;;
esac
echo
done
if ! kill -s TERM "$pid" || ! wait "$pid"; then
echo >&2 'MySQL init process failed.'
exit 1
fi
echo
echo 'MySQL init process done. Ready for start up.'
echo
fi
chown -R mysql:mysql "$DATADIR"
fi
exec "$@"
| dprasanthv/DockerFiles | mariadb/docker-entrypoint.sh | Shell | mit | 3,041 |
#! /bin/sh -e
# tup - A file-based build system
#
# Copyright (C) 2010-2017 Mike Shal <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# Doing touch ok.o; rm -f ok.o ok.c; tup upd shouldn't keep ok.o in the dag
. ./tup.sh
touch ok.c
cat > Tupfile << HERE
: foreach *.c |> gcc -c %f -o %o |> %B.o
HERE
tup touch ok.c Tupfile
update
sleep 1
touch ok.o
tup touch ok.o
rm ok.c ok.o
update
check_not_exist ok.o
tup_object_no_exist . ok.o
eotup
| fasterthanlime/tup-fuseless | test/t5071-touch-rm-output.sh | Shell | gpl-2.0 | 1,057 |
#!/bin/sh
TEST_PURPOSE=regress
TEST_PROB_REPORT=0
TEST_TYPE=umlplutotest
TESTNAME=ikev2-06-6msg
EASTHOST=east
WESTHOST=west
WEST_ARPREPLY=1
EAST_INPUT=../../klips/inputs/01-sunrise-sunset-ping.pcap
REF_WEST_OUTPUT=../../klips/west-icmp-01/spi1-cleartext.txt
REF_WEST_FILTER="no-arp-pcap2.pl"
WEST_ARPREPLY=true
#THREEEIGHT=true
REF_EAST_CONSOLE_OUTPUT=east-console.txt
REF26_EAST_CONSOLE_OUTPUT=east-console.txt
REF_WEST_CONSOLE_OUTPUT=west-console.txt
REF26_WEST_CONSOLE_OUTPUT=west-console.txt
REF_CONSOLE_FIXUPS="kern-list-fixups.sed nocr.sed"
REF_CONSOLE_FIXUPS="$REF_CONSOLE_FIXUPS east-prompt-splitline.pl"
REF_CONSOLE_FIXUPS="$REF_CONSOLE_FIXUPS script-only.sed"
REF_CONSOLE_FIXUPS="$REF_CONSOLE_FIXUPS cutout.sed"
REF_CONSOLE_FIXUPS="$REF_CONSOLE_FIXUPS wilog.sed"
REF_CONSOLE_FIXUPS="$REF_CONSOLE_FIXUPS klips-debug-sanitize.sed"
REF_CONSOLE_FIXUPS="$REF_CONSOLE_FIXUPS ipsec-setup-sanitize.sed"
REF_CONSOLE_FIXUPS="$REF_CONSOLE_FIXUPS pluto-whack-sanitize.sed"
REF_CONSOLE_FIXUPS="$REF_CONSOLE_FIXUPS host-ping-sanitize.sed"
REF_CONSOLE_FIXUPS="$REF_CONSOLE_FIXUPS ipsec-look-esp-sanitize.pl"
EAST_INIT_SCRIPT=eastinit.sh
WEST_INIT_SCRIPT=westinit.sh
WEST_RUN_SCRIPT=westrun.sh
EAST_FINAL_SCRIPT=final.sh
WEST_FINAL_SCRIPT=final.sh
| qianguozheng/Openswan | testing/pluto/ikev2-06-6msg/testparams.sh | Shell | gpl-2.0 | 1,255 |
#!/bin/sh
set -exu
zypper --non-interactive install lttng-modules-kmp-default lttng-tools babeltrace
| PSRCode/lttng-ci-1 | scripts/packaging/sles12/install-lttng-packages.sh | Shell | gpl-2.0 | 104 |
#!/bin/sh
# Copyright (C) 2013 Red Hat, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
SKIP_WITH_LVMPOLLD=1
export LVM_TEST_THIN_REPAIR_CMD=${LVM_TEST_THIN_REPAIR_CMD-/bin/false}
. lib/inittest
aux have_thin 1 0 0 || skip
aux prepare_pvs 3
vgcreate -s 128k $vg "$dev1" "$dev2"
vgcreate -s 128k $vg2 "$dev3"
lvcreate -V10M -L10M -T $vg/pool -n $lv1
lvcreate -L10M -n $lv2 $vg
lvchange -an $vg/$lv1
# Test activation
lvchange -aly $vg/$lv1
check active $vg $lv1
lvchange -aln $vg/$lv1
check inactive $vg $lv1
# Test for allowable changes
#
# contiguous_ARG
lvchange -C y $vg/$lv1
lvchange -C n $vg/$lv1
# permission_ARG
lvchange -p r $vg/$lv1
lvchange -p rw $vg/$lv1
# FIXME
#should lvchange -p r $vg/pool
#should lvchange -p rw $vg/pool
# readahead_ARG
lvchange -r none $vg/$lv1
lvchange -r auto $vg/$lv1
# FIXME
# Think about more support
# minor_ARG
lvchange --yes -M y --minor 234 --major 253 $vg/$lv1
lvchange -M n $vg/$lv1
# cannot change major minor for pools
not lvchange --yes -M y --minor 235 --major 253 $vg/pool
not lvchange -M n $vg/pool
# addtag_ARG
lvchange --addtag foo $vg/$lv1
lvchange --addtag foo $vg/pool
# deltag_ARG
lvchange --deltag foo $vg/$lv1
lvchange --deltag foo $vg/pool
# discards_ARG
lvchange --discards nopassdown $vg/pool
lvchange --discards passdown $vg/pool
# zero_ARG
lvchange --zero n $vg/pool
lvchange --zero y $vg/pool
#
# Test for disallowed metadata changes
#
# resync_ARG
not lvchange --resync $vg/$lv1
# alloc_ARG
#not lvchange --alloc anywhere $vg/$lv1
# discards_ARG
not lvchange --discards ignore $vg/$lv1
# zero_ARG
not lvchange --zero y $vg/$lv1
#
# Ensure that allowed args don't cause disallowed args to get through
#
not lvchange --resync -ay $vg/$lv1
not lvchange --resync --addtag foo $vg/$lv1
#
# Play with tags and activation
#
TAG=$(uname -n)
aux lvmconf "activation/volume_list = [ \"$vg/$lv2\", \"@mytag\" ]"
lvchange -ay $vg/$lv1
check inactive $vg $lv1
lvchange --addtag mytag $vg/$lv1
lvchange -ay @mytag_fake
check inactive $vg $lv1
lvchange -ay $vg/$lv1
# Volume has matching tag
check active $vg $lv1
lvchange -an $vg/$lv1
lvchange -ay @mytag
check active $vg $lv1
# Fails here since it cannot clear device header
not lvcreate -Zy -L10 -n $lv3 $vg2
# OK when zeroing is disabled
lvcreate -Zn -L10 -n $lv3 $vg2
check inactive $vg2 $lv3
aux lvmconf "activation/volume_list = [ \"$vg2\" ]"
vgchange -an $vg
vgchange -ay $vg $vg2
lvs -a -o+lv_active $vg $vg2
aux lvmconf "activation/volume_list = [ \"$vg\", \"$vg2\" ]"
vgremove -ff $vg $vg2
|
shehbazj/DyRe
|
test/shell/lvchange-thin.sh
|
Shell
|
gpl-2.0
| 2,911 |
#!/bin/bash
if [ $# -eq 2 ]
then
if [ "$1" = "rootpath" ]
then
find $2 -name "halconf.h" -exec bash update_halconf.sh "{}" \;
else
echo "Usage: update_halconf.sh [rootpath <path>]"
fi
elif [ $# -eq 1 ]
then
declare conffile=$(<"$1")
# if egrep -q "" <<< "$conffile"
# then
echo Processing: $1
egrep -e "\#define\s+[a-zA-Z0-9_]*\s+[a-zA-Z0-9_]" <<< "$conffile" | sed 's/\#define //g; s/ */=/g' > ./values.txt
if ! fmpp -q -C halconf.fmpp
then
echo
echo "aborted"
exit 1
fi
cp ./halconf.h $1
rm ./halconf.h ./values.txt
# fi
else
echo "Usage: update_halconf.sh [rootpath <root path>]"
echo " update_halconf.sh <configuration file>]"
fi
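# Illustrative invocations (paths are examples only):
#   ./update_halconf.sh rootpath ../..          # regenerate every halconf.h under a tree
#   ./update_halconf.sh demos/ARM/halconf.h     # regenerate a single configuration file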
|
serdzz/ChibiOS
|
tools/updater/update_halconf.sh
|
Shell
|
gpl-3.0
| 708 |
# run.sh : code_swarm launching script
# needs the config file as its first parameter
params=$@
default_config="data/sample.config"
code_swarm_jar="dist/code_swarm.jar"
# command line parameters basic check
if [ $# = 0 ]; then
# asking user for a config file
echo "code_swarm project !"
echo -n "Specify a config file, or ENTER for default one [$default_config] : "
read config
if [ ${#config} = 0 ]; then
params=$default_config
else
params=$config
fi
else
  if [ "$1" = "-h" ] || [ "$1" = "--help" ]; then
# if help needed, print it and exit
echo "usage: run.sh <configfile>"
echo ""
echo " data/sample.config is the default config file"
echo ""
exit
else
echo "code_swarm project !"
fi
fi
# checking for code_swarm java binaries
if [ ! -f $code_swarm_jar ]; then
echo "no code_swarm binaries !"
echo "needing to build it with 'ant' and 'javac' (java-sdk)"
echo ""
echo "auto-trying the ant command..."
if ant; then
echo ""
else
echo ""
echo "ERROR, please verify 'ant' and 'java-sdk' installation"
echo -n "press a key to exit"
read key
echo "bye"
exit
fi
fi
# running
#if java -Xmx1000m -classpath dist/code_swarm.jar:lib/core.jar:lib/xml.jar:lib/vecmath.jar:. code_swarm $params; then
#if java -Xmx1000m -classpath dist/code_swarm.jar:lib/gluegen-rt.jar:lib/jogl.jar:lib/jogl-natives-macosx-universal.jar:lib/core.jar:lib/opengl.jar:lib/xml.jar:lib/vecmath.jar:. code_swarm $params; then
if java -Xmx1000m -classpath dist/code_swarm.jar:lib/gluegen-rt.jar:lib/jogl.jar:lib/jogl-natives-macosx-universal.jar:lib/core.jar:lib/opengl.jar:lib/xml.jar:lib/vecmath.jar:. -Djava.library.path=lib/ code_swarm $params; then
  # always exits with an error because the rendering window has no exit button
echo "bye"
# echo -n "error, press a key to exit"
# read key
else
echo "bye"
fi
|
artzub/code_swarm-gource-my-conf
|
tools/codeswarm/run.sh
|
Shell
|
gpl-3.0
| 1,970 |
#!/bin/sh
# requires resttest (https://github.com/lightblue-platform/pyresttest) and argparse module
if [ "x$1" == "x" ]; then
echo "Must specify base URL as first command line argument."
exit 1
fi
# Global:
INTERACTIVE="False"
LOGGING_LEVEL="debug"
if [ "x$2" != "x" ]; then
LOGGING_LEVEL=$2
fi
export ENTITY_NAME="nmalik-$(date +'%Y%m%d%H%M%S')";
# Metadata:
export ENTITY_VERSION_1="1.0.0";
export ENTITY_VERSION_2="2.0.0"
# CRUD:
export ENTITY_VERSION="${ENTITY_VERSION_2}"
echo "Running tests for new entity: $ENTITY_NAME"
python -c "import resttest; args=dict(); args['url']='$1'; args['test']='all.yaml'; args['log']='$LOGGING_LEVEL'; args['interactive']=$INTERACTIVE; args['print_bodies']=$INTERACTIVE; resttest.main(args)" 2>&1
#unset ENTITY_NAME
#unset ENTITY_VERSION_1
#unset ENTITY_VERSION_2
|
derek63/lightblue-tests
|
functional/all.sh
|
Shell
|
gpl-3.0
| 824 |
#!/bin/bash
set -e
SCRIPT=$(readlink -f "$0")
EXEPATH=$(dirname "$SCRIPT")
./dev_test.elf "$@"
|
joseluisquiroga/parallella-baby-steps
|
jlq-test-22ok/run.sh
|
Shell
|
gpl-3.0
| 96 |
#!/bin/sh
outputfile="profile.pstats"
path="profile_data/"
prefix="profile_"
extension=".pstats"
if [ -z "$1" ]
then
filename="`date +%s`"
else
filename=$1
fi
save=$path$prefix$filename$extension
echo "Saved $outputfile to $save"
cp $outputfile $save
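# Illustrative usage:
#   ./saveprofile.sh mytest   # copies profile.pstats to profile_data/profile_mytest.pstats
#   ./saveprofile.sh          # same, but names the copy with the current Unix timestamp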
|
slipperyhank/pyphi
|
profiling/saveprofile.sh
|
Shell
|
gpl-3.0
| 258 |
docker exec -it --user=solr composepulsecrawler_crawl_state_solr_1 bin/solr create_core -c crawl_state
|
anjackson/wren
|
compose-pulse-crawler/create_solr_core.sh
|
Shell
|
agpl-3.0
| 103 |
#!/bin/sh
#
# Run this before configure
#
# This file blatantly ripped off from subversion.
#
# Note: this dependency on Perl is fine: only developers use autogen.sh
# and we can state that dev people need Perl on their machine
#
# TODO: make sure that ac_pkg_swig.m4 gets installed in m4 directory ...
rm -f autogen.err
automake --version | perl -ne 'if (/\(GNU automake\) (([0-9]+).([0-9]+))/) {print; if ($2 < 1 || ($2 == 1 && $3 < 4)) {exit 1;}}'
if [ $? -ne 0 ]; then
echo "Error: you need automake 1.4 or later. Please upgrade."
exit 1
fi
if test ! -d `aclocal --print-ac-dir 2>> autogen.err`; then
echo "Bad aclocal (automake) installation"
exit 1
fi
libtoolize --force --copy || {
echo "error: libtoolize failed"
exit 1
}
# Produce aclocal.m4, so autoconf gets the automake macros it needs
#
ACLOCAL_BINRELOC='-I ac-helpers'
echo "Creating aclocal.m4: aclocal $ACLOCAL_FLAGS $ACLOCAL_BINRELOC"
aclocal $ACLOCAL_FLAGS $ACLOCAL_BINRELOC 2>> autogen.err
# Produce all the `GNUmakefile.in's and create neat missing things
# like `install-sh', etc.
#
echo "automake --add-missing --copy --foreign"
automake --add-missing --copy --foreign 2>> autogen.err || {
echo ""
echo "* * * warning: possible errors while running automake - check autogen.err"
echo ""
}
# If there's a config.cache file, we may need to delete it.
# If we have an existing configure script, save a copy for comparison.
if [ -f config.cache ] && [ -f configure ]; then
cp configure configure.$$.tmp
fi
# Produce ./configure
#
echo "Creating configure..."
autoconf 2>> autogen.err || {
echo ""
echo "* * * warning: possible errors while running automake - check autogen.err"
echo ""
}
run_configure=true
for arg in "$@"; do
case $arg in
--no-configure)
run_configure=false
;;
*)
;;
esac
done
if $run_configure; then
mkdir -p build
cd build
../configure --enable-maintainer-mode "$@"
echo
echo "Now type 'make' to compile link-grammar."
else
echo
echo "Now run 'configure' and 'make' to compile link-grammar."
fi
|
lagleki/jorne
|
autogen.sh
|
Shell
|
lgpl-2.1
| 2,142 |
#!/bin/bash
export PYWIGNER=`pwd`
cd && git clone https://github.com/dynamiq-md/dynamiq_engine
cd dynamiq_engine && python setup.py install
cd && git clone https://github.com/dynamiq-md/dynamiq_samplers
cd dynamiq_samplers && python setup.py install
cd $PYWIGNER
$PYTHON setup.py install
|
dynamiq-md/pyWigner
|
devtools/conda-recipe/build.sh
|
Shell
|
lgpl-2.1
| 290 |
#!/usr/bin/env sh
## Test case header - START
# Get directory path of current test case
CWD="$(dirname $(readlink -f "$0"))"
# Get test case name
TEST_CASE="$( basename ${CWD} | tr '[:lower:]' '[:upper:]' )"
# Get ESCAPE command
ESCAPE="$( readlink -f ${CWD}/../../escape.py )"
if which time >> /dev/null; then
RUN_WITH_MEASUREMENT="$(which time) -v"
elif which bash >> /dev/null; then
RUN_WITH_MEASUREMENT=""
fi
# Print header
echo
echo "==============================================================================="
echo "== TEST $TEST_CASE =="
echo "==============================================================================="
echo
# Print test case description
cat ${CWD}/README.txt
echo
echo "=============================== START TEST CASE ==============================="
echo
## Test case header - END
# Define run command here
ESCAPE_CMD="${ESCAPE} --debug --test --quit --log ${CWD}/escape.log \
--config ${CWD}/test-config.yaml --service ${CWD}/consumer-sap-as-anchor-point-req.nffg"
# Invoke ESCAPE with test parameters
${RUN_WITH_MEASUREMENT} ${ESCAPE_CMD} "$@"
## Test case footer - START
echo
echo "===================================== END ====================================="
echo
## Test case footer - END
|
hsnlab/escape
|
test/case45/run.sh
|
Shell
|
apache-2.0
| 1,315 |
#!/bin/bash -eux
apt-get -y purge libx11-data xauth libxmuu1 libxcb1 libx11-6 libxext6
apt-get -y purge ppp pppconfig pppoeconf
apt-get -y purge popularity-contest
apt-get -y clean
rm -rf VBoxGuestAdditions_*.iso VBoxGuestAdditions_*.iso.?
|
maxf-/packer
|
scripts/ubuntu/cleanup.sh
|
Shell
|
apache-2.0
| 241 |
#!/bin/bash
# Author: yeho <lj2007331 AT gmail.com>
# BLOG: https://blog.linuxeye.com
#
# Notes: OneinStack for CentOS/RadHat 5+ Debian 6+ and Ubuntu 12+
#
# Project home page:
# http://oneinstack.com
# https://github.com/lj2007331/oneinstack
Install_MySQL-5-5()
{
cd $oneinstack_dir/src
src_url=http://cdn.mysql.com/Downloads/MySQL-5.5/mysql-$mysql_5_version.tar.gz && Download_src
id -u mysql >/dev/null 2>&1
[ $? -ne 0 ] && useradd -M -s /sbin/nologin mysql
mkdir -p $mysql_data_dir;chown mysql.mysql -R $mysql_data_dir
tar zxf mysql-$mysql_5_version.tar.gz
cd mysql-$mysql_5_version
if [ "$je_tc_malloc" == '1' ];then
EXE_LINKER="-DCMAKE_EXE_LINKER_FLAGS='-ljemalloc'"
elif [ "$je_tc_malloc" == '2' ];then
EXE_LINKER="-DCMAKE_EXE_LINKER_FLAGS='-ltcmalloc'"
fi
make clean
[ ! -d "$mysql_install_dir" ] && mkdir -p $mysql_install_dir
cmake . -DCMAKE_INSTALL_PREFIX=$mysql_install_dir \
-DMYSQL_DATADIR=$mysql_data_dir \
-DSYSCONFDIR=/etc \
-DWITH_INNOBASE_STORAGE_ENGINE=1 \
-DWITH_PARTITION_STORAGE_ENGINE=1 \
-DWITH_FEDERATED_STORAGE_ENGINE=1 \
-DWITH_BLACKHOLE_STORAGE_ENGINE=1 \
-DWITH_MYISAM_STORAGE_ENGINE=1 \
-DWITH_ARCHIVE_STORAGE_ENGINE=1 \
-DWITH_READLINE=1 \
-DENABLED_LOCAL_INFILE=1 \
-DENABLE_DTRACE=0 \
-DDEFAULT_CHARSET=utf8mb4 \
-DDEFAULT_COLLATION=utf8mb4_general_ci \
-DWITH_EMBEDDED_SERVER=1 \
$EXE_LINKER
make -j `grep processor /proc/cpuinfo | wc -l`
make install
if [ -d "$mysql_install_dir/support-files" ];then
echo "${CSUCCESS}MySQL install successfully! ${CEND}"
cd ..
rm -rf mysql-$mysql_5_version
else
rm -rf $mysql_install_dir
echo "${CFAILURE}MySQL install failed, Please contact the author! ${CEND}"
kill -9 $$
fi
/bin/cp $mysql_install_dir/support-files/mysql.server /etc/init.d/mysqld
chmod +x /etc/init.d/mysqld
OS_CentOS='chkconfig --add mysqld \n
chkconfig mysqld on'
OS_Debian_Ubuntu='update-rc.d mysqld defaults'
OS_command
cd ..
# my.cf
[ -d "/etc/mysql" ] && /bin/mv /etc/mysql{,_bk}
cat > /etc/my.cnf << EOF
[client]
port = 3306
socket = /tmp/mysql.sock
default-character-set = utf8mb4
[mysqld]
port = 3306
socket = /tmp/mysql.sock
basedir = $mysql_install_dir
datadir = $mysql_data_dir
pid-file = $mysql_data_dir/mysql.pid
user = mysql
bind-address = 0.0.0.0
server-id = 1
init-connect = 'SET NAMES utf8mb4'
character-set-server = utf8mb4
skip-name-resolve
#skip-networking
back_log = 300
max_connections = 1000
max_connect_errors = 6000
open_files_limit = 65535
table_open_cache = 128
max_allowed_packet = 4M
binlog_cache_size = 1M
max_heap_table_size = 8M
tmp_table_size = 16M
read_buffer_size = 2M
read_rnd_buffer_size = 8M
sort_buffer_size = 8M
join_buffer_size = 8M
key_buffer_size = 4M
thread_cache_size = 8
query_cache_type = 1
query_cache_size = 8M
query_cache_limit = 2M
ft_min_word_len = 4
log_bin = mysql-bin
binlog_format = mixed
expire_logs_days = 30
log_error = $mysql_data_dir/mysql-error.log
slow_query_log = 1
long_query_time = 1
slow_query_log_file = $mysql_data_dir/mysql-slow.log
performance_schema = 0
#lower_case_table_names = 1
skip-external-locking
default_storage_engine = InnoDB
#default-storage-engine = MyISAM
innodb_file_per_table = 1
innodb_open_files = 500
innodb_buffer_pool_size = 64M
innodb_write_io_threads = 4
innodb_read_io_threads = 4
innodb_thread_concurrency = 0
innodb_purge_threads = 1
innodb_flush_log_at_trx_commit = 2
innodb_log_buffer_size = 2M
innodb_log_file_size = 32M
innodb_log_files_in_group = 3
innodb_max_dirty_pages_pct = 90
innodb_lock_wait_timeout = 120
bulk_insert_buffer_size = 8M
myisam_sort_buffer_size = 8M
myisam_max_sort_file_size = 10G
myisam_repair_threads = 1
interactive_timeout = 28800
wait_timeout = 28800
[mysqldump]
quick
max_allowed_packet = 16M
[myisamchk]
key_buffer_size = 8M
sort_buffer_size = 8M
read_buffer = 4M
write_buffer = 4M
EOF
if [ $Mem -gt 1500 -a $Mem -le 2500 ];then
sed -i 's@^thread_cache_size.*@thread_cache_size = 16@' /etc/my.cnf
sed -i 's@^query_cache_size.*@query_cache_size = 16M@' /etc/my.cnf
sed -i 's@^myisam_sort_buffer_size.*@myisam_sort_buffer_size = 16M@' /etc/my.cnf
sed -i 's@^key_buffer_size.*@key_buffer_size = 16M@' /etc/my.cnf
sed -i 's@^innodb_buffer_pool_size.*@innodb_buffer_pool_size = 128M@' /etc/my.cnf
sed -i 's@^tmp_table_size.*@tmp_table_size = 32M@' /etc/my.cnf
sed -i 's@^table_open_cache.*@table_open_cache = 256@' /etc/my.cnf
elif [ $Mem -gt 2500 -a $Mem -le 3500 ];then
sed -i 's@^thread_cache_size.*@thread_cache_size = 32@' /etc/my.cnf
sed -i 's@^query_cache_size.*@query_cache_size = 32M@' /etc/my.cnf
sed -i 's@^myisam_sort_buffer_size.*@myisam_sort_buffer_size = 32M@' /etc/my.cnf
sed -i 's@^key_buffer_size.*@key_buffer_size = 64M@' /etc/my.cnf
sed -i 's@^innodb_buffer_pool_size.*@innodb_buffer_pool_size = 512M@' /etc/my.cnf
sed -i 's@^tmp_table_size.*@tmp_table_size = 64M@' /etc/my.cnf
sed -i 's@^table_open_cache.*@table_open_cache = 512@' /etc/my.cnf
elif [ $Mem -gt 3500 ];then
sed -i 's@^thread_cache_size.*@thread_cache_size = 64@' /etc/my.cnf
sed -i 's@^query_cache_size.*@query_cache_size = 64M@' /etc/my.cnf
sed -i 's@^myisam_sort_buffer_size.*@myisam_sort_buffer_size = 64M@' /etc/my.cnf
sed -i 's@^key_buffer_size.*@key_buffer_size = 256M@' /etc/my.cnf
sed -i 's@^innodb_buffer_pool_size.*@innodb_buffer_pool_size = 1024M@' /etc/my.cnf
sed -i 's@^tmp_table_size.*@tmp_table_size = 128M@' /etc/my.cnf
sed -i 's@^table_open_cache.*@table_open_cache = 1024@' /etc/my.cnf
fi
$mysql_install_dir/scripts/mysql_install_db --user=mysql --basedir=$mysql_install_dir --datadir=$mysql_data_dir
chown mysql.mysql -R $mysql_data_dir
service mysqld start
[ -z "`grep ^'export PATH=' /etc/profile`" ] && echo "export PATH=$mysql_install_dir/bin:\$PATH" >> /etc/profile
[ -n "`grep ^'export PATH=' /etc/profile`" -a -z "`grep $mysql_install_dir /etc/profile`" ] && sed -i "s@^export PATH=\(.*\)@export PATH=$mysql_install_dir/bin:\1@" /etc/profile
. /etc/profile
$mysql_install_dir/bin/mysql -e "grant all privileges on *.* to root@'127.0.0.1' identified by \"$dbrootpwd\" with grant option;"
$mysql_install_dir/bin/mysql -e "grant all privileges on *.* to root@'localhost' identified by \"$dbrootpwd\" with grant option;"
$mysql_install_dir/bin/mysql -uroot -p$dbrootpwd -e "delete from mysql.user where Password='';"
$mysql_install_dir/bin/mysql -uroot -p$dbrootpwd -e "delete from mysql.db where User='';"
$mysql_install_dir/bin/mysql -uroot -p$dbrootpwd -e "delete from mysql.proxies_priv where Host!='localhost';"
$mysql_install_dir/bin/mysql -uroot -p$dbrootpwd -e "drop database test;"
$mysql_install_dir/bin/mysql -uroot -p$dbrootpwd -e "reset master;"
rm -rf /etc/ld.so.conf.d/{mysql,mariadb,percona}*.conf
echo "$mysql_install_dir/lib" > mysql.conf
ldconfig
service mysqld stop
}
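# Illustrative usage (assumes the oneinstack installer has already defined
# $oneinstack_dir, $mysql_5_version, $mysql_install_dir, $mysql_data_dir,
# $dbrootpwd and $Mem before sourcing this file):
#   . include/mysql-5.5.sh && Install_MySQL-5-5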
|
sdlyhu/oneinstack
|
include/mysql-5.5.sh
|
Shell
|
apache-2.0
| 6,826 |
echo "# thread,first time,second time,third time," > fio.csv
for i in `seq 1 7`
do
echo -n "${i}," >> fio.csv
for j in `seq 1 3`
do
sleep 1
sudo /home/kamiya/hpcs/aries/tool/init_fio.exe > /dev/null
(/usr/bin/time -f "%e" sudo /home/kamiya/hpcs/aries/aries_fio_batch.exe 10000 $i) 2>&1 | tr "\n" "," >> fio.csv
done
echo "" >> fio.csv
done
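# Illustrative fio.csv output (timings are made up), one row per thread count:
#   # thread,first time,second time,third time,
#   1,12.34,12.10,12.08,
#   2,6.87,6.79,6.81,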
|
kmikmy/p-wal
|
script/trash/bench_fio_batch_10000.sh
|
Shell
|
apache-2.0
| 370 |
#!/bin/sh
docker-compose -f dev.yml $@
|
raptorbox/raptor
|
scripts/dev.sh
|
Shell
|
apache-2.0
| 40 |
#!/bin/bash
# Smooths upgrades/roll-backs where the release of kubernetes jumps a release
# It kills old controllers so that this one takes over all api functions, so we don't get an
# extended period of old and new running side-by-side and the incompatibilities that this can bring.
# It also removes any mutating and validating webhooks in the system so that install-kube-system can run without interference.
#
# A request to disable is a configmap matching the hostname and kubernetes version containing a list of core service to stop: -
# apiVersion: v1
# kind: ConfigMap
# metadata:
# name: kube-aws-migration-disable-ip-10-29-26-83.us-west-2.compute.internal
# namespace: kube-system
# data:
# kubernetesVersion: v1.9.3
# disable: "kube-apiserver kube-controller-manager kube-scheduler"
retries=5
hyperkube_image="{{ .Config.HyperkubeImage.RepoWithTag }}"
my_kubernetes_version="{{ .Config.HyperkubeImage.Tag }}"
myhostname=$(hostname -f)
disable_webhooks="{{ .Values.disableWebhooks }}"
webhooks_save_path="/srv/kubernetes"
kubectl() {
/usr/bin/docker run -i --rm -v /etc/kubernetes:/etc/kubernetes:ro -v ${webhooks_save_path}:${webhooks_save_path}:rw --net=host ${hyperkube_image} /hyperkube kubectl --kubeconfig=/etc/kubernetes/kubeconfig/admin.yaml "$@"
}
kubectl_with_retries() {
local tries=0
local result_text=""
local return_code=0
while [ "$tries" -lt "$retries" ]; do
result_text=$(kubectl "$@")
return_code=$?
if [ "$return_code" -eq "0" ]; then
echo "${result_text}"
break
fi
sleep 10
tries=$((tries+1))
done
return $return_code
}
log() {
echo "$@" >&2
}
get_masters() {
kubectl get nodes -l kubernetes.io/role=master --no-headers -o custom-columns=NAME:metadata.name,VERSION:status.nodeInfo.kubeletVersion | awk '{printf "%s:%s\n", $1, $2}'
}
valid_version() {
match=$(echo $1 | awk -e '(/^v[0-9]+\.[0-9]+\.[0-9]+/){print "match"}')
[[ "$match" == "match" ]]
}
version_jumps() {
# only a minor release change is NOT a version jump
if [[ "${1%.*}" != "${2%.*}" ]]; then
return 0
fi
return 1
}
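# Worked examples (illustrative version strings):
#   version_jumps v1.9.3 v1.9.7  -> returns 1 (patch-level change only, not a jump)
#   version_jumps v1.9.3 v1.8.4  -> returns 0 (major.minor differs, i.e. a version jump)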
# stop a controller by writing a special kube-aws disable service configmap
disable_controller() {
local controller=$1
local version=$2
local request="$(cat <<EOT
apiVersion: v1
kind: ConfigMap
metadata:
name: kube-aws-migration-disable-${controller}
namespace: kube-system
data:
kubernetesVersion: ${version}
disable: "kube-controller-manager kube-scheduler kube-apiserver"
EOT
)"
log "Creating disable service configmap kube-system/kube-aws-migration-disable-${controller}"
echo "${request}" | kubectl_with_retries -n kube-system apply -f - || return 1
return 0
}
find_pod() {
local name=$1
local host=$2
kubectl -n kube-system get pod "${name}-${host}" --no-headers -o wide --ignore-not-found
}
node_running() {
local node=$1
ready=$(kubectl -n kube-system get node "${node}" --no-headers --ignore-not-found | awk '{print $2}')
if [[ "${ready}" == "Ready" ]]; then
return 0
fi
return 1
}
wait_stopped_or_timeout() {
local controllers=$1
log ""
log "WAITING FOR ALL MATCHED CONTROLLERS TO STOP:-"
log "${controllers}"
log ""
local max_wait=300
local wait=0
local test=1
while [ "$test" -eq "1" ]; do
test=0
for cont in $controllers; do
if node_running $cont; then
test=1
fi
done
if [ "$test" -eq "1" ]; then
if [[ "${wait}" -ge "${max_wait}" ]]; then
log "Wait for controllers timed out after ${wait} seconds."
break
fi
log "Controllers still active, waiting 5 seconds..."
      wait=$((wait+5))
sleep 5
else
log "All target controllers are now inactive."
fi
done
}
save_webhooks() {
local type=$1
local file=$2
echo "Storing and removing all ${type} webhooks"
if [[ -s "${file}.index" ]]; then
echo "${file}.index already saved"
else
local hooks=$(kubectl get ${type}webhookconfigurations -o custom-columns=NAME:.metadata.name --no-headers)
local count=$(echo "${hooks}" | wc -w | sed -e 's/ //g')
echo "Found ${count} ${type} webhooks..."
if [[ -n "${hooks}" ]]; then
echo -n "${hooks}" >${file}.index
for h in ${hooks}; do
echo "backing up ${type} webhook ${h}..."
kubectl get ${type}webhookconfiguration ${h} -o yaml --export >${file}.${type}.${h}.yaml
echo "deleting $type webhook ${h}..."
ensuredelete ${file}.${type}.${h}.yaml
done
fi
fi
}
ensuredelete() {
kubectl delete --cascade=true --ignore-not-found=true -f $(echo "$@" | tr ' ' ',')
}
# MAIN
if ! valid_version ${my_kubernetes_version}; then
log "My kubernetes version ${my_kubernetes_version} is invalid - aborting!"
exit 1
fi
while ! kubectl get ns kube-system; do
echo "waiting for apiserver to be available..."
sleep 3
done
# Disable all mutating and validating webhooks because they can interfere with the stack migration
if [[ "${disable_webhooks}" == "true" ]]; then
echo "Storing and removing all validating and mutating webhooks..."
mkdir -p ${webhooks_save_path}
save_webhooks validating ${webhooks_save_path}/validating_webhooks
save_webhooks mutating ${webhooks_save_path}/mutating_webhooks
fi
log ""
log "CHECKING CONTROLLER VERSIONS..."
log ""
found=""
for controller in $(get_masters); do
controller_name=$(echo "${controller%%:*}")
controller_version=$(echo "${controller##*:}")
if [[ "${controller_name}" != "$myhostname" ]]; then
    if ! valid_version ${controller_version}; then
log "Controller ${controller_name} has an invalid version number ${controller_version}!"
continue
fi
    if version_jumps ${my_kubernetes_version} ${controller_version}; then
log "Detected a version jump on ${controller_name}: my version is ${my_kubernetes_version} and theirs is ${controller_version}"
log "Disabling kube-apiserver, kube-scheduler and kube-controller-manager..."
if [[ -z "${found}" ]]; then
found="${controller_name}"
else
found="${found} ${controller_name}"
fi
disable_controller ${controller_name} ${controller_version}
else
log "No version jump on ${controller_name}: my version is ${my_kubernetes_version} and theirs is ${controller_version}"
fi
fi
done
if [[ -n "${found}" ]]; then
log ""
log "WAITING FOR FOUND CONTROLLERS TO STOP..."
log ""
wait_stopped_or_timeout "${found}"
fi
exit 0
|
kubernetes-incubator/kube-aws
|
builtin/files/plugins/upgrade-helper/assets/upgrade-helper-pre.sh
|
Shell
|
apache-2.0
| 6,468 |
#!/bin/sh
export CUDA_SDK_PATH=$HOME/NVIDIA_GPU_Computing_SDK
export CUDA_INSTALL_PATH=/pkgs_local/cuda-4.2/
export PYTHON_INCLUDE_PATH=/usr/include/python2.7/
export NUMPY_INCLUDE_PATH=/usr/include/python2.7/numpy/
#export ATLAS_LIB_PATH=/usr/lib/atlas-base/atlas
make $*
|
hqxu/deepnet
|
cudamat_conv/build.sh
|
Shell
|
bsd-3-clause
| 275 |
#!/bin/bash
set -e
set +x
owner=$USER
phpversionname="$1"
echo "Preparing PHP-FPM"
# The php-fpm conf file
file="/home/${owner}/.phpenv/versions/${phpversionname}/etc/php-fpm.conf"
cp /home/${owner}/.phpenv/versions/${phpversionname}/etc/php-fpm.conf.default /home/${owner}/.phpenv/versions/${phpversionname}/etc/php-fpm.conf
# Check if we should using www.conf instead
if [ -f /home/${owner}/.phpenv/versions/${phpversionname}/etc/php-fpm.d/www.conf.default ]
then
cp /home/${owner}/.phpenv/versions/${phpversionname}/etc/php-fpm.d/www.conf.default /home/${owner}/.phpenv/versions/${phpversionname}/etc/php-fpm.d/www.conf
file=/home/${owner}/.phpenv/versions/${phpversionname}/etc/php-fpm.d/www.conf
fi
# Make any updates to the conf file
sed -e "s,;listen.mode = 0660,listen.mode = 0666,g" --in-place ${file}
# possible other edits
#sed -e "s,listen = 127.0.0.1:9000,listen = /tmp/php${phpversionname:0:1}-fpm.sock,g" --in-place ${file}
#sed -e "s,;listen.owner = nobody,listen.owner = www-data,g" --in-place ${file}
#sed -e "s,;listen.group = nobody,listen.group = www-data,g" --in-place ${file}
#sed -e "s,user = nobody,;user = www-data,g" --in-place ${file}
#sed -e "s,group = nobody,;group = www-data,g" --in-place ${file}
|
nwsw/Elkarte
|
tests/travis-ci/travis-fpm.sh
|
Shell
|
bsd-3-clause
| 1,237 |
#!/bin/bash
FN="diggitdata_1.16.0.tar.gz"
URLS=(
"https://bioconductor.org/packages/3.9/data/experiment/src/contrib/diggitdata_1.16.0.tar.gz"
"https://bioarchive.galaxyproject.org/diggitdata_1.16.0.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-diggitdata/bioconductor-diggitdata_1.16.0_src_all.tar.gz"
)
MD5="a16c7ace0ba38076d549222dc0afb374"
# Use a staging area in the conda dir rather than temp dirs, both to avoid
# permission issues as well as to have things downloaded in a predictable
# manner.
STAGING=$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM
mkdir -p $STAGING
TARBALL=$STAGING/$FN
SUCCESS=0
for URL in ${URLS[@]}; do
curl $URL > $TARBALL
[[ $? == 0 ]] || continue
# Platform-specific md5sum checks.
if [[ $(uname -s) == "Linux" ]]; then
if md5sum -c <<<"$MD5 $TARBALL"; then
SUCCESS=1
break
fi
else if [[ $(uname -s) == "Darwin" ]]; then
if [[ $(md5 $TARBALL | cut -f4 -d " ") == "$MD5" ]]; then
SUCCESS=1
break
fi
fi
fi
done
if [[ $SUCCESS != 1 ]]; then
echo "ERROR: post-link.sh was unable to download any of the following URLs with the md5sum $MD5:"
printf '%s\n' "${URLS[@]}"
exit 1
fi
# Install and clean up
R CMD INSTALL --library=$PREFIX/lib/R/library $TARBALL
rm $TARBALL
rmdir $STAGING
|
zachcp/bioconda-recipes
|
recipes/bioconductor-diggitdata/post-link.sh
|
Shell
|
mit
| 1,301 |
#!/bin/bash
# Run the container for testing.
docker run --name jenkins-for-testing \
-v "$(pwd)/jobs":/var/jenkins_home/jobs \
-d \
-p 8080:8080 \
-p 50000:50000 \
--env JENKINS_SLAVE_AGENT_PORT=50000 \
--env JENKINS_OPTS=--httpPort=8080 \
--hostname jenkins-for-test \
jenkins-with-plugins
#
# --net bridge \
# ip addr
|
aerogear/java-client-api
|
jenkins-client-it-docker/start-docker.sh
|
Shell
|
mit
| 337 |
#!/bin/bash
# helper script to run bwdb and/or restart it
# execute this script and then simply tail /tmp/bwdb-out
# e.g. ./contrib/restart_bwdb.sh && tail -f /tmp/bwdb-out
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
pkill -2 -x bwdb
wait
exec $DIR/../bin/bwdb --regtest >> /tmp/bwdb-out 2>&1 &
|
bitpay/bwdb
|
contrib/restart_bwdb.sh
|
Shell
|
mit
| 311 |
#! /usr/bin/env bash
#
# Copyright (C) 2013-2015 Zhang Rui <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
IJK_OPENSSL_UPSTREAM=https://github.com/openssl/openssl
IJK_OPENSSL_FORK=https://github.com/Bilibili/openssl.git
IJK_OPENSSL_COMMIT=OpenSSL_1_0_1i
IJK_OPENSSL_LOCAL_REPO=extra/openssl
set -e
TOOLS=tools
echo "== pull openssl base =="
sh $TOOLS/pull-repo-base.sh $IJK_OPENSSL_UPSTREAM $IJK_OPENSSL_LOCAL_REPO
function pull_fork()
{
echo "== pull openssl fork $1 =="
sh $TOOLS/pull-repo-ref.sh $IJK_OPENSSL_FORK ios/openssl-$1 ${IJK_OPENSSL_LOCAL_REPO}
cd ios/openssl-$1
git checkout ${IJK_OPENSSL_COMMIT}
cd -
}
pull_fork "armv7"
pull_fork "armv7s"
pull_fork "arm64"
pull_fork "i386"
pull_fork "x86_64"
|
techery/ijkplayer
|
init-ios-openssl.sh
|
Shell
|
gpl-2.0
| 1,257 |
#!/bin/sh
#----------------------------------------------------------------
# This script should deploy TAG on a new Ubuntu 14.04 vm.
#----------------------------------------------------------------
NCORES=8
export MAKE="/usr/bin/make -j $(( $NCORES+1 ))"
CODENAME=`lsb_release -c | sed -e "s/Codename:\t//"`
### Preliminaries
sudo dd if=/dev/zero of=/swap bs=1M count=1024
sudo mkswap /swap
sudo swapon /swap
sudo cat "/swap swap swap defaults 0 0" >> /etc/fstab
sudo apt-get update
yes | sudo apt-get upgrade
sudo apt-get install git tmux openjdk-7-jre openjdk-7-jdk libgsl0-dev libxml2-dev libclang-3.6-dev clang-3.5 git
### Set up R
sudo cat "deb http://cran.rstudio.com/bin/linux/ubuntu $CODENAME/" >> /etc/apt/sources.list
sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys E084DAB9
sudo apt-get update
sudo apt-get install r-base-dev
sudo R CMD javareconf
### Get devtools
sudo apt-get install libcurl4-openssl-dev
echo "options(repos=structure(c(CRAN='http://cran.rstudio.com/')))" | sudo tee --append /etc/R/Rprofile.site > /dev/null
sudo Rscript -e "install.packages('devtools')"
### Install shiny server
sudo Rscript -e "install.packages('shiny')"
cd /tmp
wget https://download3.rstudio.org/ubuntu-12.04/x86_64/shiny-server-1.3.0.403-amd64.deb
# gdebi is provided by the gdebi-core package
sudo apt-get install gdebi-core
sudo gdebi shiny-server-1.3.0.403-amd64.deb
### Install TAG - this will take a while!
sudo Rscript -e "devtools::install_github('XSEDEScienceGateways/TAG')"
TAGPATH=`Rscript -e "cat(file.path(system.file('tag', package='TAG')))"`
sudo cp -r $TAGPATH /srv/shiny-server
|
XSEDEScienceGateways/textgateway
|
inst/deploy/bootstrap/deploy.sh
|
Shell
|
agpl-3.0
| 1,551 |
#!/usr/bin/env bash
set -e -x
source bosh-cpi-release/ci/tasks/utils.sh
cd bosh-cpi-release
source /etc/profile.d/chruby.sh
chruby 2.1.2
set +x
echo creating config/private.yml with blobstore secrets
cat > config/private.yml << EOF
---
blobstore:
s3:
access_key_id: $aws_access_key_id
secret_access_key: $aws_secret_access_key
EOF
set -x
echo "using bosh CLI version..."
bosh version
echo "finalizing CPI release..."
bosh finalize release ../bosh-cpi-dev-artifacts/*.tgz
rm config/private.yml
version=`git diff releases/*/index.yml | grep -E "^\+.+version" | sed s/[^0-9]*//g`
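# e.g. a new index entry '+  version: 42' yields version=42 (illustrative value)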
git diff | cat
git add .
git config --global user.email [email protected]
git config --global user.name CI
git commit -m "New final release v $version"
|
sedouard/bosh-aws-cpi-release
|
ci/tasks/promote-candidate.sh
|
Shell
|
apache-2.0
| 754 |
#!/bin/sh
sleep 0.8
./resource_client 127.0.0.1 2048 0 2800 1 20 &
sleep 1.2
./resource_client 127.0.0.1 2048 0 2300 1 25 &
sleep 1.0
./resource_client 127.0.0.1 2048 0 1800 1 30 &
sleep 0.7
./resource_client 127.0.0.1 2048 0 1300 1 35 &
sleep 1.4
./resource_client 127.0.0.1 2048 0 800 1 60 &
|
minjinsong/libcoap-code
|
resource8/scritps/start_reqresp_10.sh
|
Shell
|
bsd-2-clause
| 300 |
#!/bin/bash
url="https://github.com/MailCore/mailcore2-deps"
url_prefix="$url/raw/master"
if test x$1 != xskipprebuilt ; then
file_timestamp=0
if test -f prebuilt.list ; then
file_timestamp=`stat -f '%m' prebuilt.list`
fi
timestamp=`ruby -e 'puts Time.now.to_i'`
age=$((($timestamp-$file_timestamp)/3600)) # in hours
if test ! -d ../Externals/prebuilt ; then
age=1
fi
if test $age -gt 0 ; then
networkerror=no
#echo "$url_prefix/prebuilt.list"
curl -3 -s -L "$url_prefix/prebuilt.list" > prebuilt.list.tmp
if test x$? != x0 ; then
networkerror=yes
fi
if test x$networkerror = xyes ; then
echo WARNING: could not get prebuilt.list from repository
exit 1
fi
mv prebuilt.list.tmp prebuilt.list
if test -f prebuilt.list ; then
files=`cat prebuilt.list`
mkdir -p ../Externals/builds/builds
mkdir -p ../Externals/prebuilt
pushd ../Externals/prebuilt
rm -rf .git
echo Getting prebuilt libraries...
if test -d mailcore2-deps ; then
cd mailcore2-deps
git pull --rebase
cd ..
else
git clone --depth 1 "$url"
fi
rsync --exclude=.git -av mailcore2-deps/ ../builds/builds/
popd
fi
fi
fi
|
rlaferla/mailcore2
|
scripts/get-prebuilt.sh
|
Shell
|
bsd-3-clause
| 1,269 |
#!/bin/sh
set -e
mkdir -p "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
RESOURCES_TO_COPY=${PODS_ROOT}/resources-to-copy-${TARGETNAME}.txt
> "$RESOURCES_TO_COPY"
XCASSET_FILES=()
case "${TARGETED_DEVICE_FAMILY}" in
1,2)
TARGET_DEVICE_ARGS="--target-device ipad --target-device iphone"
;;
1)
TARGET_DEVICE_ARGS="--target-device iphone"
;;
2)
TARGET_DEVICE_ARGS="--target-device ipad"
;;
3)
TARGET_DEVICE_ARGS="--target-device tv"
;;
*)
TARGET_DEVICE_ARGS="--target-device mac"
;;
esac
install_resource()
{
if [[ "$1" = /* ]] ; then
RESOURCE_PATH="$1"
else
RESOURCE_PATH="${PODS_ROOT}/$1"
fi
if [[ ! -e "$RESOURCE_PATH" ]] ; then
cat << EOM
error: Resource "$RESOURCE_PATH" not found. Run 'pod install' to update the copy resources script.
EOM
exit 1
fi
case $RESOURCE_PATH in
*.storyboard)
echo "ibtool --reference-external-strings-file --errors --warnings --notices --minimum-deployment-target ${!DEPLOYMENT_TARGET_SETTING_NAME} --output-format human-readable-text --compile ${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \"$RESOURCE_PATH\" .storyboard`.storyboardc $RESOURCE_PATH --sdk ${SDKROOT} ${TARGET_DEVICE_ARGS}"
ibtool --reference-external-strings-file --errors --warnings --notices --minimum-deployment-target ${!DEPLOYMENT_TARGET_SETTING_NAME} --output-format human-readable-text --compile "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \"$RESOURCE_PATH\" .storyboard`.storyboardc" "$RESOURCE_PATH" --sdk "${SDKROOT}" ${TARGET_DEVICE_ARGS}
;;
*.xib)
echo "ibtool --reference-external-strings-file --errors --warnings --notices --minimum-deployment-target ${!DEPLOYMENT_TARGET_SETTING_NAME} --output-format human-readable-text --compile ${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \"$RESOURCE_PATH\" .xib`.nib $RESOURCE_PATH --sdk ${SDKROOT} ${TARGET_DEVICE_ARGS}"
ibtool --reference-external-strings-file --errors --warnings --notices --minimum-deployment-target ${!DEPLOYMENT_TARGET_SETTING_NAME} --output-format human-readable-text --compile "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \"$RESOURCE_PATH\" .xib`.nib" "$RESOURCE_PATH" --sdk "${SDKROOT}" ${TARGET_DEVICE_ARGS}
;;
*.framework)
echo "mkdir -p ${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
echo "rsync -av $RESOURCE_PATH ${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
rsync -av "$RESOURCE_PATH" "${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
;;
*.xcdatamodel)
echo "xcrun momc \"$RESOURCE_PATH\" \"${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$RESOURCE_PATH"`.mom\""
xcrun momc "$RESOURCE_PATH" "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$RESOURCE_PATH" .xcdatamodel`.mom"
;;
*.xcdatamodeld)
echo "xcrun momc \"$RESOURCE_PATH\" \"${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$RESOURCE_PATH" .xcdatamodeld`.momd\""
xcrun momc "$RESOURCE_PATH" "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$RESOURCE_PATH" .xcdatamodeld`.momd"
;;
*.xcmappingmodel)
echo "xcrun mapc \"$RESOURCE_PATH\" \"${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$RESOURCE_PATH" .xcmappingmodel`.cdm\""
xcrun mapc "$RESOURCE_PATH" "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$RESOURCE_PATH" .xcmappingmodel`.cdm"
;;
*.xcassets)
ABSOLUTE_XCASSET_FILE="$RESOURCE_PATH"
XCASSET_FILES+=("$ABSOLUTE_XCASSET_FILE")
;;
*)
echo "$RESOURCE_PATH"
echo "$RESOURCE_PATH" >> "$RESOURCES_TO_COPY"
;;
esac
}
if [[ "$CONFIGURATION" == "Debug" ]]; then
install_resource "BaiduMapKit/BaiduMapKit/BaiduMapAPI_Map.framework/Resources/mapapi.bundle"
fi
if [[ "$CONFIGURATION" == "Release" ]]; then
install_resource "BaiduMapKit/BaiduMapKit/BaiduMapAPI_Map.framework/Resources/mapapi.bundle"
fi
mkdir -p "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
rsync -avr --copy-links --no-relative --exclude '*/.svn/*' --files-from="$RESOURCES_TO_COPY" / "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
if [[ "${ACTION}" == "install" ]] && [[ "${SKIP_INSTALL}" == "NO" ]]; then
mkdir -p "${INSTALL_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
rsync -avr --copy-links --no-relative --exclude '*/.svn/*' --files-from="$RESOURCES_TO_COPY" / "${INSTALL_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
fi
rm -f "$RESOURCES_TO_COPY"
if [[ -n "${WRAPPER_EXTENSION}" ]] && [ "`xcrun --find actool`" ] && [ -n "$XCASSET_FILES" ]
then
# Find all other xcassets (this unfortunately includes those of path pods and other targets).
OTHER_XCASSETS=$(find "$PWD" -iname "*.xcassets" -type d)
while read line; do
if [[ $line != "${PODS_ROOT}*" ]]; then
XCASSET_FILES+=("$line")
fi
done <<<"$OTHER_XCASSETS"
printf "%s\0" "${XCASSET_FILES[@]}" | xargs -0 xcrun actool --output-format human-readable-text --notices --warnings --platform "${PLATFORM_NAME}" --minimum-deployment-target "${!DEPLOYMENT_TARGET_SETTING_NAME}" ${TARGET_DEVICE_ARGS} --compress-pngs --compile "${BUILT_PRODUCTS_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
fi
|
zhangpoor/MyExercise
|
ZP_Base/Pods/Target Support Files/Pods-SwiftExs/Pods-SwiftExs-resources.sh
|
Shell
|
mit
| 5,351 |
#!/bin/sh
# Copyright (C) 1999-2006 ImageMagick Studio LLC
#
# This program is covered by multiple licenses, which are described in
# LICENSE. You should have received a copy of LICENSE with this
# package; otherwise see http://www.imagemagick.org/script/license.php.
. ${srcdir}/tests/common.shi
${RUNENV} ${MEMCHECK} ./rwfile ${SRCDIR}/input_truecolor10.pam VIFF
|
ipwndev/DSLinux-Mirror
|
user/imagemagick/src/tests/rwfile_VIFF_truecolor10.sh
|
Shell
|
gpl-2.0
| 366 |
# Set the different path for this activity
# This is sourced by runit.sh
path=$1
activity=enumerate
plugindir=$path/.libs
pythonplugindir=$path
resourcedir=$path/resources
section="/math/numeration"
|
keshashah/GCompris
|
src/enumerate-activity/init_path.sh
|
Shell
|
gpl-2.0
| 200 |
#!/bin/bash
#Aqueduct - Compliance Remediation Content
#Copyright (C) 2011,2012 Vincent C. Passaro ([email protected])
#
#This program is free software; you can redistribute it and/or
#modify it under the terms of the GNU General Public License
#as published by the Free Software Foundation; either version 2
#of the License, or (at your option) any later version.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 51 Franklin Street, Fifth Floor,
#Boston, MA 02110-1301, USA.
######################################################################
#By Tummy a.k.a Vincent C. Passaro #
#Vincent[.]Passaro[@]gmail[.]com #
#www.vincentpassaro.com #
######################################################################
#_____________________________________________________________________
#| Version | Change Information | Author | Date |
#|__________|_______________________|____________________|____________|
#| 1.0 | Initial Script | Vincent C. Passaro | 20-oct-2011|
#| | Creation | | |
#|__________|_______________________|____________________|____________|
#
#
# - Updated by Shannon Mitchell([email protected]) on
# 02-jan-2012 to fix the logging conditional. Added a check for root dir's
# existance before running the mkdir.
#######################DISA INFORMATION###############################
#Group ID (Vulid): V-774
#Group Title: The root account home directory has not been chang
#Rule ID: SV-774r8_rule
#Severity: CAT III
#Rule Version (STIG-ID): GEN000900
#Rule Title: The root user's home directory must not be the root
#directory (/).
#
#Vulnerability Discussion: Changing the root home directory to something
#other than / and assigning it a 0700 protection makes it more difficult
#for intruders to manipulate the system by reading the files that root
#places in its default directory. It also gives root the same discretionary
#access control for root's home directory as for the other plain user
#home directories.
#
#Responsibility: System Administrator
#IAControls: ECCD-1, ECCD-2
#
#Check Content:
#Determine if root is assigned a home directory other than / by listing
#its home directory.
#
#Procedure:
# grep "^root" /etc/passwd | awk -F":" '{print $6}'
#
#If the root user home directory is /, this is a finding.
#
#Fix Text: The root home directory should be something other than /
#(such as /roothome).
#
#Procedure:
# mkdir /rootdir
# chown root /rootdir
# chgrp root /rootdir
# chmod 700 /rootdir
# cp -r /.??* /rootdir/.
#Then, edit the passwd file and change the root home directory
#to /rootdir. The cp -r /.??* command copies all files and subdirectories
#of file names that begin with "." into the new root directory, which
#preserves the previous root environment. Ensure you are in the "/"
#directory when executing the "cp" command.
#######################DISA INFORMATION###############################
#Global Variables#
PDI=GEN000900
BADROOTDIR=`grep "^root" /etc/passwd | awk -F":" '{print $6}' | grep "^/$" | wc -l`
#Start-Lockdown
if [ $BADROOTDIR -ge 1 ]
then
if [ ! -e "/root" ]
then
mkdir -p /root
fi
chown root:root /root
chmod 700 /root
sed -i 's/root:\/:/root:\/root:/g' /etc/passwd
fi
|
quark-pat/CLIP
|
packages/aqueduct/aqueduct/compliance/Bash/STIG/rhel-5-beta/prod/GEN000900.sh
|
Shell
|
apache-2.0
| 3,694 |
#!/bin/bash
# This file contains some utilities to test the the .deb/.rpm
# packages and the SysV/Systemd scripts.
# WARNING: This testing file must be executed as root and can
# dramatically change your system. It should only be executed
# in a throw-away VM like those made by the Vagrantfile at
# the root of the Elasticsearch source code. This should
# cause the script to fail if it is executed any other way:
[ -f /etc/is_vagrant_vm ] || {
>&2 echo "must be run on a vagrant VM"
exit 1
}
# Licensed to Elasticsearch under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Checks if necessary commands are available to run the tests
if [ ! -x /usr/bin/which ]; then
echo "'which' command is mandatory to run the tests"
exit 1
fi
if [ ! -x "`which wget 2>/dev/null`" ]; then
echo "'wget' command is mandatory to run the tests"
exit 1
fi
if [ ! -x "`which curl 2>/dev/null`" ]; then
echo "'curl' command is mandatory to run the tests"
exit 1
fi
if [ ! -x "`which pgrep 2>/dev/null`" ]; then
echo "'pgrep' command is mandatory to run the tests"
exit 1
fi
if [ ! -x "`which unzip 2>/dev/null`" ]; then
echo "'unzip' command is mandatory to run the tests"
exit 1
fi
if [ ! -x "`which tar 2>/dev/null`" ]; then
echo "'tar' command is mandatory to run the tests"
exit 1
fi
if [ ! -x "`which unzip 2>/dev/null`" ]; then
echo "'unzip' command is mandatory to run the tests"
exit 1
fi
if [ ! -x "`which java 2>/dev/null`" ]; then
echo "'java' command is mandatory to run the tests"
exit 1
fi
# Returns 0 if the 'dpkg' command is available
is_dpkg() {
[ -x "`which dpkg 2>/dev/null`" ]
}
# Returns 0 if the 'rpm' command is available
is_rpm() {
[ -x "`which rpm 2>/dev/null`" ]
}
# Skip test if the 'dpkg' command is not supported
skip_not_dpkg() {
is_dpkg || skip "dpkg is not supported"
}
# Skip test if the 'rpm' command is not supported
skip_not_rpm() {
is_rpm || skip "rpm is not supported"
}
skip_not_dpkg_or_rpm() {
is_dpkg || is_rpm || skip "only dpkg or rpm systems are supported"
}
# Returns 0 if the system supports Systemd
is_systemd() {
[ -x /bin/systemctl ]
}
# Skip test if Systemd is not supported
skip_not_systemd() {
if [ ! -x /bin/systemctl ]; then
skip "systemd is not supported"
fi
}
# Returns 0 if the system supports SysV
is_sysvinit() {
[ -x "`which service 2>/dev/null`" ]
}
# Skip test if SysV is not supported
skip_not_sysvinit() {
if [ -x "`which service 2>/dev/null`" ] && is_systemd; then
skip "sysvinit is supported, but systemd too"
fi
if [ ! -x "`which service 2>/dev/null`" ]; then
skip "sysvinit is not supported"
fi
}
# Skip if tar is not supported
skip_not_tar_gz() {
if [ ! -x "`which tar 2>/dev/null`" ]; then
skip "tar is not supported"
fi
}
# Skip if unzip is not supported
skip_not_zip() {
if [ ! -x "`which unzip 2>/dev/null`" ]; then
skip "unzip is not supported"
fi
}
assert_file_exist() {
local file="$1"
if [ ! -e "$file" ]; then
echo "Should exist: ${file} but does not"
fi
local file=$(readlink -m "${file}")
[ -e "$file" ]
}
assert_file_not_exist() {
local file="$1"
if [ -e "$file" ]; then
echo "Should not exist: ${file} but does"
fi
local file=$(readlink -m "${file}")
[ ! -e "$file" ]
}
assert_file() {
local file="$1"
local type=$2
local user=$3
local group=$4
local privileges=$5
assert_file_exist "$file"
if [ "$type" = "d" ]; then
if [ ! -d "$file" ]; then
echo "[$file] should be a directory but is not"
fi
[ -d "$file" ]
else
if [ ! -f "$file" ]; then
echo "[$file] should be a regular file but is not"
fi
[ -f "$file" ]
fi
if [ "x$user" != "x" ]; then
realuser=$(find "$file" -maxdepth 0 -printf "%u")
if [ "$realuser" != "$user" ]; then
echo "Expected user: $user, found $realuser [$file]"
fi
[ "$realuser" = "$user" ]
fi
if [ "x$group" != "x" ]; then
realgroup=$(find "$file" -maxdepth 0 -printf "%g")
if [ "$realgroup" != "$group" ]; then
echo "Expected group: $group, found $realgroup [$file]"
fi
[ "$realgroup" = "$group" ]
fi
if [ "x$privileges" != "x" ]; then
realprivileges=$(find "$file" -maxdepth 0 -printf "%m")
if [ "$realprivileges" != "$privileges" ]; then
echo "Expected privileges: $privileges, found $realprivileges [$file]"
fi
[ "$realprivileges" = "$privileges" ]
fi
}
assert_module_or_plugin_directory() {
local directory=$1
shift
#owner group and permissions vary depending on how es was installed
    #just make sure that everything is the same as $ESHOME, which was properly set up during install
config_user=$(find "$ESHOME" -maxdepth 0 -printf "%u")
config_owner=$(find "$ESHOME" -maxdepth 0 -printf "%g")
assert_file $directory d $config_user $config_owner 755
}
assert_module_or_plugin_file() {
local file=$1
shift
assert_file_exist "$(readlink -m $file)"
assert_file $file f $config_user $config_owner 644
}
assert_output() {
echo "$output" | grep -E "$1"
}
assert_recursive_ownership() {
local directory=$1
local user=$2
local group=$3
realuser=$(find $directory -printf "%u\n" | sort | uniq)
[ "$realuser" = "$user" ]
realgroup=$(find $directory -printf "%g\n" | sort | uniq)
[ "$realgroup" = "$group" ]
}
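# Illustrative usage: asserts every file under the tree shares one owner and group:
#   assert_recursive_ownership /etc/elasticsearch elasticsearch elasticsearch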
# Deletes everything before running a test file
clean_before_test() {
# List of files to be deleted
ELASTICSEARCH_TEST_FILES=("/usr/share/elasticsearch" \
"/etc/elasticsearch" \
"/var/lib/elasticsearch" \
"/var/log/elasticsearch" \
"/tmp/elasticsearch" \
"/etc/default/elasticsearch" \
"/etc/sysconfig/elasticsearch" \
"/var/run/elasticsearch" \
"/usr/share/doc/elasticsearch" \
"/tmp/elasticsearch" \
"/usr/lib/systemd/system/elasticsearch.conf" \
"/usr/lib/tmpfiles.d/elasticsearch.conf" \
"/usr/lib/sysctl.d/elasticsearch.conf")
# Kills all processes of user elasticsearch
if id elasticsearch > /dev/null 2>&1; then
pkill -u elasticsearch 2>/dev/null || true
fi
# Kills all running Elasticsearch processes
    ps aux | grep -i "org.elasticsearch.bootstrap.Elasticsearch" | awk '{print $2}' | xargs kill -9 > /dev/null 2>&1 || true
purge_elasticsearch
# Removes user & group
userdel elasticsearch > /dev/null 2>&1 || true
groupdel elasticsearch > /dev/null 2>&1 || true
# Removes all files
for d in "${ELASTICSEARCH_TEST_FILES[@]}"; do
if [ -e "$d" ]; then
rm -rf "$d"
fi
done
if is_systemd; then
systemctl unmask systemd-sysctl.service
fi
}
purge_elasticsearch() {
# Removes RPM package
if is_rpm; then
rpm --quiet -e elasticsearch > /dev/null 2>&1 || true
fi
if [ -x "`which yum 2>/dev/null`" ]; then
yum remove -y elasticsearch > /dev/null 2>&1 || true
fi
# Removes DEB package
if is_dpkg; then
dpkg --purge elasticsearch > /dev/null 2>&1 || true
fi
if [ -x "`which apt-get 2>/dev/null`" ]; then
apt-get --quiet --yes purge elasticsearch > /dev/null 2>&1 || true
fi
}
# Start elasticsearch and wait for it to come up with a status.
# $1 - expected status - defaults to green
start_elasticsearch_service() {
local desiredStatus=${1:-green}
local index=$2
local commandLineArgs=$3
run_elasticsearch_service 0 $commandLineArgs
wait_for_elasticsearch_status $desiredStatus $index
if [ -r "/tmp/elasticsearch/elasticsearch.pid" ]; then
pid=$(cat /tmp/elasticsearch/elasticsearch.pid)
[ "x$pid" != "x" ] && [ "$pid" -gt 0 ]
echo "Looking for elasticsearch pid...."
ps $pid
elif is_systemd; then
run systemctl is-active elasticsearch.service
[ "$status" -eq 0 ]
run systemctl status elasticsearch.service
[ "$status" -eq 0 ]
elif is_sysvinit; then
run service elasticsearch status
[ "$status" -eq 0 ]
fi
}
# Start elasticsearch
# $1 expected status code
# $2 additional command line args
run_elasticsearch_service() {
local expectedStatus=$1
local commandLineArgs=$2
# Set the CONF_DIR setting in case we start as a service
if [ ! -z "$CONF_DIR" ] ; then
if is_dpkg; then
echo "CONF_DIR=$CONF_DIR" >> /etc/default/elasticsearch;
elif is_rpm; then
echo "CONF_DIR=$CONF_DIR" >> /etc/sysconfig/elasticsearch;
fi
fi
if [ -f "/tmp/elasticsearch/bin/elasticsearch" ]; then
# we must capture the exit code to compare so we don't want to start as background process in case we expect something other than 0
local background=""
local timeoutCommand=""
if [ "$expectedStatus" = 0 ]; then
background="-d"
else
timeoutCommand="timeout 60s "
fi
# su and the Elasticsearch init script work together to break bats.
# sudo isolates bats enough from the init script so everything continues
# to tick along
run sudo -u elasticsearch bash <<BASH
# If jayatana is installed then we try to use it. Elasticsearch should ignore it even when we try.
# If it doesn't ignore it then Elasticsearch will fail to start because of security errors.
# This line is attempting to emulate the on login behavior of /usr/share/upstart/sessions/jayatana.conf
[ -f /usr/share/java/jayatanaag.jar ] && export JAVA_TOOL_OPTIONS="-javaagent:/usr/share/java/jayatanaag.jar"
# And now we can start Elasticsearch normally, in the background (-d) and with a pidfile (-p).
export CONF_DIR=$CONF_DIR
export ES_JAVA_OPTS=$ES_JAVA_OPTS
$timeoutCommand/tmp/elasticsearch/bin/elasticsearch $background -p /tmp/elasticsearch/elasticsearch.pid $commandLineArgs
BASH
[ "$status" -eq "$expectedStatus" ]
elif is_systemd; then
run systemctl daemon-reload
[ "$status" -eq 0 ]
run systemctl enable elasticsearch.service
[ "$status" -eq 0 ]
run systemctl is-enabled elasticsearch.service
[ "$status" -eq 0 ]
run systemctl start elasticsearch.service
[ "$status" -eq "$expectedStatus" ]
elif is_sysvinit; then
run service elasticsearch start
[ "$status" -eq "$expectedStatus" ]
fi
}
stop_elasticsearch_service() {
if [ -r "/tmp/elasticsearch/elasticsearch.pid" ]; then
pid=$(cat /tmp/elasticsearch/elasticsearch.pid)
[ "x$pid" != "x" ] && [ "$pid" -gt 0 ]
kill -SIGTERM $pid
elif is_systemd; then
run systemctl stop elasticsearch.service
[ "$status" -eq 0 ]
run systemctl is-active elasticsearch.service
[ "$status" -eq 3 ]
echo "$output" | grep -E 'inactive|failed'
elif is_sysvinit; then
run service elasticsearch stop
[ "$status" -eq 0 ]
run service elasticsearch status
[ "$status" -ne 0 ]
fi
}
# Waits for Elasticsearch to reach some status.
# $1 - expected status - defaults to green
wait_for_elasticsearch_status() {
local desiredStatus=${1:-green}
local index=$2
echo "Making sure elasticsearch is up..."
wget -O - --retry-connrefused --waitretry=1 --timeout=60 --tries 60 http://localhost:9200/_cluster/health || {
echo "Looks like elasticsearch never started. Here is its log:"
if [ -e "$ESLOG/elasticsearch.log" ]; then
cat "$ESLOG/elasticsearch.log"
else
echo "The elasticsearch log doesn't exist. Maybe /var/log/messages has something:"
tail -n20 /var/log/messages
fi
false
}
if [ -z "index" ]; then
echo "Tring to connect to elasticsearch and wait for expected status $desiredStatus..."
curl -sS "http://localhost:9200/_cluster/health?wait_for_status=$desiredStatus&timeout=60s&pretty"
else
echo "Trying to connect to elasticsearch and wait for expected status $desiredStatus for index $index"
curl -sS "http://localhost:9200/_cluster/health/$index?wait_for_status=$desiredStatus&timeout=60s&pretty"
fi
if [ $? -eq 0 ]; then
echo "Connected"
else
echo "Unable to connect to Elasticsearch"
false
fi
echo "Checking that the cluster health matches the waited for status..."
run curl -sS -XGET 'http://localhost:9200/_cat/health?h=status&v=false'
if [ "$status" -ne 0 ]; then
echo "error when checking cluster health. code=$status output="
echo $output
false
fi
echo $output | grep $desiredStatus || {
echo "unexpected status: '$output' wanted '$desiredStatus'"
false
}
}
# Checks the current elasticsearch version using the Info REST endpoint
# $1 - expected version
check_elasticsearch_version() {
local version=$1
local versionToCheck=$(echo $version | sed -e 's/-SNAPSHOT//')
run curl -s localhost:9200
[ "$status" -eq 0 ]
echo $output | grep \"number\"\ :\ \"$versionToCheck\" || {
echo "Installed an unexpected version:"
curl -s localhost:9200
false
}
}
# Executes some basic Elasticsearch tests
run_elasticsearch_tests() {
# TODO this assertion is the same the one made when waiting for
# elasticsearch to start
run curl -XGET 'http://localhost:9200/_cat/health?h=status&v=false'
[ "$status" -eq 0 ]
echo "$output" | grep -w "green"
curl -s -H "Content-Type: application/json" -XPOST 'http://localhost:9200/library/book/1?refresh=true&pretty' -d '{
"title": "Book #1",
"pages": 123
}'
curl -s -H "Content-Type: application/json" -XPOST 'http://localhost:9200/library/book/2?refresh=true&pretty' -d '{
"title": "Book #2",
"pages": 456
}'
curl -s -XGET 'http://localhost:9200/_count?pretty' |
grep \"count\"\ :\ 2
curl -s -XDELETE 'http://localhost:9200/_all'
}
# Move the config directory to another directory and properly chown it.
move_config() {
local oldConfig="$ESCONFIG"
export ESCONFIG="${1:-$(mktemp -d -t 'config.XXXX')}"
echo "Moving configuration directory from $oldConfig to $ESCONFIG"
# Move configuration files to the new configuration directory
mv "$oldConfig"/* "$ESCONFIG"
chown -R elasticsearch:elasticsearch "$ESCONFIG"
assert_file_exist "$ESCONFIG/elasticsearch.yml"
assert_file_exist "$ESCONFIG/jvm.options"
assert_file_exist "$ESCONFIG/log4j2.properties"
}
# permissions from the user umask with the executable bit set
executable_privileges_for_user_from_umask() {
local user=$1
shift
echo $((0777 & ~$(sudo -E -u $user sh -c umask) | 0111))
}
# permissions from the user umask without the executable bit set
file_privileges_for_user_from_umask() {
local user=$1
shift
echo $((0777 & ~$(sudo -E -u $user sh -c umask) & ~0111))
}
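# Worked example (assuming the user's umask is 0022):
#   0777 & ~0022 | 0111  -> 0755 for executables
#   0777 & ~0022 & ~0111 -> 0644 for regular files
# Note that bash arithmetic expansion prints the result in decimal (0755 -> 493).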
|
sneivandt/elasticsearch
|
qa/vagrant/src/test/resources/packaging/utils/utils.bash
|
Shell
|
apache-2.0
| 16,088 |
source ../testsupport.sh
run
grep -q "Stage hello" test.out || err "Failed to find expected stage hello"
true
|
vivovip/bpipe
|
tests/parallel_only/run.sh
|
Shell
|
bsd-3-clause
| 113 |
#!/bin/sh
dir=$(cd `dirname $0` && pwd)
$dir/../vendor/bin/tester -p php -c $dir/php.ini $@
|
Scalesoft/nextras-forms
|
tests/run.sh
|
Shell
|
mit
| 92 |
#!/bin/bash
FN="TxDb.Athaliana.BioMart.plantsmart25_3.1.3.tar.gz"
URLS=(
"https://bioconductor.org/packages/3.13/data/annotation/src/contrib/TxDb.Athaliana.BioMart.plantsmart25_3.1.3.tar.gz"
"https://bioarchive.galaxyproject.org/TxDb.Athaliana.BioMart.plantsmart25_3.1.3.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-txdb.athaliana.biomart.plantsmart25/bioconductor-txdb.athaliana.biomart.plantsmart25_3.1.3_src_all.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-txdb.athaliana.biomart.plantsmart25/bioconductor-txdb.athaliana.biomart.plantsmart25_3.1.3_src_all.tar.gz"
)
MD5="eb007c07317b9717c76949e5ed999978"
# Use a staging area in the conda dir rather than temp dirs, both to avoid
# permission issues as well as to have things downloaded in a predictable
# manner.
STAGING=$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM
mkdir -p $STAGING
TARBALL=$STAGING/$FN
SUCCESS=0
for URL in ${URLS[@]}; do
curl $URL > $TARBALL
[[ $? == 0 ]] || continue
# Platform-specific md5sum checks.
if [[ $(uname -s) == "Linux" ]]; then
if md5sum -c <<<"$MD5 $TARBALL"; then
SUCCESS=1
break
fi
else if [[ $(uname -s) == "Darwin" ]]; then
if [[ $(md5 $TARBALL | cut -f4 -d " ") == "$MD5" ]]; then
SUCCESS=1
break
fi
fi
fi
done
if [[ $SUCCESS != 1 ]]; then
echo "ERROR: post-link.sh was unable to download any of the following URLs with the md5sum $MD5:"
printf '%s\n' "${URLS[@]}"
exit 1
fi
# Install and clean up
R CMD INSTALL --library=$PREFIX/lib/R/library $TARBALL
rm $TARBALL
rmdir $STAGING
|
phac-nml/bioconda-recipes
|
recipes/bioconductor-txdb.athaliana.biomart.plantsmart25/post-link.sh
|
Shell
|
mit
| 1,587 |
#!/bin/sh
# $1: type.
# echo "This is a script to get the modem status."
act_node1="usb_modem_act_int"
act_node2="usb_modem_act_bulk"
modem_vid=`nvram get usb_modem_act_vid`
modem_pid=`nvram get usb_modem_act_pid`
modem_dev=`nvram get usb_modem_act_dev`
sim_order=`nvram get modem_sim_order`
at_lock="flock -x /tmp/at_cmd_lock"
jffs_dir="/jffs"
# $1: ifname.
_get_qcqmi_by_usbnet(){
rp1=`readlink -f /sys/class/net/$1/device 2>/dev/null`
if [ "$rp1" == "" ]; then
echo ""
return
fi
rp2=
i=0
while [ $i -lt 10 ]; do
rp2=`readlink -f /sys/class/GobiQMI/qcqmi$i/device 2>/dev/null`
if [ "$rp2" == "" ]; then
i=$((i+1))
continue
fi
if [ "$rp1" == "$rp2" ]; then
echo "qcqmi$i"
return
fi
i=$((i+1))
done
echo ""
}
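# Usage sketch (interface name hypothetical):
#   qcqmi=`_get_qcqmi_by_usbnet usb0`  # prints e.g. "qcqmi0", or "" when no match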
act_node=
#modem_type=`nvram get usb_modem_act_type`
#if [ "$modem_type" == "tty" -o "$modem_type" == "mbim" ]; then
# if [ "$modem_type" == "tty" -a "$modem_vid" == "6610" ]; then # e.q. ZTE MF637U
# act_node=$act_node1
# else
# act_node=$act_node2
# fi
#else
act_node=$act_node1
#fi
modem_act_node=`nvram get $act_node`
if [ "$modem_act_node" == "" ]; then
find_modem_node.sh
modem_act_node=`nvram get $act_node`
if [ "$modem_act_node" == "" ]; then
echo "Can't get $act_node!"
exit 1
fi
fi
if [ "$1" == "bytes" -o "$1" == "bytes-" ]; then
if [ "$modem_dev" == "" ]; then
echo "2:Can't get the active network device of USB."
exit 2
fi
if [ -z "$sim_order" ]; then
echo "12:Fail to get the SIM order."
exit 12
fi
if [ ! -d "$jffs_dir/sim/$sim_order" ]; then
mkdir -p "$jffs_dir/sim/$sim_order"
fi
rx_new=`cat "/sys/class/net/$modem_dev/statistics/rx_bytes" 2>/dev/null`
tx_new=`cat "/sys/class/net/$modem_dev/statistics/tx_bytes" 2>/dev/null`
echo " rx_new=$rx_new."
echo " tx_new=$tx_new."
if [ "$1" == "bytes" ]; then
rx_old=`nvram get modem_bytes_rx`
if [ -z "$rx_old" ]; then
rx_old=0
fi
tx_old=`nvram get modem_bytes_tx`
if [ -z "$tx_old" ]; then
tx_old=0
fi
echo " rx_old=$rx_old."
echo " tx_old=$tx_old."
rx_reset=`nvram get modem_bytes_rx_reset`
if [ -z "$rx_reset" ]; then
rx_reset=0
fi
tx_reset=`nvram get modem_bytes_tx_reset`
if [ -z "$tx_reset" ]; then
tx_reset=0
fi
echo "rx_reset=$rx_reset."
echo "tx_reset=$tx_reset."
rx_now=`lplus $rx_old $rx_new`
tx_now=`lplus $tx_old $tx_new`
rx_now=`lminus $rx_now $rx_reset`
tx_now=`lminus $tx_now $tx_reset`
echo " rx_now=$rx_now."
echo " tx_now=$tx_now."
nvram set modem_bytes_rx=$rx_now
nvram set modem_bytes_tx=$tx_now
else
rx_now=0
tx_now=0
nvram set modem_bytes_rx=$rx_now
nvram set modem_bytes_tx=$tx_now
data_start=`nvram get modem_bytes_data_start 2>/dev/null`
if [ -n "$data_start" ]; then
echo -n "$data_start" > "$jffs_dir/sim/$sim_order/modem_bytes_data_start"
fi
fi
nvram set modem_bytes_rx_reset=$rx_new
nvram set modem_bytes_tx_reset=$tx_new
echo "set rx_reset=$rx_new."
echo "set tx_reset=$tx_new."
echo "done."
elif [ "$1" == "bytes+" ]; then
if [ -z "$sim_order" ]; then
echo "12:Fail to get the SIM order."
exit 12
fi
if [ ! -d "$jffs_dir/sim/$sim_order" ]; then
mkdir -p "$jffs_dir/sim/$sim_order"
fi
rx_now=`nvram get modem_bytes_rx`
tx_now=`nvram get modem_bytes_tx`
echo -n "$rx_now" > "$jffs_dir/sim/$sim_order/modem_bytes_rx"
echo -n "$tx_now" > "$jffs_dir/sim/$sim_order/modem_bytes_tx"
echo "done."
elif [ "$1" == "get_dataset" ]; then
if [ -z "$sim_order" ]; then
echo "12:Fail to get the SIM order."
exit 12
fi
echo "Getting data setting..."
if [ ! -d "$jffs_dir/sim/$sim_order" ]; then
mkdir -p "$jffs_dir/sim/$sim_order"
fi
data_start=`cat "$jffs_dir/sim/$sim_order/modem_bytes_data_start" 2>/dev/null`
data_cycle=`cat "$jffs_dir/sim/$sim_order/modem_bytes_data_cycle" 2>/dev/null`
data_limit=`cat "$jffs_dir/sim/$sim_order/modem_bytes_data_limit" 2>/dev/null`
data_warning=`cat "$jffs_dir/sim/$sim_order/modem_bytes_data_warning" 2>/dev/null`
if [ -n "$data_start" ]; then
nvram set modem_bytes_data_start=$data_start
fi
if [ -z "$data_cycle" ] || [ "$data_cycle" -lt 1 -o "$data_cycle" -gt 31 ]; then
data_cycle=1
echo -n "$data_cycle" > "$jffs_dir/sim/$sim_order/modem_bytes_data_cycle"
fi
nvram set modem_bytes_data_cycle=$data_cycle
if [ -z "$data_limit" ]; then
data_limit=0
echo -n "$data_limit" > "$jffs_dir/sim/$sim_order/modem_bytes_data_limit"
fi
nvram set modem_bytes_data_limit=$data_limit
if [ -z "$data_warning" ]; then
data_warning=0
echo -n "$data_warning" > "$jffs_dir/sim/$sim_order/modem_bytes_data_warning"
fi
nvram set modem_bytes_data_warning=$data_warning
rx_now=`cat "$jffs_dir/sim/$sim_order/modem_bytes_rx" 2>/dev/null`
tx_now=`cat "$jffs_dir/sim/$sim_order/modem_bytes_tx" 2>/dev/null`
nvram set modem_bytes_rx=$rx_now
nvram set modem_bytes_tx=$tx_now
echo "done."
elif [ "$1" == "set_dataset" ]; then
if [ -z "$sim_order" ]; then
echo "12:Fail to get the SIM order."
exit 12
fi
echo "Setting data setting..."
if [ ! -d "$jffs_dir/sim/$sim_order" ]; then
mkdir -p "$jffs_dir/sim/$sim_order"
fi
data_start=`nvram get modem_bytes_data_start 2>/dev/null`
data_cycle=`nvram get modem_bytes_data_cycle 2>/dev/null`
data_limit=`nvram get modem_bytes_data_limit 2>/dev/null`
data_warning=`nvram get modem_bytes_data_warning 2>/dev/null`
if [ -n "$data_start" ]; then
echo -n "$data_start" > "$jffs_dir/sim/$sim_order/modem_bytes_data_start"
fi
if [ -z "$data_cycle" ] || [ "$data_cycle" -lt 1 -o "$data_cycle" -gt 31 ]; then
data_cycle=1
nvram set modem_bytes_data_cycle=$data_cycle
fi
echo -n "$data_cycle" > "$jffs_dir/sim/$sim_order/modem_bytes_data_cycle"
if [ -z "$data_limit" ]; then
data_limit=0
nvram set modem_bytes_data_limit=$data_limit
fi
echo -n "$data_limit" > "$jffs_dir/sim/$sim_order/modem_bytes_data_limit"
if [ -z "$data_warning" ]; then
data_warning=0
nvram set modem_bytes_data_warning=$data_warning
fi
echo -n "$data_warning" > "$jffs_dir/sim/$sim_order/modem_bytes_data_warning"
echo "done."
elif [ "$1" == "sim" ]; then
modem_enable=`nvram get modem_enable`
simdetect=`nvram get usb_modem_act_simdetect`
if [ -z "$simdetect" ]; then
modem_status.sh simdetect
fi
# check the SIM status.
at_ret=`$at_lock modem_at.sh '+CPIN?' 2>/dev/null`
sim_inserted1=`echo "$at_ret" |grep "READY" 2>/dev/null`
sim_inserted2=`echo "$at_ret" |grep "SIM" |awk '{FS=": "; print $2}' 2>/dev/null`
sim_inserted3=`echo "$at_ret" |grep "+CME ERROR: " |awk '{FS=": "; print $2}' 2>/dev/null`
sim_inserted4=`echo "$sim_inserted2" |cut -c 1-3`
if [ -n "$sim_inserted1" ]; then
echo "Got SIM."
act_sim=1
elif [ "$sim_inserted2" == "SIM PIN" ]; then
echo "Need PIN."
act_sim=2
elif [ "$sim_inserted2" == "SIM PUK" ]; then
echo "Need PUK."
act_sim=3
elif [ "$sim_inserted2" == "SIM PIN2" ]; then
echo "Need PIN2."
act_sim=4
elif [ "$sim_inserted2" == "SIM PUK2" ]; then
echo "Need PUK2."
act_sim=5
elif [ "$sim_inserted4" == "PH-" ]; then
echo "Waiting..."
act_sim=6
elif [ "$sim_inserted3" != "" ]; then
if [ "$sim_inserted3" == "SIM not inserted" ]; then
echo "SIM not inserted."
act_sim=-1
else
if [ "$modem_enable" == "2" ]; then
echo "Detected CDMA2000's SIM"
act_sim=1
else
echo "CME ERROR: $sim_inserted3"
act_sim=-2
fi
fi
else
echo "No or unknown response."
act_sim=-10
fi
act_sim_orig=`nvram get usb_modem_act_sim`
if [ "$act_sim_orig" != "$act_sim" ]; then
nvram set usb_modem_act_sim=$act_sim
fi
echo "done."
elif [ "$1" == "signal" ]; then
at_ret=`$at_lock modem_at.sh '+CSQ' 2>/dev/null`
ret=`echo "$at_ret" |grep "+CSQ: " |awk '{FS=": "; print $2}' |awk '{FS=",99"; print $1}' 2>/dev/null`
if [ "$ret" == "" ]; then
echo "Fail to get the signal from $modem_act_node."
exit 3
fi
signal=
if [ $ret -eq 99 ]; then
# not known or not detectable.
signal=-1
elif [ $ret -le 1 ]; then
# almost no signal.
signal=0
elif [ $ret -le 9 ]; then
# Marginal.
signal=1
elif [ $ret -le 14 ]; then
# OK.
signal=2
elif [ $ret -le 19 ]; then
# Good.
signal=3
elif [ $ret -le 30 ]; then
# Excellent.
signal=4
elif [ $ret -eq 31 ]; then
# Full.
signal=5
else
echo "Can't identify the signal strength: $ret."
exit 4
fi
nvram set usb_modem_act_signal=$signal
echo "$signal"
echo "done."
elif [ "$1" == "operation" ]; then
if [ "$modem_vid" == "1478" -a "$modem_pid" == "36902" ]; then
at_ret=`$at_lock modem_at.sh '$CBEARER' 2>/dev/null`
ret=`echo "$at_ret" |grep '$CBEARER:' |awk '{FS=":"; print $2}' 2>/dev/null`
if [ "$ret" == "" ]; then
echo "Fail to get the operation type from $modem_act_node."
exit 5
fi
operation=
if [ "$ret" == "0x01" ]; then
operation=GPRS
elif [ "$ret" == "0x02" ]; then
operation=EDGE
elif [ "$ret" == "0x03" ]; then
operation=HSDPA
elif [ "$ret" == "0x04" ]; then
operation=HSUPA
elif [ "$ret" == "0x05" ]; then
operation=WCDMA
elif [ "$ret" == "0x06" ]; then
operation=CDMA
elif [ "$ret" == "0x07" ]; then
operation="EV-DO REV 0"
elif [ "$ret" == "0x08" ]; then
operation="EV-DO REV A"
elif [ "$ret" == "0x09" ]; then
operation=GSM
elif [ "$ret" == "0x0a" -o "$ret" == "0x0A" ]; then
operation="EV-DO REV B"
elif [ "$ret" == "0x0b" -o "$ret" == "0x0B" ]; then
operation=LTE
elif [ "$ret" == "0x0c" -o "$ret" == "0x0C" ]; then
operation="HSDPA+"
elif [ "$ret" == "0x0d" -o "$ret" == "0x0D" ]; then
operation="DC-HSDPA+"
else
echo "Can't identify the operation type: $ret."
exit 6
fi
nvram set usb_modem_act_operation="$operation"
echo "$operation"
echo "done."
fi
elif [ "$1" == "setmode" ]; then
if [ "$modem_vid" == "1478" -a "$modem_pid" == "36902" ]; then
mode=
if [ "$2" == "0" ]; then # Auto
mode=10
elif [ "$2" == "43" ]; then # 4G/3G
mode=17
elif [ "$2" == "4" ]; then # 4G only
mode=11
elif [ "$2" == "3" ]; then # 3G only
mode=2
elif [ "$2" == "2" ]; then # 2G only
mode=1
else
echo "Can't identify the mode type: $2."
exit 7
fi
at_ret=`$at_lock modem_at.sh '+CSETPREFNET='$mode 2>/dev/null`
ret=`echo "$at_ret" |grep '+CSETPREFNET=' |awk '{FS="="; print $2}' 2>/dev/null`
if [ "$ret" == "" ]; then
echo "Fail to set the modem mode from $modem_act_node."
exit 8
fi
echo "Set the mode be $2($ret)."
echo "done."
fi
elif [ "$1" == "getmode" ]; then
if [ "$modem_vid" == "1478" -a "$modem_pid" == "36902" ]; then
mode=
at_ret=`$at_lock modem_at.sh '+CGETPREFNET' 2>/dev/null`
ret=`echo "$at_ret" |grep '+CGETPREFNET:' |awk '{FS=":"; print $2}' 2>/dev/null`
if [ "$ret" == "" ]; then
echo "Fail to get the modem mode from $modem_act_node."
exit 9
elif [ "$ret" == "10" ]; then # Auto
mode=0
elif [ "$ret" == "17" ]; then # 4G/3G
mode=43
elif [ "$ret" == "11" ]; then # 4G only
mode=4
elif [ "$ret" == "2" ]; then # 3G only
mode=3
elif [ "$ret" == "1" ]; then # 2G only
mode=2
else
echo "Can't identify the mode type: $ret."
exit 10
fi
echo "Get the mode be $mode."
echo "done."
fi
elif [ "$1" == "imsi" ]; then
echo "Getting IMSI..."
at_ret=`$at_lock modem_at.sh '+CIMI' 2>/dev/null`
ret=`echo "$at_ret" |grep "^[0-9].*$" 2>/dev/null`
if [ "$ret" == "" ]; then
echo "Fail to get the IMSI from $modem_act_node."
exit 11
fi
nvram set usb_modem_act_imsi=$ret
sim_num=`nvram get modem_sim_num`
if [ -z "$sim_num" ]; then
sim_num=10
fi
nvram set modem_sim_order=-1
i=1
while [ $i -le $sim_num ]; do
echo -n "check SIM($i)..."
got_imsi=`nvram get modem_sim_imsi$i`
if [ "$got_imsi" == "" ]; then
echo "Set SIM($i)."
nvram set modem_sim_order=$i
nvram set modem_sim_imsi${i}=$ret
break
elif [ "$got_imsi" == "$ret" ]; then
echo "Get SIM($i)."
nvram set modem_sim_order=$i
break
fi
i=$((i+1))
done
echo "done."
elif [ "$1" == "imsi_del" ]; then
if [ -z "$2" ]; then
echo "Usage: $0 $1 <SIM's order>"
exit 11;
fi
echo "Delete SIM..."
sim_num=`nvram get modem_sim_num`
if [ -z "$sim_num" ]; then
sim_num=10
fi
i=$2
while [ $i -le $sim_num ]; do
echo -n "check SIM($i)..."
got_imsi=`nvram get modem_sim_imsi$i`
if [ $i -eq $2 ]; then
echo -n "Delete SIM($i)."
got_imsi=""
nvram set modem_sim_imsi$i=$got_imsi
rm -rf "$jffs_dir/sim/$i"
fi
if [ -z "$got_imsi" ]; then
j=$((i+1))
next_imsi=`nvram get modem_sim_imsi$j`
if [ -n "$next_imsi" ]; then
echo -n "Move SIM($j) to SIM($i)."
nvram set modem_sim_imsi$i=$next_imsi
mv "$jffs_dir/sim/$j" "$jffs_dir/sim/$i"
nvram set modem_sim_imsi$j=
fi
fi
echo ""
i=$((i+1))
done
echo "done."
elif [ "$1" == "imei" ]; then
echo -n "Getting IMEI..."
at_ret=`$at_lock modem_at.sh '+CGSN' 2>/dev/null`
ret=`echo "$at_ret" |grep "^[0-9].*$" 2>/dev/null`
if [ "$ret" == "" ]; then
echo "Fail to get the IMEI from $modem_act_node."
exit 12
fi
nvram set usb_modem_act_imei=$ret
echo "done."
elif [ "$1" == "iccid" ]; then
if [ "$modem_vid" == "1478" -a "$modem_pid" == "36902" ]; then
echo -n "Getting ICCID..."
at_ret=`$at_lock modem_at.sh '+ICCID' 2>/dev/null`
ret=`echo "$at_ret" |grep "ICCID: " |awk '{FS="ICCID: "; print $2}' 2>/dev/null`
if [ "$ret" == "" ]; then
echo "Fail to get the ICCID from $modem_act_node."
exit 13
fi
nvram set usb_modem_act_iccid=$ret
echo "done."
fi
elif [ "$1" == "rate" ]; then
if [ "$modem_vid" == "1478" -a "$modem_pid" == "36902" ]; then
echo -n "Getting Rate..."
qcqmi=`_get_qcqmi_by_usbnet $modem_dev 2>/dev/null`
at_ret=`gobi_api $qcqmi rate |grep "Max Tx" 2>/dev/null`
max_tx=`echo "$at_ret" |awk '{FS=","; print $1}' |awk '{FS=" "; print $3}' 2>/dev/null`
max_rx=`echo "$at_ret" |awk '{FS=","; print $2}' |awk '{FS=" "; print $2}' |awk '{FS="."; print $1}' 2>/dev/null`
if [ "$max_tx" == "" -o "$max_rx" == "" ]; then
echo "Fail to get the rate from $modem_act_node."
exit 14
fi
nvram set usb_modem_act_tx=$max_tx
nvram set usb_modem_act_rx=$max_rx
echo "done."
fi
elif [ "$1" == "hwver" ]; then
if [ "$modem_vid" == "1478" -a "$modem_pid" == "36902" ]; then
echo -n "Getting HWVER..."
at_ret=`$at_lock modem_at.sh '$HWVER' 2>/dev/null`
ret=`echo "$at_ret" |grep "^[0-9].*$" 2>/dev/null`
if [ "$ret" == "" ]; then
nvram set usb_modem_act_hwver=
echo "Fail to get the hardware version from $modem_act_node."
exit 15
fi
nvram set usb_modem_act_hwver=$ret
echo "done."
fi
elif [ "$1" == "swver" ]; then
if [ "$modem_vid" == "1478" -a "$modem_pid" == "36902" ]; then
echo -n "Getting SWVER..."
at_ret=`$at_lock modem_at.sh I 2>/dev/null`
ret=`echo -n "$at_ret" |grep "^WW" 2>/dev/null`
if [ "$ret" == "" ]; then
nvram set usb_modem_act_swver=
echo "Fail to get the software version from $modem_act_node."
exit 15
fi
nvram set usb_modem_act_swver=$ret
echo "done."
fi
elif [ "$1" == "band" ]; then
if [ "$modem_vid" == "1478" -a "$modem_pid" == "36902" ]; then
echo -n "Getting Band..."
at_ret=`$at_lock modem_at.sh '$CRFI' 2>/dev/null`
ret=`echo "$at_ret" |grep '$CRFI:' |awk '{FS=":"; print $2}' 2>/dev/null`
if [ "$ret" == "" ]; then
echo "Fail to get the current band from $modem_act_node."
exit 16
fi
nvram set usb_modem_act_band="$ret"
echo "done."
fi
elif [ "$1" == "scan" ]; then
echo "Start to scan the stations:"
modem_roaming_scantime=`nvram get modem_roaming_scantime`
modem_roaming_scanlist=`nvram get modem_roaming_scanlist`
nvram set usb_modem_act_scanning=2
at_ret=`$at_lock modem_at.sh '+COPS=2' |grep "OK" 2>/dev/null`
echo "Scanning the stations."
at_ret=`$at_lock modem_at.sh '+COPS=?' $modem_roaming_scantime 2>/dev/null`
ret=`echo "$at_ret" |grep '+COPS: ' |awk '{FS=": "; print $2}' |awk '{FS=",,"; print $1}' 2>/dev/null`
echo "Finish the scan."
nvram set usb_modem_act_scanning=1
if [ "$ret" == "" ]; then
echo "17:Fail to scan the stations."
exit 17
fi
echo "Count the stations."
num=`echo "$ret" |awk '{FS=")"; print NF}' 2>/dev/null`
if [ "$num" == "" ]; then
echo "18:Fail to count the stations."
exit 18
fi
echo "Work the list."
list="["
filter=""
i=1
while [ $i -lt $num ]; do
str=`echo "$ret" |awk '{FS=")"; print $'$i'}' |awk '{FS="("; print $2}' 2>/dev/null`
sta=`echo "$str" |awk '{FS=","; print $2}' 2>/dev/null`
sta_code=`echo "$str" |awk '{FS=","; print $4}' 2>/dev/null`
sta_type_number=`echo "$str" |awk '{FS=","; print $5}' 2>/dev/null`
if [ "$sta_type_number" == "0" -o "$sta_type_number" == "1" -o "$sta_type_number" == "3" ]; then
sta_type=2G
elif [ "$sta_type_number" == "2" ]; then
sta_type=3G
elif [ "$sta_type_number" == "4" ]; then
sta_type=HSDPA
elif [ "$sta_type_number" == "5" ]; then
sta_type=HSUPA
elif [ "$sta_type_number" == "6" ]; then
sta_type=H+
elif [ "$sta_type_number" == "7" ]; then
sta_type=4G
else
sta_type=unknown
fi
if [ "$list" != "[" ]; then
list=$list",[$sta, $sta_code, \"$sta_type\"]"
else
list=$list"[$sta, $sta_code, \"$sta_type\"]"
fi
filter=$filter","$sta","
i=$((i+1))
done
list=$list"]"
echo -n "$list" > $modem_roaming_scanlist
nvram set usb_modem_act_scanning=0
echo "done."
elif [ "$1" == "station" ]; then
modem_reg_time=`nvram get modem_reg_time`
$at_lock modem_at.sh "+COPS=1,0,\"$2\"" "$modem_reg_time" 1,2>/dev/null
if [ $? -ne 0 ]; then
echo "19:Fail to set the station: $2."
exit 19
fi
echo "done."
elif [ "$1" == "simauth" ]; then
if [ "$modem_vid" == "1478" -a "$modem_pid" == "36902" ]; then
nvram set usb_modem_act_auth=
nvram set usb_modem_act_auth_pin=
nvram set usb_modem_act_auth_puk=
at_ret=`$at_lock modem_at.sh '+CPINR' |grep "+CPINR:" |awk '{FS=":"; print $2}' 2>/dev/null`
if [ "$at_ret" == "" ]; then
echo "Fail to get the SIM status."
exit 20
fi
ret=`echo "$at_ret" |awk '{FS=","; print $3}' 2>/dev/null`
if [ "$ret" == "" ]; then
echo "Fail to get the SIM auth state."
exit 21
fi
nvram set usb_modem_act_auth=$ret
if [ "$ret" == "1" ]; then
echo "SIM auth state is ENABLED_NOT_VERIFIED."
elif [ "$ret" == "2" ]; then
echo "SIM auth state is ENABLED_VERIFIED."
elif [ "$ret" == "3" ]; then
echo "SIM auth state is DISABLED."
elif [ "$ret" == "4" ]; then
echo "SIM auth state is BLOCKED."
elif [ "$ret" == "5" ]; then
echo "SIM auth state is PERMANENTLY_BLOCKED."
else
echo "SIM auth state is UNKNOWN."
fi
ret=`echo "$at_ret" |awk '{FS=","; print $4}' 2>/dev/null`
if [ "$ret" == "" ]; then
echo "Fail to get the PIN retry."
exit 22
fi
nvram set usb_modem_act_auth_pin=$ret
echo "SIM PIN retry is $ret."
ret=`echo "$at_ret" |awk '{FS=","; print $5}' 2>/dev/null`
if [ "$ret" == "" ]; then
echo "Fail to get the PUK retry."
exit 23
fi
nvram set usb_modem_act_auth_puk=$ret
echo "SIM PUK retry is $ret."
echo "done."
fi
elif [ "$1" == "simpin" ]; then
if [ "$2" == "" ]; then
nvram set g3state_pin=2
echo "24:Need to input the PIN code."
exit 24
fi
nvram set g3state_pin=1
at_ret=`$at_lock modem_at.sh '+CPIN='\"$2\" |grep "OK" 2>/dev/null`
if [ "$at_ret" == "" ]; then
nvram set g3err_pin=1
echo "25:Fail to unlock the SIM: $2."
exit 25
fi
nvram set g3err_pin=0
echo "done."
elif [ "$1" == "simpuk" ]; then
# $2: the original PUK. $3: the new PIN.
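# Usage sketch (PUK/PIN values hypothetical): modem_status.sh simpuk 12345678 0000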
if [ "$2" == "" ]; then
echo "26:Need to input the PUK code."
exit 26
elif [ "$3" == "" ]; then
echo "27:Need to input the new PIN code."
exit 27
fi
at_ret=`$at_lock modem_at.sh '+CPIN='\"$2\"','\"$3\" |grep "OK" 2>/dev/null`
if [ "$at_ret" == "" ]; then
echo "28:Fail to unlock the SIM PIN: $2."
exit 28
fi
echo "done."
elif [ "$1" == "lockpin" ]; then
# $2: 1, lock; 0, unlock. $3: the original PIN.
simauth=`nvram get usb_modem_act_auth`
if [ "$simauth" == "1" ]; then
echo "29:SIM need to input the PIN code first."
exit 29
elif [ "$simauth" == "4" -o "$simauth" == "5" ]; then # lock
echo "30:SIM had been blocked."
exit 30
elif [ "$simauth" == "0" ]; then # lock
echo "31:Can't get the SIM auth state."
exit 31
fi
if [ "$2" == "" ]; then
echo "32:Decide to lock/unlock PIN."
exit 32
fi
if [ "$3" == "" ]; then
echo "33:Need the PIN code."
exit 33
fi
if [ "$2" == "1" -a "$simauth" == "1" ] || [ "$2" == "1" -a "$simauth" == "2" ] || [ "$2" == "0" -a "$simauth" == "3" ]; then # lock
if [ "$simauth" == "1" -o "$simauth" == "2" ]; then
echo "had locked."
elif [ "$simauth" == "3" ]; then
echo "had unlocked."
fi
echo "done."
exit 0
fi
at_ret=`$at_lock modem_at.sh '+CLCK="SC",'$2',"'$3'"' 2>/dev/null`
ok_ret=`echo -n $at_ret |grep "OK" 2>/dev/null`
if [ -z "$ok_ret" ]; then
if [ "$2" == "1" ]; then
echo "34:Fail to lock PIN."
exit 34
else
echo "35:Fail to unlock PIN."
exit 35
fi
fi
echo "done."
elif [ "$1" == "pwdpin" ]; then
if [ "$2" == "" ]; then
echo "36:Need to input the original PIN code."
exit 36
elif [ "$3" == "" ]; then
echo "37:Need to input the new PIN code."
exit 37
fi
at_ret=`$at_lock modem_at.sh '+CPWD="SC",'$2','$3 |grep "OK" 2>/dev/null`
if [ "$at_ret" == "" ]; then
echo "38:Fail to change the PIN."
exit 38
fi
echo "done."
elif [ "$1" == "gnws" ]; then
if [ "$modem_vid" == "1478" -a "$modem_pid" == "36902" ]; then
at_cgnws=`$at_lock modem_at.sh '+CGNWS' |grep "+CGNWS:" |awk '{FS=":"; print $2}' 2>/dev/null`
if [ "$at_cgnws" == "" ]; then
echo "Fail to get the CGNWS."
exit 39
fi
roaming=`echo "$at_cgnws" |awk '{FS=","; print $1}' 2>/dev/null`
signal=`echo "$at_cgnws" |awk '{FS=","; print $2}' 2>/dev/null`
reg_type=`echo "$at_cgnws" |awk '{FS=","; print $3}' 2>/dev/null`
reg_state=`echo "$at_cgnws" |awk '{FS=","; print $4}' 2>/dev/null`
mcc=`echo "$at_cgnws" |awk '{FS=","; print $5}' 2>/dev/null`
mnc=`echo "$at_cgnws" |awk '{FS=","; print $6}' 2>/dev/null`
spn=`echo "$at_cgnws" |awk '{FS=","; print $7}' 2>/dev/null`
isp_long=`echo "$at_cgnws" |awk '{FS=","; print $8}' 2>/dev/null`
isp_short=`echo "$at_cgnws" |awk '{FS=","; print $9}' 2>/dev/null`
echo " Roaming=$roaming."
echo " Signal=$signal."
echo " REG. Type=$reg_type."
echo "REG. State=$reg_state."
echo " MCC=$mcc."
echo " MNC=$mnc."
echo " SPN=$spn."
echo " ISP Long=$isp_long."
echo " ISP Short=$isp_short."
echo "done."
fi
elif [ "$1" == "send_sms" ]; then
# $2: phone number, $3: message to send.
at_ret=`$at_lock modem_at.sh +CMGF? 2>/dev/null`
at_ret_ok=`echo -n "$at_ret" |grep "OK" 2>/dev/null`
msg_format=`echo -n "$at_ret" |grep "+CMGF:" |awk '{FS=" "; print $2}' 2>/dev/null`
if [ -z "$at_ret_ok" ] || [ "$msg_format" != "1" ]; then
#echo "Changing the message format to the Text mode..."
at_ret=`$at_lock modem_at.sh +CMGF=1 |grep "OK" 2>/dev/null`
if [ "$at_ret" == "" ]; then
echo "40:Fail to set the message format to the Text mode."
exit 40
fi
fi
if [ -z "$2" -o -z "$3" ]; then
echo "41:Usage: $0 $1 <phone number> <sended message>"
exit 41
fi
at_ret=`$at_lock modem_at.sh +CMGS=\"$2\" |grep ">" 2>/dev/null`
at_ret1=`echo -n "$at_ret" |grep ">" 2>/dev/null`
if [ -z "at_ret1" ]; then
echo "42:Fail to execute +CMGS."
exit 42
fi
at_ret=`$at_lock chat -t 1 -e '' "$3^z" OK >> /dev/$modem_act_node < /dev/$modem_act_node 2>/tmp/at_ret`
at_ret_ok=`echo -n "$at_ret" |grep "OK" 2>/dev/null`
if [ -z "at_ret_ok" ]; then
echo "43:Fail to send the message: $3."
exit 43
fi
echo "done."
elif [ "$1" == "simdetect" ]; then
if [ "$modem_vid" == "1478" -a "$modem_pid" == "36902" ]; then
# $2: 0: disable, 1: enable.
at_ret=`$at_lock modem_at.sh '$NV70210' 2>/dev/null`
ret=`echo -n $at_ret |grep "OK" 2>/dev/null`
if [ -z "$ret" ]; then
echo "44:Fail to get the value of SIM detect."
exit 44
fi
current=`echo -n $at_ret |awk '{print $2}'`
if [ -z "$2" ]; then
echo "$current"
nvram set usb_modem_act_simdetect=$current
elif [ "$2" == "1" -a "$current" == "0" ] || [ "$2" == "0" -a "$current" == "1" ]; then
at_ret=`$at_lock modem_at.sh '$NV70210='$2 |grep "OK" 2>/dev/null`
if [ -z "$at_ret" ]; then
echo "45:Fail to set the SIM detect to be $2."
exit 45
fi
nvram set usb_modem_act_simdetect=$2
# Use reboot to replace this.
#at_ret=`$at_lock modem_at.sh '+CFUN=1,1' |grep "OK" 2>/dev/null`
#if [ -z "$at_ret" ]; then
# echo "45:Fail to reset the Gobi."
# exit 46
#fi
fi
echo "done."
fi
fi
|
wkritzinger/asuswrt-merlin
|
release/src/router/rom/apps_scripts/modem_status.sh
|
Shell
|
gpl-2.0
| 24,457 |
#!/bin/sh
#
# Copyright (c) 2005 Amos Waterland
#
test_description='git rebase assorted tests
This test runs git rebase and checks, among other things, that the author
information is not lost.
'
. ./test-lib.sh
GIT_AUTHOR_NAME=author@name
GIT_AUTHOR_EMAIL=bogus@email@address
export GIT_AUTHOR_NAME GIT_AUTHOR_EMAIL
test_expect_success 'prepare repository with topic branches' '
git config core.logAllRefUpdates true &&
echo First >A &&
git update-index --add A &&
git commit -m "Add A." &&
git checkout -b force-3way &&
echo Dummy >Y &&
git update-index --add Y &&
git commit -m "Add Y." &&
git checkout -b filemove &&
git reset --soft master &&
mkdir D &&
git mv A D/A &&
git commit -m "Move A." &&
git checkout -b my-topic-branch master &&
echo Second >B &&
git update-index --add B &&
git commit -m "Add B." &&
git checkout -f master &&
echo Third >>A &&
git update-index A &&
git commit -m "Modify A." &&
git checkout -b side my-topic-branch &&
echo Side >>C &&
git add C &&
git commit -m "Add C" &&
git checkout -b nonlinear my-topic-branch &&
echo Edit >>B &&
git add B &&
git commit -m "Modify B" &&
git merge side &&
git checkout -b upstream-merged-nonlinear &&
git merge master &&
git checkout -f my-topic-branch &&
git tag topic
'
test_expect_success 'rebase on dirty worktree' '
echo dirty >>A &&
test_must_fail git rebase master
'
test_expect_success 'rebase on dirty cache' '
git add A &&
test_must_fail git rebase master
'
test_expect_success 'rebase against master' '
git reset --hard HEAD &&
git rebase master
'
test_expect_success 'rebase against master twice' '
git rebase master >out &&
grep "Current branch my-topic-branch is up to date" out
'
test_expect_success 'rebase against master twice with --force' '
git rebase --force-rebase master >out &&
grep "Current branch my-topic-branch is up to date, rebase forced" out
'
test_expect_success 'rebase against master twice from another branch' '
git checkout my-topic-branch^ &&
git rebase master my-topic-branch >out &&
grep "Current branch my-topic-branch is up to date" out
'
test_expect_success 'rebase fast-forward to master' '
git checkout my-topic-branch^ &&
git rebase my-topic-branch >out &&
grep "Fast-forwarded HEAD to my-topic-branch" out
'
test_expect_success 'the rebase operation should not have destroyed author information' '
! (git log | grep "Author:" | grep "<>")
'
test_expect_success 'the rebase operation should not have destroyed author information (2)' "
git log -1 |
grep 'Author: $GIT_AUTHOR_NAME <$GIT_AUTHOR_EMAIL>'
"
test_expect_success 'HEAD was detached during rebase' '
test $(git rev-parse HEAD@{1}) != $(git rev-parse my-topic-branch@{1})
'
test_expect_success 'rebase after merge master' '
git reset --hard topic &&
git merge master &&
git rebase master &&
! (git show | grep "^Merge:")
'
test_expect_success 'rebase of history with merges is linearized' '
git checkout nonlinear &&
test 4 = $(git rev-list master.. | wc -l) &&
git rebase master &&
test 3 = $(git rev-list master.. | wc -l)
'
test_expect_success 'rebase of history with merges after upstream merge is linearized' '
git checkout upstream-merged-nonlinear &&
test 5 = $(git rev-list master.. | wc -l) &&
git rebase master &&
test 3 = $(git rev-list master.. | wc -l)
'
test_expect_success 'rebase a single mode change' '
git checkout master &&
echo 1 >X &&
git add X &&
test_tick &&
git commit -m prepare &&
git checkout -b modechange HEAD^ &&
echo 1 >X &&
git add X &&
test_chmod +x A &&
test_tick &&
git commit -m modechange &&
GIT_TRACE=1 git rebase master
'
test_expect_success 'rebase is not broken by diff.renames' '
git config diff.renames copies &&
test_when_finished "git config --unset diff.renames" &&
git checkout filemove &&
GIT_TRACE=1 git rebase force-3way
'
test_expect_success 'setup: recover' '
test_might_fail git rebase --abort &&
git reset --hard &&
git checkout modechange
'
test_expect_success 'Show verbose error when HEAD could not be detached' '
>B &&
test_must_fail git rebase topic 2>output.err >output.out &&
grep "The following untracked working tree files would be overwritten by checkout:" output.err &&
grep B output.err
'
rm -f B
test_expect_success 'dump usage when upstream arg is missing' '
git checkout -b usage topic &&
test_must_fail git rebase 2>error1 &&
grep "[Uu]sage" error1 &&
test_must_fail git rebase --abort 2>error2 &&
grep "No rebase in progress" error2 &&
test_must_fail git rebase --onto master 2>error3 &&
grep "[Uu]sage" error3 &&
! grep "can.t shift" error3
'
test_expect_success 'rebase -q is quiet' '
git checkout -b quiet topic &&
git rebase -q master >output.out 2>&1 &&
test ! -s output.out
'
test_expect_success 'Rebase a commit that sprinkles CRs in' '
(
echo "One"
echo "TwoQ"
echo "Three"
echo "FQur"
echo "Five"
) | q_to_cr >CR &&
git add CR &&
test_tick &&
git commit -a -m "A file with a line with CR" &&
git tag file-with-cr &&
git checkout HEAD^0 &&
git rebase --onto HEAD^^ HEAD^ &&
git diff --exit-code file-with-cr:CR HEAD:CR
'
test_expect_success 'rebase can copy notes' '
git config notes.rewrite.rebase true &&
git config notes.rewriteRef "refs/notes/*" &&
test_commit n1 &&
test_commit n2 &&
test_commit n3 &&
git notes add -m"a note" n3 &&
git rebase --onto n1 n2 &&
test "a note" = "$(git notes show HEAD)"
'
test_expect_success 'rebase -m can copy notes' '
git reset --hard n3 &&
git rebase -m --onto n1 n2 &&
test "a note" = "$(git notes show HEAD)"
'
test_done
|
qsnake/git
|
t/t3400-rebase.sh
|
Shell
|
gpl-2.0
| 5,569 |
#!/bin/sh
LOPTIONS="-no-obsolete -locations none -source-language en"
lupdate=lupdate
if which -s lupdate-qt4; then
lupdate=lupdate-qt4
fi
lupdate="${lupdate} ${LOPTIONS}"
${lupdate} ../utils/utils.pro
${lupdate} ../loader/loader.pro
find ../plugins -name '*.pro' -exec ${lupdate} {} \;
|
sanchay160887/vacuum-im
|
src/translations/tsupdate.sh
|
Shell
|
gpl-3.0
| 292 |
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
$(dirname $0)/kafka-run-class.sh kafka.perf.ConsumerPerformance $@
|
Digsolab/kafka_2.10
|
bin/kafka-consumer-perf-test.sh
|
Shell
|
apache-2.0
| 862 |
#!/bin/bash -e
# This is useful so we can debug containers running inside of OpenShift that are
# failing to start properly.
if [ "$OO_PAUSE_ON_START" = "true" ] ; then
echo
echo "This container's startup has been paused indefinitely because OO_PAUSE_ON_START has been set."
echo
while sleep 10; do
true
done
fi
# interactive shells read .bashrc (which this script doesn't execute as) so force it
source /root/.bashrc
# Configure the container
time ansible-playbook /root/config.yml
# Configure rkhunter
echo '/usr/bin/ops-runner -f -t 600 -n monitoring.root.rkhunter.yml ansible-playbook /root/rkhunter.yml >> /var/log/rkhunter.yml.log &'
/usr/bin/ops-runner -f -t 600 -n monitoring.root.rkhunter.yml ansible-playbook /root/rkhunter.yml >> /var/log/rkhunter.yml.log &
# Send a heartbeat when the container starts up
/usr/bin/ops-metric-client --send-heartbeat
# fire off the check pmcd status script
check-pmcd-status.sh &
# fire off the pmcd script
#/usr/share/pcp/lib/pmcd start &
/root/start-pmcd.bash > /var/log/pmcd.log &
# Run the main service of this container
#
# SELinux is always active on the underlying host. Separately, crond fails to
# start inside the container if SELinux appears to be running separately inside.
# This might have started when other packages installed policy files which are
# unused but seen as present. To remedy this, we use setenforce to make this clearer.
echo
echo 'Starting crond'
echo '---------------'
exec /usr/sbin/crond -n -m off -x sch,proc,load
|
blrm/openshift-tools
|
docker/oso-host-monitoring/src/start.sh
|
Shell
|
apache-2.0
| 1,514 |
#!/bin/bash
set -e
source $(dirname $0)/common.sh
pushd git-repo > /dev/null
if [[ -d /opt/openjdk-toolchain ]]; then
./gradlew -Dorg.gradle.internal.launcher.welcomeMessageEnabled=false --no-daemon --max-workers=4 systemTest -PtoolchainVersion=${TOOLCHAIN_JAVA_VERSION} -Porg.gradle.java.installations.auto-detect=false -Porg.gradle.java.installations.auto-download=false -Porg.gradle.java.installations.paths=/opt/openjdk-toolchain/
else
./gradlew -Dorg.gradle.internal.launcher.welcomeMessageEnabled=false --no-daemon --max-workers=4 systemTest
fi
popd > /dev/null
|
spring-projects/spring-boot
|
ci/scripts/run-system-tests.sh
|
Shell
|
apache-2.0
| 574 |
#!/usr/bin/env bash
which register-python-argcomplete > /dev/null \
&& eval "$(register-python-argcomplete conda)" \
|| echo "Please install argcomplete to use conda completion"
|
moraleida/dotfiles
|
home/.bash_it/completion/available/conda.completion.bash
|
Shell
|
gpl-2.0
| 182 |
#!/bin/bash
export JAVA_HOME=$JAVA_HOME_1_6
rm -rf output
mkdir output
mvn clean package -Dmaven.test.skip=true
cd target
unzip -o heisenberg-server-1.0.3.2.zip
cd heisenberg-server-1.0.3.2
tar czf heisenberg-server.tgz *
cd ../../
cp target/heisenberg-server-1.0.3.2/heisenberg-server.tgz output/
|
ccvcd/heisenberg
|
build.sh
|
Shell
|
apache-2.0
| 302 |
#!/bin/bash -e
# -*- Mode: Shell-script; tab-width: 4; indent-tabs-mode: nil; -*-
# ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License Version
# 1.1 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# The Original Code is Mozilla JavaScript Testing Utilities
#
# The Initial Developer of the Original Code is
# Mozilla Corporation.
# Portions created by the Initial Developer are Copyright (C) 2007
# the Initial Developer. All Rights Reserved.
#
# Contributor(s): Bob Clary <[email protected]>
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK *****
if [[ -z "$TEST_DIR" ]]; then
cat <<EOF
`basename $0`: error
TEST_DIR, the location of the Sisyphus framework,
is required to be set prior to calling this script.
EOF
exit 2
fi
if [[ ! -e $TEST_DIR/bin/library.sh ]]; then
echo "TEST_DIR=$TEST_DIR"
echo ""
echo "This script requires the Sisyphus testing framework. Please "
echo "cvs check out the Sisyphys framework from mozilla/testing/sisyphus"
echo "and set the environment variable TEST_DIR to the directory where it"
echo "located."
echo ""
exit 2
fi
source $TEST_DIR/bin/library.sh
TEST_JSDIR=${TEST_JSDIR:-$TEST_DIR/tests/mozilla.org/js}
usage()
{
cat <<EOF
usage: process-logs.sh -l testlogfiles -A arch -K kernel
variable description
=============== ============================================================
testlogfiles The test log to be processed. If testlogfiles is a file
pattern it must be single quoted to prevent the shell from
expanding it before it is passed to the script.
EOF
exit 2
}
while getopts "l:" optname;
do
case $optname in
l) testlogfiles=$OPTARG;;
esac
done
if [[ -z "$testlogfiles" ]]; then
usage
fi
for testlogfile in `ls $testlogfiles`; do
debug "testlogfile=$testlogfile"
case $testlogfile in
*.log)
worktestlogfile=$testlogfile
;;
*.log.bz2)
worktestlogfile=`mktemp $testlogfile.XXXXXX`
bunzip2 -c $testlogfile > $worktestlogfile
;;
*.log.gz)
worktestlogfile=`mktemp $testlogfile.XXXXXX`
gunzip -c $testlogfile > $worktestlogfile
;;
*)
echo "unknown log type: $f"
exit 2
;;
esac
case "$testlogfile" in
*,js,*) testtype=shell;;
*,firefox,*) testtype=browser;;
*,thunderbird,*) testtype=browser;;
*,fennec,*) testtype=browser;;
*) error "unknown testtype in logfile $testlogfile" $LINENO;;
esac
debug "testtype=$testtype"
case "$testlogfile" in
*,nightly*) buildtype=nightly;;
*,opt,*) buildtype=opt;;
*,debug,*) buildtype=debug;;
*) error "unknown buildtype in logfile $testlogfile" $LINENO;
esac
debug "buildtype=$buildtype"
branch=`echo $testlogfile | sed 's|.*,\([0-9]\.[0-9]*\.[0-9]*\).*|\1|'`
debug "branch=$branch"
repo=`grep -m 1 '^environment: TEST_MOZILLA_HG=' $worktestlogfile | sed 's|.*TEST_MOZILLA_HG=http://hg.mozilla.org.*/\([^\/]*\)|\1|'`
if [[ -z "$repo" ]]; then
repo=CVS
fi
debug "repo=$repo"
case "$testlogfile" in
*,nt,*) OSID=nt;;
*,linux,*) OSID=linux;;
*,darwin,*) OSID=darwin;;
*)
OSID=`grep -m 1 '^environment: OSID=' $worktestlogfile | sed 's|.*OSID=\(.*\)|\1|'`
if [[ -z "$OSID" ]]; then
error "unknown OS in logfile $testlogfile" $LINENO
fi
;;
esac
debug "OSID=$OSID"
kernel=`grep -m 1 '^environment: TEST_KERNEL=' $worktestlogfile | sed 's|.*TEST_KERNEL=\(.*\)|\1|'`
if [[ "$OSID" == "linux" ]]; then
kernel=`echo $kernel | sed 's|\([0-9]*\)\.\([0-9]*\)\.\([0-9]*\).*|\1.\2.\3|'`
fi
debug "kernel=$kernel"
arch=`grep -m 1 '^environment: TEST_PROCESSORTYPE=' $worktestlogfile | sed 's|.*TEST_PROCESSORTYPE=\(.*\)|\1|'`
debug "arch=$arch"
memory=`grep -m 1 '^environment: TEST_MEMORY=' $worktestlogfile | sed 's|.*TEST_MEMORY=\(.*\)|\1|'`
timezone=`basename $testlogfile | sed 's|^[-0-9]*\([-+]\)\([0-9]\{4,4\}\),.*|\1\2|'`
debug "timezone=$timezone"
jsoptions=`grep -m 1 '^arguments: javascriptoptions=' $worktestlogfile | sed 's|.*javascriptoptions=\(.*\)|\1|'`
if [[ -z "$jsoptions" ]]; then
jsoptions=none
fi
debug "jsoptions=$jsoptions"
outputprefix=$testlogfile
includetests="included-$branch-$testtype-$buildtype.tests"
excludetests="excluded-$branch-$testtype-$buildtype.tests"
grep '^include: ' $worktestlogfile | sed 's|include: ||' > $TEST_DIR/tests/mozilla.org/js/$includetests
grep '^exclude: ' $worktestlogfile | sed 's|exclude: ||' > $TEST_DIR/tests/mozilla.org/js/$excludetests
$TEST_DIR/tests/mozilla.org/js/known-failures.pl \
-b "$branch" \
-T "$buildtype" \
-R "$repo" \
-t "$testtype" \
-o "$OSID" \
-K "$kernel" \
-A "$arch" \
-M "$memory" \
-z "$timezone" \
-J "$jsoptions" \
-r "$TEST_JSDIR/failures.txt" \
-l "$worktestlogfile" \
-O "$outputprefix"
if [[ "$testlogfile" != "$worktestlogfile" ]]; then
rm $worktestlogfile
unset worktestlogfile
fi
done
|
glycerine/vj
|
src/js-1.8.5/js/src/tests/process-logs.sh
|
Shell
|
apache-2.0
| 6,540 |
#!/bin/bash
#
# GNU/Linux build script for ProGuard.
#
# Configuration.
#
ANT_HOME=${ANT_HOME:-/usr/local/java/ant}
WTK_HOME=${WTK_HOME:-/usr/local/java/wtk}
if [ -z $PROGUARD_HOME ]; then
PROGUARD_HOME=$(which "$0")
PROGUARD_HOME=$(dirname "$0")/..
fi
cd "$PROGUARD_HOME"
SRC=src
CLASSES=classes
LIB=lib
PROGUARD=proguard/ProGuard
PROGUARD_GUI=proguard/gui/ProGuardGUI
RETRACE=proguard/retrace/ReTrace
ANT_TASK=proguard/ant/ProGuardTask
WTK_PLUGIN=proguard/wtk/ProGuardObfuscator
ANT_JAR=$ANT_HOME/lib/ant.jar
WTK_JAR=$WTK_HOME/wtklib/kenv.zip
PROGUARD_JAR=$LIB/proguard.jar
PROGUARD_GUI_JAR=$LIB/proguardgui.jar
RETRACE_JAR=$LIB/retrace.jar
#
# Function definitions.
#
function compile {
# Compile java source files.
echo "Compiling ${1//\//.} ..."
javac -nowarn -Xlint:none -sourcepath "$SRC" -d "$CLASSES" \
"$SRC/$1.java" 2>&1 \
| sed -e 's|^| |'
# Copy resource files.
(cd "$SRC"; find $(dirname $1) -maxdepth 1 \
\( -name \*.properties -o -name \*.png -o -name \*.gif -o -name \*.pro \) \
-exec cp --parents {} "../$CLASSES" \; )
}
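# Example (illustrative): "compile $PROGUARD" builds src/proguard/ProGuard.java
# into $CLASSES and copies its resource files alongside the classes.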
function createjar {
echo "Creating $2..."
jar -cfm "$2" "$SRC/$(dirname $1)/MANIFEST.MF" -C "$CLASSES" $(dirname $1)
}
function updatejar {
echo "Updating $PROGUARD_JAR..."
jar -uf "$PROGUARD_JAR" -C "$CLASSES" $(dirname $1)
}
#
# Main script body.
#
mkdir -p "$CLASSES"
compile $PROGUARD
createjar $PROGUARD "$PROGUARD_JAR"
compile $PROGUARD_GUI
createjar $PROGUARD_GUI "$PROGUARD_GUI_JAR"
compile $RETRACE
createjar $RETRACE "$RETRACE_JAR"
if [ -f "$ANT_JAR" ]; then
export CLASSPATH=$ANT_JAR
compile $ANT_TASK
updatejar $ANT_TASK
else
echo "Please make sure the environment variable ANT_HOME is set correctly,"
echo "if you want to compile the optional ProGuard Ant task."
fi
if [ -f "$WTK_JAR" ]; then
export CLASSPATH=$WTK_JAR
compile $WTK_PLUGIN
updatejar $WTK_PLUGIN
else
echo "Please make sure the environment variable WTK_HOME is set correctly,"
echo "if you want to compile the optional ProGuard WTK plugin."
fi
|
Fellowzdoomer/halfnes
|
proguard4.7/build/build.sh
|
Shell
|
gpl-3.0
| 2,051 |
#!/bin/bash
test -z "$(goimports -d .)"
if [[ -n "$(gofmt -s -l .)" ]]; then
echo -e '\e[31mCode not gofmt simplified in:\n\n'
gofmt -s -l .
echo -e "\e[0"
fi
|
mkumatag/origin
|
vendor/gonum.org/v1/gonum/.travis/script.d/check-formatting.sh
|
Shell
|
apache-2.0
| 163 |
#!/bin/sh
ES_CLASSPATH=$ES_CLASSPATH:$ES_HOME/lib/elasticsearch-1.0.0.jar:$ES_HOME/lib/*:$ES_HOME/lib/sigar/*
if [ "x$ES_MIN_MEM" = "x" ]; then
ES_MIN_MEM=256m
fi
if [ "x$ES_MAX_MEM" = "x" ]; then
ES_MAX_MEM=1g
fi
if [ "x$ES_HEAP_SIZE" != "x" ]; then
ES_MIN_MEM=$ES_HEAP_SIZE
ES_MAX_MEM=$ES_HEAP_SIZE
fi
# min and max heap sizes should be set to the same value to avoid
# stop-the-world GC pauses during resize, and so that we can lock the
# heap in memory on startup to prevent any of it from being swapped
# out.
JAVA_OPTS="$JAVA_OPTS -Xms${ES_MIN_MEM}"
JAVA_OPTS="$JAVA_OPTS -Xmx${ES_MAX_MEM}"
# new generation
if [ "x$ES_HEAP_NEWSIZE" != "x" ]; then
JAVA_OPTS="$JAVA_OPTS -Xmn${ES_HEAP_NEWSIZE}"
fi
# max direct memory
if [ "x$ES_DIRECT_SIZE" != "x" ]; then
JAVA_OPTS="$JAVA_OPTS -XX:MaxDirectMemorySize=${ES_DIRECT_SIZE}"
fi
# reduce the per-thread stack size
JAVA_OPTS="$JAVA_OPTS -Xss256k"
# set to headless, just in case
JAVA_OPTS="$JAVA_OPTS -Djava.awt.headless=true"
# Force the JVM to use IPv4 stack
if [ "x$ES_USE_IPV4" != "x" ]; then
JAVA_OPTS="$JAVA_OPTS -Djava.net.preferIPv4Stack=true"
fi
JAVA_OPTS="$JAVA_OPTS -XX:+UseParNewGC"
JAVA_OPTS="$JAVA_OPTS -XX:+UseConcMarkSweepGC"
JAVA_OPTS="$JAVA_OPTS -XX:CMSInitiatingOccupancyFraction=75"
JAVA_OPTS="$JAVA_OPTS -XX:+UseCMSInitiatingOccupancyOnly"
# GC logging options
if [ "x$ES_USE_GC_LOGGING" != "x" ]; then
JAVA_OPTS="$JAVA_OPTS -XX:+PrintGCDetails"
JAVA_OPTS="$JAVA_OPTS -XX:+PrintGCTimeStamps"
JAVA_OPTS="$JAVA_OPTS -XX:+PrintClassHistogram"
JAVA_OPTS="$JAVA_OPTS -XX:+PrintTenuringDistribution"
JAVA_OPTS="$JAVA_OPTS -XX:+PrintGCApplicationStoppedTime"
JAVA_OPTS="$JAVA_OPTS -Xloggc:/var/log/elasticsearch/gc.log"
fi
# Causes the JVM to dump its heap on OutOfMemory.
JAVA_OPTS="$JAVA_OPTS -XX:+HeapDumpOnOutOfMemoryError"
# The path to the heap dump location, note directory must exists and have enough
# space for a full heap dump.
#JAVA_OPTS="$JAVA_OPTS -XX:HeapDumpPath=$ES_HOME/logs/heapdump.hprof"
|
wenzhucjy/elasticsearch-rtf
|
bin/elasticsearch.in.sh
|
Shell
|
apache-2.0
| 2,022 |
#!/bin/bash
DEPLOY_SCRIPT=/data/bedrock/deploy
WORKING_DIR=/data/bedrock/src/www.mozilla.org-django/bedrock
SITE_NAME=www.mozilla.org
PYTHON=../venv/bin/python
PD_PATH=lib/product_details_json
cd $WORKING_DIR
$PYTHON manage.py update_product_details
$DEPLOY_SCRIPT -q "${SITE_NAME}-django/bedrock/$PD_PATH"
exit 0
|
petabyte/bedrock
|
bin/update-scripts/prod/update-prod-product-details.sh
|
Shell
|
mpl-2.0
| 316 |
#!/bin/sh
. "${TEST_SCRIPTS_DIR}/unit.sh"
define_test "Memory check, good situation, all memory checks enabled"
setup_memcheck
CTDB_MONITOR_MEMORY_USAGE="80:90"
CTDB_MONITOR_SWAP_USAGE="1:50"
ok_null
simple_test
|
SVoxel/R7800
|
git_home/samba.git/ctdb/tests/eventscripts/05.system.monitor.012.sh
|
Shell
|
gpl-2.0
| 218 |
#!/bin/sh
#
# Copyright (c) 2005, 2008, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
# @test
# @bug 6265810 6705893
# @build CheckEngine
# @run shell jrunscript-DTest.sh
# @summary Test the output of 'jrunscript -D'
. ${TESTSRC-.}/common.sh
setup
${JAVA} ${TESTVMOPTS} ${TESTJAVAOPTS} -cp ${TESTCLASSES} CheckEngine
if [ $? -eq 2 ]; then
echo "No js engine found and engine not required; test vacuously passes."
exit 0
fi
# test whether the value specified by the -D option is passed
# to the script as a java.lang.System property. sysProps is
# the jrunscript shell built-in variable for System properties.
${JRUNSCRIPT} -l nashorn -Djrunscript.foo=bar <<EOF
if (sysProps["jrunscript.foo"] == "bar") { println("Passed"); exit(0); }
// unexpected value
println("Unexpected System property value");
exit(1);
EOF
if [ $? -ne 0 ]; then
exit 1
fi
|
md-5/jdk10
|
test/jdk/sun/tools/jrunscript/jrunscript-DTest.sh
|
Shell
|
gpl-2.0
| 1,808 |
#!/bin/bash
export CFLAGS="-I$PREFIX/include"
export LDFLAGS="-L$PREFIX/lib"
export C_INCLUDE_PATH=${PREFIX}/include
CYTHONIZE=1 $PYTHON setup.py install --single-version-externally-managed --record=record.txt
|
ostrokach/bioconda-recipes
|
recipes/cyvcf2/build.sh
|
Shell
|
mit
| 210 |
#!/bin/sh
echo stdOut: first line
echo stdOut: second line
echo stdErr: first line 1>&2
echo stdErr: second line 1>&2
|
mzcity123/sonar-runner
|
sonar-runner-api/src/test/scripts/output.sh
|
Shell
|
lgpl-3.0
| 119 |
#!/usr/bin/env bash
metaphlan_hclust_heatmap.py \
--in test-data/merged_community_profile.tabular \
--out test-data/heatmap.png \
-m 'average' \
-d 'braycurtis' \
-f 'correlation' \
--minv '0' \
--tax_lev 'a' \
--sdend_h '0.1' \
--fdend_w '0.1' \
--cm_h '0.03' \
--font_size '7' \
--clust_line_w '1' \
--perc '90' \
-c 'jet'
metaphlan_hclust_heatmap.py \
--in test-data/merged_community_profile.tabular \
--out test-data/heatmap.pdf \
-m 'ward' \
-d 'euclidean' \
-f 'euclidean' \
--minv '0' \
--tax_lev 'a' \
--sdend_h '0.1' \
--fdend_w '0.1' \
--cm_h '0.03' \
--font_size '7' \
--clust_line_w '1' \
--perc '90' \
-c 'pink'
metaphlan_hclust_heatmap.py \
--in test-data/merged_community_profile.tabular \
--out test-data/heatmap.svg \
-m 'complete' \
-d 'hamming' \
-f 'matching' \
--minv '0' \
--tax_lev 'a' \
--sdend_h '0.1' \
--fdend_w '0.1' \
--cm_h '0.03' \
--font_size '7' \
--clust_line_w '1' \
--perc '90' \
-c 'pink'
|
lparsons/tools-iuc
|
tools/metaphlan2/generate_test_data.sh
|
Shell
|
mit
| 1,095 |
#!/bin/bash
set -e
pushd $(dirname $0) &>/dev/null
scriptdir=$PWD
popd &>/dev/null
tokudbdir=$(dirname $scriptdir)
cd $tokudbdir
if [ ! -d build ] ; then
mkdir build
pushd build
CC=gcc47 CXX=g++47 cmake \
-D CMAKE_BUILD_TYPE=Release \
-D USE_VALGRIND=ON \
-D TOKU_DEBUG_PARANOID=OFF \
-D USE_CTAGS=OFF \
-D USE_GTAGS=OFF \
-D USE_CSCOPE=OFF \
-D USE_ETAGS=OFF \
-D USE_BDB=ON \
-D CMAKE_LINK_DEPENDS_NO_SHARED=ON \
-G Ninja \
-D RUN_LONG_TESTS=ON \
-D TOKUDB_DATA=$tokudbdir/../tokudb.data \
..
ninja build_jemalloc build_lzma
popd
fi
cd build
set +e
ctest -j16 \
-D NightlyStart \
-D NightlyUpdate \
-D NightlyConfigure \
-D NightlyBuild \
-D NightlyTest \
-E '/drd|/helgrind'
ctest -j16 \
-D NightlyMemCheck \
-E '^ydb/.*\.bdb|test1426\.tdb|/drd|/helgrind'
set -e
ctest -D NightlySubmit
|
ottok/mariadb-galera-10.0
|
storage/tokudb/ft-index/scripts/run-nightly-release-tests.bash
|
Shell
|
gpl-2.0
| 950 |
#!/bin/bash
echo
swig -python wolfssl.i
pythonIncludes=`python-config --includes`
pythonLibs=`python-config --libs`
gcc -c -fpic wolfssl_wrap.c -I$pythonIncludes
gcc -c -fpic wolfssl_adds.c
gcc -shared -flat_namespace wolfssl_adds.o wolfssl_wrap.o -lwolfssl $pythonLibs -o _wolfssl.so
python runme.py
|
yashdsaraf/bb-bot
|
wolfssl/swig/PythonBuild.sh
|
Shell
|
gpl-3.0
| 304 |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
HADOOP_COMMON_DIR="./"
HADOOP_COMMON_LIB_JARS_DIR="lib"
HADOOP_COMMON_LIB_NATIVE_DIR="lib/native"
HDFS_DIR="./"
HDFS_LIB_JARS_DIR="lib"
YARN_DIR="./"
YARN_LIB_JARS_DIR="lib"
MAPRED_DIR="./"
MAPRED_LIB_JARS_DIR="lib"
HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-"/usr/lib/hadoop/libexec"}
HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-"/etc/hadoop/conf"}
HADOOP_COMMON_HOME=${HADOOP_COMMON_HOME:-"/usr/lib/hadoop"}
HADOOP_HDFS_HOME=${HADOOP_HDFS_HOME:-"/usr/lib/hadoop-hdfs"}
HADOOP_MAPRED_HOME=${HADOOP_MAPRED_HOME:-"/usr/lib/hadoop-mapreduce"}
HADOOP_YARN_HOME=${HADOOP_YARN_HOME:-"/usr/lib/hadoop-yarn"}
|
keedio/buildoopRecipes
|
hadoop/hadoop-2.2.0/rpm/sources/hadoop-layout.sh
|
Shell
|
apache-2.0
| 1,373 |
#!/bin/bash
set -eu -o pipefail
gunzip -v *.gz
mkdir -p "$PREFIX/bin"
cp asn2gb* $PREFIX/bin/asn2gb
chmod +x $PREFIX/bin/asn2gb
|
blankenberg/bioconda-recipes
|
recipes/asn2gb/build.sh
|
Shell
|
mit
| 130 |
#!/bin/bash
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# A library of helper functions and constant for the Container Linux distro.
source "${KUBE_ROOT}/cluster/gce/container-linux/helper.sh"
# create-master-instance creates the master instance. If called with
# an argument, the argument is used as the name to a reserved IP
# address for the master. (In the case of upgrade/repair, we re-use
# the same IP.)
#
# It requires a whole slew of assumed variables, partially due to to
# the call to write-master-env. Listing them would be rather
# futile. Instead, we list the required calls to ensure any additional
#
# variables are set:
# ensure-temp-dir
# detect-project
# get-bearer-token
function create-master-instance {
local address=""
[[ -n ${1:-} ]] && address="${1}"
write-master-env
create-master-instance-internal "${MASTER_NAME}" "${address}"
}
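# Usage sketch (address value hypothetical):
#   create-master-instance             # allocate an ephemeral master IP
#   create-master-instance 35.1.2.3    # re-use a reserved IP during upgrade/repair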
function replicate-master-instance() {
local existing_master_zone="${1}"
local existing_master_name="${2}"
local existing_master_replicas="${3}"
local kube_env="$(get-metadata "${existing_master_zone}" "${existing_master_name}" kube-env)"
# Substitute INITIAL_ETCD_CLUSTER to enable etcd clustering.
kube_env="$(echo "${kube_env}" | grep -v "INITIAL_ETCD_CLUSTER")"
kube_env="$(echo -e "${kube_env}\nINITIAL_ETCD_CLUSTER: '${existing_master_replicas},${REPLICA_NAME}'")"
ETCD_CA_KEY="$(echo "${kube_env}" | grep "ETCD_CA_KEY" | sed "s/^.*: '//" | sed "s/'$//")"
ETCD_CA_CERT="$(echo "${kube_env}" | grep "ETCD_CA_CERT" | sed "s/^.*: '//" | sed "s/'$//")"
create-etcd-certs "${REPLICA_NAME}" "${ETCD_CA_CERT}" "${ETCD_CA_KEY}"
kube_env="$(echo "${kube_env}" | grep -v "ETCD_PEER_KEY")"
kube_env="$(echo -e "${kube_env}\nETCD_PEER_KEY: '${ETCD_PEER_KEY_BASE64}'")"
kube_env="$(echo "${kube_env}" | grep -v "ETCD_PEER_CERT")"
kube_env="$(echo -e "${kube_env}\nETCD_PEER_CERT: '${ETCD_PEER_CERT_BASE64}'")"
echo "${kube_env}" > ${KUBE_TEMP}/master-kube-env.yaml
get-metadata "${existing_master_zone}" "${existing_master_name}" cluster-name > "${KUBE_TEMP}/cluster-name.txt"
create-master-instance-internal "${REPLICA_NAME}"
}
function create-master-instance-internal() {
local gcloud="gcloud"
local retries=5
if [[ "${ENABLE_IP_ALIASES:-}" == 'true' ]]; then
gcloud="gcloud beta"
fi
local -r master_name="${1}"
local -r address="${2:-}"
local preemptible_master=""
if [[ "${PREEMPTIBLE_MASTER:-}" == "true" ]]; then
preemptible_master="--preemptible --maintenance-policy TERMINATE"
fi
local network=$(make-gcloud-network-argument \
"${NETWORK_PROJECT}" "${REGION}" "${NETWORK}" "${SUBNETWORK:-}" \
"${address:-}" "${ENABLE_IP_ALIASES:-}" "${IP_ALIAS_SIZE:-}")
local metadata="kube-env=${KUBE_TEMP}/master-kube-env.yaml"
metadata="${metadata},user-data=${KUBE_ROOT}/cluster/gce/container-linux/master.yaml"
metadata="${metadata},configure-sh=${KUBE_ROOT}/cluster/gce/container-linux/configure.sh"
metadata="${metadata},cluster-name=${KUBE_TEMP}/cluster-name.txt"
local disk="name=${master_name}-pd"
disk="${disk},device-name=master-pd"
disk="${disk},mode=rw"
disk="${disk},boot=no"
disk="${disk},auto-delete=no"
for attempt in $(seq 1 ${retries}); do
if result=$(${gcloud} compute instances create "${master_name}" \
--project "${PROJECT}" \
--zone "${ZONE}" \
--machine-type "${MASTER_SIZE}" \
--image-project="${MASTER_IMAGE_PROJECT}" \
--image "${MASTER_IMAGE}" \
--tags "${MASTER_TAG}" \
--scopes "storage-ro,compute-rw,monitoring,logging-write" \
--metadata-from-file "${metadata}" \
--disk "${disk}" \
--boot-disk-size "${MASTER_ROOT_DISK_SIZE}" \
${preemptible_master} \
${network} 2>&1); then
echo "${result}" >&2
return 0
else
echo "${result}" >&2
if [[ ! "${result}" =~ "try again later" ]]; then
echo "Failed to create master instance due to non-retryable error" >&2
return 1
fi
sleep 10
fi
done
echo "Failed to create master instance despite ${retries} attempts" >&2
return 1
}
function get-metadata() {
local zone="${1}"
local name="${2}"
local key="${3}"
local metadata_url="http://metadata.google.internal/computeMetadata/v1/instance/attributes/${key}"
gcloud compute ssh "${name}" \
--project "${PROJECT}" \
--zone "${zone}" \
--command "curl '${metadata_url}' -H 'Metadata-Flavor: Google'" 2>/dev/null
}
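# Usage sketch (zone and instance name hypothetical):
#   kube_env="$(get-metadata us-central1-b kubernetes-master kube-env)"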
|
raffaelespazzoli/origin
|
vendor/k8s.io/kubernetes/cluster/gce/container-linux/master-helper.sh
|
Shell
|
apache-2.0
| 5,008 |
#!/bin/sh
test_description="Test whether cache-tree is properly updated
Tests whether various commands properly update and/or rewrite the
cache-tree extension.
"
. ./test-lib.sh
cmp_cache_tree () {
test-dump-cache-tree | sed -e '/#(ref)/d' >actual &&
sed "s/$_x40/SHA/" <actual >filtered &&
test_cmp "$1" filtered
}
# We don't bother with actually checking the SHA1:
# test-dump-cache-tree already verifies that all existing data is
# correct.
generate_expected_cache_tree_rec () {
dir="$1${1:+/}" &&
parent="$2" &&
# ls-files might have foo/bar, foo/bar/baz, and foo/bar/quux
# We want to count only foo because it's the only direct child
subtrees=$(git ls-files|grep /|cut -d / -f 1|uniq) &&
subtree_count=$(echo "$subtrees"|awk -v c=0 '$1 != "" {++c} END {print c}') &&
entries=$(git ls-files|wc -l) &&
printf "SHA $dir (%d entries, %d subtrees)\n" "$entries" "$subtree_count" &&
for subtree in $subtrees
do
cd "$subtree"
generate_expected_cache_tree_rec "$dir$subtree" "$dir" || return 1
cd ..
done &&
dir=$parent
}
generate_expected_cache_tree () {
(
generate_expected_cache_tree_rec
)
}
test_cache_tree () {
generate_expected_cache_tree >expect &&
cmp_cache_tree expect
}
test_invalid_cache_tree () {
printf "invalid %s ()\n" "" "$@" >expect &&
test-dump-cache-tree |
sed -n -e "s/[0-9]* subtrees//" -e '/#(ref)/d' -e '/^invalid /p' >actual &&
test_cmp expect actual
}
test_no_cache_tree () {
: >expect &&
cmp_cache_tree expect
}
test_expect_success 'initial commit has cache-tree' '
test_commit foo &&
test_cache_tree
'
test_expect_success 'read-tree HEAD establishes cache-tree' '
git read-tree HEAD &&
test_cache_tree
'
test_expect_success 'git-add invalidates cache-tree' '
test_when_finished "git reset --hard; git read-tree HEAD" &&
echo "I changed this file" >foo &&
git add foo &&
test_invalid_cache_tree
'
test_expect_success 'git-add in subdir invalidates cache-tree' '
test_when_finished "git reset --hard; git read-tree HEAD" &&
mkdir dirx &&
echo "I changed this file" >dirx/foo &&
git add dirx/foo &&
test_invalid_cache_tree
'
cat >before <<\EOF
SHA (3 entries, 2 subtrees)
SHA dir1/ (1 entries, 0 subtrees)
SHA dir2/ (1 entries, 0 subtrees)
EOF
cat >expect <<\EOF
invalid (2 subtrees)
invalid dir1/ (0 subtrees)
SHA dir2/ (1 entries, 0 subtrees)
EOF
test_expect_success 'git-add in subdir does not invalidate sibling cache-tree' '
git tag no-children &&
test_when_finished "git reset --hard no-children; git read-tree HEAD" &&
mkdir dir1 dir2 &&
test_commit dir1/a &&
test_commit dir2/b &&
echo "I changed this file" >dir1/a &&
cmp_cache_tree before &&
echo "I changed this file" >dir1/a &&
git add dir1/a &&
cmp_cache_tree expect
'
test_expect_success 'update-index invalidates cache-tree' '
test_when_finished "git reset --hard; git read-tree HEAD" &&
echo "I changed this file" >foo &&
git update-index --add foo &&
test_invalid_cache_tree
'
test_expect_success 'write-tree establishes cache-tree' '
test-scrap-cache-tree &&
git write-tree &&
test_cache_tree
'
test_expect_success 'test-scrap-cache-tree works' '
git read-tree HEAD &&
test-scrap-cache-tree &&
test_no_cache_tree
'
test_expect_success 'second commit has cache-tree' '
test_commit bar &&
test_cache_tree
'
test_expect_success PERL 'commit --interactive gives cache-tree on partial commit' '
cat <<-\EOT >foo.c &&
int foo()
{
return 42;
}
int bar()
{
return 42;
}
EOT
git add foo.c &&
test_invalid_cache_tree &&
git commit -m "add a file" &&
test_cache_tree &&
cat <<-\EOT >foo.c &&
int foo()
{
return 43;
}
int bar()
{
return 44;
}
EOT
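	# Drive the interactive session: p(atch) mode, pick file 1, finish the
	# selection, s(plit) the hunk, take only the second change (n, then y),
	# then q(uit). (A best-effort reading of the interactive prompts.)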
(echo p; echo 1; echo; echo s; echo n; echo y; echo q) |
git commit --interactive -m foo &&
test_cache_tree
'
test_expect_success 'commit in child dir has cache-tree' '
mkdir dir &&
>dir/child.t &&
git add dir/child.t &&
git commit -m dir/child.t &&
test_cache_tree
'
test_expect_success 'reset --hard gives cache-tree' '
test-scrap-cache-tree &&
git reset --hard &&
test_cache_tree
'
test_expect_success 'reset --hard without index gives cache-tree' '
rm -f .git/index &&
git reset --hard &&
test_cache_tree
'
test_expect_success 'checkout gives cache-tree' '
git tag current &&
git checkout HEAD^ &&
test_cache_tree
'
test_expect_success 'checkout -b gives cache-tree' '
git checkout current &&
git checkout -b prev HEAD^ &&
test_cache_tree
'
test_expect_success 'checkout -B gives cache-tree' '
git checkout current &&
git checkout -B prev HEAD^ &&
test_cache_tree
'
test_expect_success 'partial commit gives cache-tree' '
git checkout -b partial no-children &&
test_commit one &&
test_commit two &&
echo "some change" >one.t &&
git add one.t &&
echo "some other change" >two.t &&
git commit two.t -m partial &&
test_cache_tree
'
test_expect_success 'no phantom error when switching trees' '
mkdir newdir &&
>newdir/one &&
git add newdir/one &&
git checkout 2>errors &&
! test -s errors
'
test_done
|
teh/git
|
t/t0090-cache-tree.sh
|
Shell
|
gpl-2.0
| 5,090 |
#!/bin/sh
#
# Run this script in a directory with a working makefile to check for
# compiler warnings in SQLite.
#
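# Typical invocation (paths are illustrative): run from a configured build
# directory whose makefile can produce the amalgamation, e.g.:
#   cd bld && ../sqlite/tool/warnings.sh
#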
rm -f sqlite3.c
make sqlite3.c
echo '********** No optimizations. Includes FTS4 and RTREE *********'
gcc -c -Wshadow -Wall -Wextra -pedantic-errors -Wno-long-long -std=c89 \
-ansi -DHAVE_STDINT_H -DSQLITE_ENABLE_FTS4 -DSQLITE_ENABLE_RTREE \
sqlite3.c
echo '********** No optimizations. ENABLE_STAT4. THREADSAFE=0 *******'
gcc -c -Wshadow -Wall -Wextra -pedantic-errors -Wno-long-long -std=c89 \
-ansi -DSQLITE_ENABLE_STAT4 -DSQLITE_THREADSAFE=0 \
sqlite3.c
echo '********** Optimized -O3. Includes FTS4 and RTREE ************'
gcc -O3 -c -Wshadow -Wall -Wextra -pedantic-errors -Wno-long-long -std=c89 \
-ansi -DHAVE_STDINT_H -DSQLITE_ENABLE_FTS4 -DSQLITE_ENABLE_RTREE \
sqlite3.c
|
hongbinz/sqlcipher
|
tool/warnings.sh
|
Shell
|
bsd-3-clause
| 834 |