code | repo_name | path | language | license | size
---|---|---|---|---|---
stringlengths (2-1.05M) | stringlengths (5-110) | stringlengths (3-922) | stringclasses (1 value) | stringclasses (15 values) | int64 (2-1.05M)
#!/bin/bash
set -e
do_reset_config() {
/setMyCnf.sh
}
reset_root_pw() {
if ! grep -q ${MYSQL_ROOT_PASSWORD} /root/.my.cnf ; then
echo "resetting mysql root pw"
/usr/bin/mysqladmin -u root password "${MYSQL_ROOT_PASSWORD}"
mv -f /root/postConfig.my.cnf /root/.my.cnf
fi
}
case "$*" in
bash)
bash
;;
start)
echo "reseting my.cnf"
do_reset_config
echo "Starting Mysql"
if [ -f /var/run/mysqld/mysqld.sock ]; then
rm /var/run/mysqld/mysqld.sock
fi
if [ -f ${DATADIR}/mysqld.pid ]; then
rm ${DATADIR}/mysqld.pid
fi
mysqld_safe
#if [ ${USE_KEEPALIVED} = true ]; then
# /etc/init.d/keepalived start
#fi
#HACK to get Docker to stay running while in Daemon mode, tail stays in the foreground
mysqladmin --silent --wait=300 ping || exit 1 && tail -f /dev/null
;;
bootstrap)
echo "Bootstrappping Mysql"
do_reset_config
if [ -f /var/run/mysqld/mysqld.sock ]; then
rm /var/run/mysqld/mysqld.sock
fi
if [ -f ${DATADIR}/mysqld.pid ]; then
rm ${DATADIR}/mysqld.pid
fi
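# Bootstrap the cluster in the background and wait (up to 300 retries) for mysqld to answer ping before resetting the root password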
/etc/init.d/mysql bootstrap-pxc & mysqladmin --silent --wait=300 ping || exit 1
reset_root_pw
#if [ ${USE_KEEPALIVED} = true ]; then
# /etc/init.d/keepalived start
#fi
#HACK to get Docker to stay running while in Daemon mode, tail stays in the foreground
mysqladmin --silent --wait=300 ping || exit 1 && tail -f /dev/null
;;
*)
echo "Use either start or bootstrap"
;;
esac
| sveesible/percona-pxc1 | docker-entrypoint.sh | Shell | apache-2.0 | 1,451 |
#!/bin/bash
set -e
# keep exit status
status=0
ANSIBLE_CONFIG=/ansible/ansible.cfg
ANSIBLE_PLAYBOOK=/ansible/provision-vm.yml
ANSIBLE_INVENTORY=/ansible/environments/docker/inventory
ANSIBLE_SECRETS=/ansible/environments/vm/secrets/vm.yml
# start docker container
docker run --detach \
-v "${PWD}":/ansible:rw \
-v /sys/fs/cgroup:/sys/fs/cgroup:ro \
--privileged \
--name ansible-test \
--add-host static.vm.openconext.org:127.0.0.1 \
--add-host metadata.vm.openconext.org:127.0.0.1 \
--add-host serviceregistry.vm.openconext.org:127.0.0.1 \
--add-host engine.vm.openconext.org:127.0.0.1 \
--add-host profile.vm.openconext.org:127.0.0.1 \
--add-host mujina-sp.vm.openconext.org:127.0.0.1 \
--add-host mujina-idp.vm.openconext.org:127.0.0.1 \
--add-host teams.vm.openconext.org:127.0.0.1 \
--add-host authz.vm.openconext.org:127.0.0.1 \
--add-host authz-admin.vm.openconext.org:127.0.0.1 \
--add-host authz-playground.vm.openconext.org:127.0.0.1 \
--add-host voot.vm.openconext.org:127.0.0.1 \
--add-host lb.vm.openconext.org:127.0.0.1 \
--add-host apps.vm.openconext.org:127.0.0.1 \
--add-host db.vm.openconext.org:127.0.0.1 \
--add-host pdp.vm.openconext.org:127.0.0.1 \
--add-host engine-api.vm.openconext.org:127.0.0.1 \
--add-host aa.vm.openconext.org:127.0.0.1 \
--add-host link.vm.openconext.org:127.0.0.1 \
--add-host multidata.vm.openconext.org:127.0.0.1 \
--add-host oidc.vm.openconext.org:127.0.0.1 \
--add-host manage.vm.openconext.org:127.0.0.1 \
--hostname test.openconext.org \
-e TERM=xterm \
-e ANSIBLE_CONFIG=${ANSIBLE_CONFIG} \
surfnet/centos7-openconext
# initialize ansible.cfg
cat <<-'EOF' > /tmp/ansible.cfg
[defaults]
callback_plugins=/ansible/callback_plugins
callback_whitelist=profile_tasks
[ssh_connection]
ssh_args=-o ControlMaster=auto -o ControlPersist=60m
pipelining=True
EOF
# and copy it into the container
docker cp /tmp/ansible.cfg ansible-test:${ANSIBLE_CONFIG}
echo
echo "================================================================="
echo "================================================================="
echo "== STARTING SYNTAX CHECK ========================================"
echo "================================================================="
echo "================================================================="
echo
docker exec -t ansible-test \
ansible-playbook \
-i $ANSIBLE_INVENTORY \
-e secrets_file=$ANSIBLE_SECRETS \
$ANSIBLE_PLAYBOOK \
--syntax-check
echo
echo "================================================================="
echo "================================================================="
echo "== STARTING MAIN PLAYBOOK RUN ==================================="
echo "================================================================="
echo "================================================================="
echo
docker exec -t ansible-test \
ansible-playbook \
-i $ANSIBLE_INVENTORY \
-e secrets_file=$ANSIBLE_SECRETS \
$ANSIBLE_PLAYBOOK
echo
echo "================================================================="
echo "================================================================="
echo "== STARTING IDEMPOTENCY TEST ===================================="
echo "================================================================="
echo "================================================================="
echo
TMPOUT=$(tempfile)
docker exec -t ansible-test \
ansible-playbook \
-i $ANSIBLE_INVENTORY \
-e secrets_file=$ANSIBLE_SECRETS \
$ANSIBLE_PLAYBOOK \
| tee $TMPOUT
echo
echo "================================================================="
echo "================================================================="
if grep -q 'changed=0.*failed=0' $TMPOUT
then
echo "== IDEMPOTENCY CHECK: PASS ======================================"
else
echo "== IDEMPOTENCY CHECK: FAILED! ==================================="
status=1
fi
echo "================================================================="
echo "================================================================="
echo
docker exec -t ansible-test \
ansible-playbook \
-i $ANSIBLE_INVENTORY \
/ansible/tests/all_services_are_up.yml
exit $status
| baszoetekouw/OpenConext-deploy | tests/travis-build.sh | Shell | apache-2.0 | 5,248 |
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/bin/bash
set -e
set -x
virtualenv -p python3 .
source ./bin/activate
pip install tensorflow
pip install -r sail_rl/requirements.txt
python -m sail_rl.train --env gym --game Pong --agent_type sail_dqn --workdir /tmp/sail --gin_file sail_rl/configs/atari_dqn.gin
| google-research/google-research | sail_rl/run.sh | Shell | apache-2.0 | 859 |
#!/bin/bash
# Setup
sudo dpkg -s mesos
if [ $? -eq 0 ]
then
echo "Mesos is already installed"
exit $?
fi
if [ -z "$MESOS_VERSION" ]
then
MESOS_VERSION=`curl -H "Metadata-Flavor: Google" "http://metadata.google.internal/computeMetadata/v1/instance/attributes/mesosversion"`
if [ -z "$MESOS_VERSION" ]
then
echo "$MESOS_VERSION is not set"
exit 1
fi
fi
# Add the repository
sudo apt-key adv --keyserver keyserver.ubuntu.com --recv E56151BF
DISTRO=$(lsb_release -is | tr '[:upper:]' '[:lower:]')
CODENAME=$(lsb_release -cs)
echo "deb http://repos.mesosphere.io/${DISTRO} ${CODENAME} main" | \
sudo tee /etc/apt/sources.list.d/mesosphere.list
sudo apt-get -y update
# Generate locale
sudo locale-gen en_US.UTF-8
# Try to install Mesos from a package
sudo apt-get -y install mesos=$MESOS_VERSION-1.0.ubuntu1404
if [ $? -eq 0 ]
then
echo "Mesos $MESOS_VERSION installed"
exit 0
fi
if [ -z "$PACKER_BUILD" ]
then
echo "There is no package for Mesos $MESOS_VERSION available. Please try to build it from sources into an image. More info: terraform-mesos/images/README.md"
exit 1
fi
# Try to install Mesos from sources
sudo apt-get -y install git htop build-essential openjdk-7-jdk python-dev python-boto libcurl4-nss-dev libsasl2-dev maven libapr1-dev libsvn-dev autoconf libtool httpie
mkdir /tmp/mesos
cd /tmp/mesos
git clone https://git-wip-us.apache.org/repos/asf/mesos.git .
git checkout $MESOS_VERSION
if [ $? -ne 0 ]
then
echo "Cannot find branch $MESOS_VERSION in Mesos git repository"
exit 1
fi
./bootstrap
mkdir build
cd build
../configure
make
sudo make install
# Post install scripting
chmod +x /usr/bin/mesos-init-wrapper
mkdir -p /usr/share/doc/mesos /etc/default /etc/mesos /var/log/mesos
mkdir -p /etc/mesos-master /etc/mesos-slave /var/lib/mesos
cp ../CHANGELOG /usr/share/doc/mesos/
echo zk://localhost:2181/mesos > /etc/mesos/zk
echo /var/lib/mesos > /etc/mesos-master/work_dir
echo 1 > /etc/mesos-master/quorum
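# cp -s creates symlinks rather than copies, exposing the Mesos shared libraries in /usr/local/lib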
( cd /usr/local/lib && cp -s ../../lib/lib*.so . )
| samklr/terraform-mesos | scripts/mesos_install.sh | Shell | apache-2.0 | 2,007 |
#!/bin/bash
#
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e
set -x
if [[ $# -ne 1 ]]; then
echo "Usage: $0 <disk-id to clean>"
exit -1
fi
disk_id=$1
# Mount the dirty disk
mkdir -p /mnt/disks/${disk_id}
mount -t ext4 -o discard,defaults \
/dev/disk/by-id/google-${disk_id}-part1 \
/mnt/disks/${disk_id}
cd /mnt/disks/${disk_id}
echo "FYI, disk Usage is `df -kh .`"
# Remove user information from etc/ and their home directories.
# But leave spinnaker.
etc_files="group gshadow passwd shadow subgid subuid"
for user in $(ls home); do
if [[ -f home/${user}/keep_user ]]; then
# If we want to keep non-standard users, then
# mark them with a keep_user in their root directory.
# We'll remove the marker here, but keep the user intact.
rm -f home/${user}/keep_user
continue
fi
if [[ "$user" != "spinnaker" && "$user" != "ubuntu" ]]; then
for file in $etc_files; do
sed /^$user:.*/d -i etc/${file} || true
sed /^$user:.*/d -i etc/${file}- || true
done
rm -rf home/$user;
fi
done
# Remove authorized keys
if [[ -f root/.ssh/authorized_keys ]]; then
cat /dev/null > root/.ssh/authorized_keys
fi
# Remove tmp and log files
rm -rf tmp/*
find var/log -type f -exec rm {} \;
cd /
umount /mnt/disks/${disk_id}
| jtk54/spinnaker | dev/clean_google_image.sh | Shell | apache-2.0 | 1,837 |
#!/bin/bash
# Copyright (C) 2013-2015 Computer Sciences Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
. common.sh
#configure nginx build
if [ "$1" != "--quick-mode" ]; then
echo "-- configuring nginx with module dependencies"
echo_and_execute_cmd "cd ${EZNGINX_BUILD_PATH}"
echo_and_execute_cmd "./configure.sh"
echo_and_execute_cmd "cd ${CWD}"
fi
##build eznginx libraries
echo_and_execute_cmd "./build_ezbake_nginx_module_rpm.sh"
##build eznginx app
echo_and_execute_cmd "./build_ezbake_nginx_app.sh --quick-mode"
#build RPMs
./build_ezbake_frontend_rpm-vm.sh
| crawlik/ezbake-platform-services | efe/build-vm.sh | Shell | apache-2.0 | 1,117 |
#!/bin/bash -e
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
. ./kythe/cxx/extractor/testdata/skip_functions.sh
# This script checks that extract_compilation_database.sh works on a simple
# compilation database.
BASE_DIR="$PWD/kythe/extractors/cmake"
OUT_DIR="$TEST_TMPDIR"
EXTRACT="${BASE_DIR}/extract_compilation_database.sh"
EXPECTED_FILE_HASH="deac66ccb79f6d31c0fa7d358de48e083c15c02ff50ec1ebd4b64314b9e6e196"
KINDEX_TOOL="$PWD/kythe/cxx/tools/kindex_tool"
export KYTHE_EXTRACTOR="$PWD/kythe/cxx/extractor/cxx_extractor"
export JQ="$PWD/third_party/jq/jq"
cd "${BASE_DIR}/testdata"
KYTHE_CORPUS=test_corpus KYTHE_ROOT_DIRECTORY="${BASE_DIR}" \
KYTHE_OUTPUT_DIRECTORY="${OUT_DIR}" \
"${EXTRACT}" "${BASE_DIR}/testdata/compilation_database.json"
[[ $(ls -1 "${OUT_DIR}"/*.kindex | wc -l) -eq 1 ]]
INDEX_PATH=$(ls -1 "${OUT_DIR}"/*.kindex)
"${KINDEX_TOOL}" -canonicalize_hashes -suppress_details -explode \
"${INDEX_PATH}"
# Remove lines that are system specific
skip_inplace "-target" 1 "${INDEX_PATH}_UNIT"
sed "s:TEST_CWD:${PWD}/:
s:TEST_EXTRACTOR:${KYTHE_EXTRACTOR}:" "${BASE_DIR}/testdata/expected.unit" | \
skip "-target" 1 |
diff - "${INDEX_PATH}_UNIT"
diff "${BASE_DIR}/testdata/expected.file" "${INDEX_PATH}_${EXPECTED_FILE_HASH}"
| legrosbuffle/kythe | kythe/extractors/cmake/test_extract_compilation_database.sh | Shell | apache-2.0 | 1,815 |
#!/bin/bash
set -e
rm -rf stage
rm -rf public
./hugo
mkdir stage -p
mv public/* stage
cp favicon.png stage/images/
rsync --delete -rcv stage/* con:/www/docs.factcast.org
rm -rf stage
| uweschaefer/factcast | factcast-site/documentation/release.sh | Shell | apache-2.0 | 188 |
# -----------------------------------------------------------------------------
#
# Package : fliplog
# Version : 0.3.13
# Source repo : https://github.com/fliphub/fliplog
# Tested on : RHEL 8.3
# Script License: Apache License, Version 2 or later
# Maintainer : BulkPackageSearch Automation <[email protected]>
#
# Disclaimer: This script has been tested in root mode on given
# ========== platform using the mentioned version of the package.
# It may not work as expected with newer versions of the
# package and/or distribution. In such case, please
# contact "Maintainer" of this script.
#
# ----------------------------------------------------------------------------
PACKAGE_NAME=fliplog
PACKAGE_VERSION=0.3.13
PACKAGE_URL=https://github.com/fliphub/fliplog
yum -y update && yum install -y yum-utils nodejs nodejs-devel nodejs-packaging npm python38 python38-devel ncurses git gcc gcc-c++ libffi libffi-devel ncurses git jq make cmake
yum-config-manager --add-repo http://rhn.pbm.ihost.com/rhn/latest/8.3Server/ppc64le/appstream/
yum-config-manager --add-repo http://rhn.pbm.ihost.com/rhn/latest/8.3Server/ppc64le/baseos/
yum-config-manager --add-repo http://rhn.pbm.ihost.com/rhn/latest/7Server/ppc64le/optional/
yum install -y firefox liberation-fonts xdg-utils && npm install n -g && n latest && npm install -g npm@latest && export PATH="$PATH" && npm install --global yarn grunt-bump xo testem acorn
OS_NAME=`python3 -c "os_file_data=open('/etc/os-release').readlines();os_info = [i.replace('PRETTY_NAME=','').strip() for i in os_file_data if i.startswith('PRETTY_NAME')];print(os_info[0])"`
HOME_DIR=`pwd`
if ! git clone $PACKAGE_URL $PACKAGE_NAME; then
echo "------------------$PACKAGE_NAME:clone_fails---------------------------------------"
echo "$PACKAGE_URL $PACKAGE_NAME" > /home/tester/output/clone_fails
echo "$PACKAGE_NAME | $PACKAGE_URL | $PACKAGE_VERSION | $OS_NAME | GitHub | Fail | Clone_Fails" > /home/tester/output/version_tracker
exit 0
fi
cd $HOME_DIR/$PACKAGE_NAME
git checkout $PACKAGE_VERSION
PACKAGE_VERSION=$(jq -r ".version" package.json)
# run the test command from test.sh
if ! npm install && npm audit fix && npm audit fix --force; then
echo "------------------$PACKAGE_NAME:install_fails-------------------------------------"
echo "$PACKAGE_URL $PACKAGE_NAME"
echo "$PACKAGE_NAME | $PACKAGE_URL | $PACKAGE_VERSION | $OS_NAME | GitHub | Fail | Install_Fails"
exit 0
fi
cd $HOME_DIR/$PACKAGE_NAME
if ! npm test; then
echo "------------------$PACKAGE_NAME:install_success_but_test_fails---------------------"
echo "$PACKAGE_URL $PACKAGE_NAME"
echo "$PACKAGE_NAME | $PACKAGE_URL | $PACKAGE_VERSION | $OS_NAME | GitHub | Fail | Install_success_but_test_Fails"
exit 0
else
echo "------------------$PACKAGE_NAME:install_&_test_both_success-------------------------"
echo "$PACKAGE_URL $PACKAGE_NAME"
echo "$PACKAGE_NAME | $PACKAGE_URL | $PACKAGE_VERSION | $OS_NAME | GitHub | Pass | Both_Install_and_Test_Success"
exit 0
fi
| ppc64le/build-scripts | f/fliplog/fliplog_rhel_8.3.sh | Shell | apache-2.0 | 3,048 |
#!/bin/bash
. 000_define.sh
printf "Prepare some system config.\n"
${sudo_cmd} cp ${verbose} 000_define.sh ${new_root}/
${sudo_cmd} cp ${verbose} ${chroot_files}/in_chroot_task.sh ${new_root}/
${sudo_cmd} cp ${verbose} /etc/resolv.conf ${new_root}/etc/
if [ $use_packages = 1 ] ; then
${sudo_cmd} cp ${verbose} ${chroot_files}/binpkg ${new_root}/etc/portage/make.conf
printf 'PORTAGE_BINHOST="'$binhost'"' | ${sudo_cmd} tee -a ${new_root}/etc/portage/make.conf/binpkg >/dev/null
fi
if [ $ru = 1 ] ; then
${sudo_cmd} mv ${verbose} ${new_root}/etc/conf.d/consolefont ${new_root}/etc/conf.d/consolefont.default
${sudo_cmd} cp ${verbose} ${chroot_files}/consolefont ${new_root}/etc/conf.d/consolefont
${sudo_cmd} mv ${verbose} ${new_root}/etc/conf.d/keymaps ${new_root}/etc/conf.d/keymaps.default
${sudo_cmd} cp ${verbose} ${chroot_files}/keymaps ${new_root}/etc/conf.d/keymaps
fi
[ -f ${chroot_files}/${kernel} ] && ${sudo_cmd} cp ${verbose} ${chroot_files}/${kernel} ${new_root}/.config
${sudo_cmd} cp ${verbose} first* ${new_root}/root
| ppv77/qsgentoo | 450_prepare_other.sh | Shell | apache-2.0 | 1,073 |
#!/bin/bash
##############################################
####### Install from scratch
##############################################
echo "installing site from scratch"
source ./build_scripts/backup_src_dir.sh
############################################
##### Download Drupal
############################################
source ./build_scripts/download_drupal.sh
###########################################
##### Install drupal on the docker container
##########################################
# this command also needs the correct drupal version, else it just downloads the most recent
echo "Drupal Version == $drupal_version"
echo "Apache Root == $apache_root"
if [ "$drupal_version" ];then
echo "drupal_version is set to $drupal_version so we will try and get that version"
###########################
####### copy a working settings file
###########################
# @TODO move the settings file to somewhere obvious
cp dockers/apache/drupal_docker_settings.php ./src/docroot/sites/default/settings.php
fi
#########################
#### run site install on the apache-server container
########################
echo "running site install on the apache-server"
$vagrant_apache_docker_run -- $drush_site_install
| heavyengineer/drupal-tug | build_scripts/install_from_scratch.sh | Shell | apache-2.0 | 1,239 |
#!/bin/bash -e
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
TEST_NAME="test_modules"
. ./kythe/cxx/extractor/testdata/test_common.sh
. ./kythe/cxx/extractor/testdata/skip_functions.sh
KYTHE_OUTPUT_DIRECTORY="${OUT_DIR}" \
"./${EXTRACTOR}" --with_executable "/usr/bin/g++" \
-fmodules \
-fmodule-map-file="kythe/cxx/extractor/testdata/modfoo.modulemap" \
-I./kythe/cxx/extractor \
./kythe/cxx/extractor/testdata/modules.cc
[[ $(ls -1 "${OUT_DIR}"/*.kindex | wc -l) -eq 1 ]]
INDEX_PATH=$(ls -1 "${OUT_DIR}"/*.kindex)
"${KINDEX_TOOL}" -canonicalize_hashes -suppress_details -explode "${INDEX_PATH}"
# Remove lines that will change depending on the machine the test is run on.
skip_inplace "-target" 1 "${INDEX_PATH}_UNIT"
skip_inplace "signature" 0 "${INDEX_PATH}_UNIT"
sed "s|TEST_CWD|${PWD}/|" "${BASE_DIR}/modules.UNIT" | \
skip "-target" 1 |
skip "signature" 0 |
diff - "${INDEX_PATH}_UNIT"
| benjyw/kythe | kythe/cxx/extractor/testdata/test_modules.sh | Shell | apache-2.0 | 1,476 |
#!/bin/bash
#
# Copyright (C) 2005-2020 Centre National d'Etudes Spatiales (CNES)
#
# This file is part of Orfeo Toolbox
#
# https://www.orfeo-toolbox.org/
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# usage : call CI/otb_coverage.sh from source dir
OTB_DIR="$(dirname $0)/.."
OTB_DIR="$(readlink -f $OTB_DIR)"
if [ -z "$BUILD_DIR" ]; then
BUILD_DIR=${OTB_DIR}/build
fi
echo Generating gcov reports in $BUILD_DIR ...
cd $BUILD_DIR
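# Generate per-file gcov reports, then drop any that do not belong to OTB module sources (include/src/app)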
find $BUILD_DIR -name "*.gcda" -exec llvm-cov gcov -p '{}' > /dev/null \;
ls *.gcov | grep -E -v '#Modules#[a-zA-Z0-9]+#[a-zA-Z0-9]+#(include|src|app)#' | xargs -L 1 rm
echo Filtered $(ls $BUILD_DIR/*.gcov | wc -l) gcov reports
gcovr -r $OTB_DIR -x -g --object-directory=$BUILD_DIR > $BUILD_DIR/coverage_report.xml
echo Generated $BUILD_DIR/coverage_report.xml with $(grep -c '<class ' $BUILD_DIR/coverage_report.xml) classes
| orfeotoolbox/OTB | CI/otb_coverage.sh | Shell | apache-2.0 | 1,369 |
# -----------------------------------------------------------------------------
#
# Package : error-symbol
# Version : 0.1.0
# Source repo : https://github.com/jonschlinkert/error-symbol
# Tested on : RHEL 8.3
# Script License: Apache License, Version 2 or later
# Maintainer : BulkPackageSearch Automation <[email protected]>
#
# Disclaimer: This script has been tested in root mode on given
# ========== platform using the mentioned version of the package.
# It may not work as expected with newer versions of the
# package and/or distribution. In such case, please
# contact "Maintainer" of this script.
#
# ----------------------------------------------------------------------------
PACKAGE_NAME=error-symbol
PACKAGE_VERSION=0.1.0
PACKAGE_URL=https://github.com/jonschlinkert/error-symbol
yum -y update && yum install -y yum-utils nodejs nodejs-devel nodejs-packaging npm python38 python38-devel ncurses git gcc gcc-c++ libffi libffi-devel ncurses git jq make cmake
yum-config-manager --add-repo http://rhn.pbm.ihost.com/rhn/latest/8.3Server/ppc64le/appstream/
yum-config-manager --add-repo http://rhn.pbm.ihost.com/rhn/latest/8.3Server/ppc64le/baseos/
yum-config-manager --add-repo http://rhn.pbm.ihost.com/rhn/latest/7Server/ppc64le/optional/
yum install -y firefox liberation-fonts xdg-utils && npm install n -g && n latest && npm install -g npm@latest && export PATH="$PATH" && npm install --global yarn grunt-bump xo testem acorn
OS_NAME=`python3 -c "os_file_data=open('/etc/os-release').readlines();os_info = [i.replace('PRETTY_NAME=','').strip() for i in os_file_data if i.startswith('PRETTY_NAME')];print(os_info[0])"`
HOME_DIR=`pwd`
if ! git clone $PACKAGE_URL $PACKAGE_NAME; then
echo "------------------$PACKAGE_NAME:clone_fails---------------------------------------"
echo "$PACKAGE_URL $PACKAGE_NAME" > /home/tester/output/clone_fails
echo "$PACKAGE_NAME | $PACKAGE_URL | $PACKAGE_VERSION | $OS_NAME | GitHub | Fail | Clone_Fails" > /home/tester/output/version_tracker
exit 0
fi
cd $HOME_DIR/$PACKAGE_NAME
git checkout $PACKAGE_VERSION
PACKAGE_VERSION=$(jq -r ".version" package.json)
# run the test command from test.sh
if ! npm install && npm audit fix && npm audit fix --force; then
echo "------------------$PACKAGE_NAME:install_fails-------------------------------------"
echo "$PACKAGE_URL $PACKAGE_NAME"
echo "$PACKAGE_NAME | $PACKAGE_URL | $PACKAGE_VERSION | $OS_NAME | GitHub | Fail | Install_Fails"
exit 0
fi
cd $HOME_DIR/$PACKAGE_NAME
if ! npm test; then
echo "------------------$PACKAGE_NAME:install_success_but_test_fails---------------------"
echo "$PACKAGE_URL $PACKAGE_NAME"
echo "$PACKAGE_NAME | $PACKAGE_URL | $PACKAGE_VERSION | $OS_NAME | GitHub | Fail | Install_success_but_test_Fails"
exit 0
else
echo "------------------$PACKAGE_NAME:install_&_test_both_success-------------------------"
echo "$PACKAGE_URL $PACKAGE_NAME"
echo "$PACKAGE_NAME | $PACKAGE_URL | $PACKAGE_VERSION | $OS_NAME | GitHub | Pass | Both_Install_and_Test_Success"
exit 0
fi
| ppc64le/build-scripts | e/error-symbol/error-symbol_rhel_8.3.sh | Shell | apache-2.0 | 3,078 |
#!/bin/bash
set -e
set -x
SCRIPTPATH=$( cd "$(dirname "$0")" ; pwd -P )
DIRBUILD=~/tmp/bitcoin
# wget -qO- https://github.com/bitcoin/bitcoin/archive/v0.10.0.tar.gz | tar xvz -C ~/tmp
rm -rf ${DIRBUILD}
cp -r ~/tmp/bitcoin-0.10.0 ${DIRBUILD}
cd ${DIRBUILD}
./autogen.sh
CONFIGFLAGS="-disable-tests --without-gui --enable-upnp-default --disable-ccache --disable-maintainer-mode --disable-dependency-tracking"
./configure --prefix=${SCRIPTPATH} --with-incompatible-bdb ${CONFIGFLAGS}
make -j4
make install-strip
#
# William-Yeh/extract-elf-so
# https://github.com/William-Yeh/extract-elf-so
#
| y12studio/y12docker | elf-btcd/build.sh | Shell | apache-2.0 | 594 |
#!/bin/bash
. ./env.sh
LB_OPTS="--resource-dir ${LB_RESOURCE_DIR}"
if [ "$OS" = "Windows_NT" ]; then
LB_RESOURCE_DIR=`{ cd $LB_RESOURCE_DIR && pwd -W; }`
LB_OPTS="--resource-dir ${LB_RESOURCE_DIR}"
fi
java -jar linkbinder-subscriber/build/libs/linkbinder-subscriber*.jar $LB_OPTS
| otsecbsol/linkbinder | run_subscriber.sh | Shell | apache-2.0 | 292 |
npm install -g wcm-design.tar.gz
npm install -g --unsafe-perm --no-optional dashboard.tar.gz
npm install -g [email protected]
| OpenNTF/WebDevToolkitForDx | install.sh | Shell | apache-2.0 | 116 |
#!/bin/bash
cname="$1" # Container name.
if [ x"$cname" == "x" ]; then
echo "Invalid container usage" >&2
exit 4
fi
SDIR=$(readlink -f `dirname "${BASH_SOURCE[0]}"`)
if [ "`docker inspect --format '{{ .State.Running }}' "$cname"`" != "true" ]; then
echo "[FAIL] Container $cname is not running"
exit 3
else
echo "[ ok ] Running"
fi
| millerlogic/cluster-deploy | roles/docker-container/status-container.sh | Shell | apache-2.0 | 341 |
#!/bin/bash
#
# <#> a <http://purl.org/twc/vocab/conversion/ConversionTrigger> ;
# rdfs:seeAlso <https://github.com/timrdf/csv2rdf4lod-automation/wiki/Conversion-trigger>,
# <https://github.com/timrdf/csv2rdf4lod-automation/blob/master/bin/cr-create-convert-sh.sh> .
#
# datasetID versionID (lastModDate):
# domain-template-leaves-spaces 2011-Jun-09 ()
#--------------------------------------------------------------
CSV2RDF4LOD_HOME=${CSV2RDF4LOD_HOME:?"not set; source csv2rdf4lod/source-me.sh or see https://github.com/timrdf/csv2rdf4lod-automation/wiki/CSV2RDF4LOD-not-set"}
surrogate="http://logd.tw.rpi.edu" # Came from $CSV2RDF4LOD_BASE_URI when cr-create-convert-sh.sh created this script.
sourceID="lebot"
datasetID="domain-template-leaves-spaces"
datasetVersion="2011-Jun-09" # NO SPACES; Use curl -I -L http://www.data.gov/download/domain-template-leaves-spaces/csv | grep Last-Modified: | awk '{printf(%s-%s-%s,,,)}'
versionID="2011-Jun-09" # renaming datasetVersion (deprecating datasetVersion)
eID="1" # enhancement identifier
if [ $# -ge 2 ]; then
if [ $1 == "-e" ]; then
eID="$2"
fi
fi
# manual/test.csv
sourceDir="manual" # if directly from source, 'source'; if manual manipulations of source were required, 'manual'.
destDir="automatic" # always 'automatic'
#--------------------------------------------------------------
#-----------------------------------
datafile="test.csv"
data="$sourceDir/$datafile"
subjectDiscriminator="" # Additional part of URI for subjects created; must be URI-ready (e.g., no spaces).
commentCharacter="" # ONLY one character; complain to http://sourceforge.net/projects/javacsv/ otherwise.
cellDelimiter="," # ONLY one character; complain to http://sourceforge.net/projects/javacsv/ otherwise.
header= # Line that header is on; only needed if not '1'. '0' means no header.
dataStart= # Line that data starts; only needed if not immediately after header.
repeatAboveIfEmptyCol= # 'Fill in' value from row above for this column.
onlyIfCol= # Do not process if value in this column is empty
interpretAsNull= # NO SPACES
dataEnd= # Line on which data stops; only needed if non-data bottom matter (legends, footnotes, etc).
source $CSV2RDF4LOD_HOME/bin/convert.sh
#-----------------------------------
source $CSV2RDF4LOD_HOME/bin/convert-aggregate.sh
| timrdf/csv2rdf4lod-automation | test/source/lebot/domain-template-leaves-spaces/version/2011-Jun-09/convert-domain-template-leaves-spaces.sh | Shell | apache-2.0 | 2,599 |
#!/usr/bin/env bash
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o xtrace
set -o pipefail
# Update the gh-pages branch. Note that `cargo doc` is **not deterministic** so
# this should only be done when there is a real change.
readonly RUST_BRANCH=${1:-main}
readonly RUST_GH_BRANCH=gh-pages
if [ -z "${FORCE+x}" ]; then
readonly PREV_COMMIT=$(git log --oneline -n 1 ${RUST_GH_BRANCH} | sed 's/.*branch at \([0-9a-f]*\)/\1/')
readonly CHANGES=$(git diff "${PREV_COMMIT}..${RUST_BRANCH}" | grep -e '[+-]//[/!]')
if [ -z "${CHANGES}" ]; then
echo "No doc comment changes found in ${PREV_COMMIT}..${RUST_BRANCH} subdir rust/"
exit 0
fi
fi
git switch "${RUST_BRANCH}"
readonly RUST_BRANCH_SHA1=$(git rev-parse --short HEAD)
readonly RUST_BRANCH_SUBJECT=$(git log -n 1 --format=format:%s)
readonly COMMIT_MESSAGE=$(cat <<-END
Update Rust docs to ${RUST_BRANCH} branch at ${RUST_BRANCH_SHA1}
Auto-generated from commit ${RUST_BRANCH_SHA1} ("${RUST_BRANCH_SUBJECT}").
END
)
readonly TGZ_FILE="/tmp/coset-doc-${RUST_BRANCH_SHA1}.tgz"
# Build Cargo docs and save them off outside the repo
(
rm -rf target/doc
cargo doc --no-deps
cargo deadlinks
cd target/doc || exit
tar czf "${TGZ_FILE}" ./*
)
# Shift to ${RUST_GH_BRANCH} branch and replace contents of (just) ./rust/
git switch ${RUST_GH_BRANCH}
readonly DOC_DIR=rust
rm -rf ${DOC_DIR}
mkdir ${DOC_DIR}
(
cd "${DOC_DIR}" || exit
tar xzf "${TGZ_FILE}"
)
# Commit any differences
git add "${DOC_DIR}"
git commit --message="${COMMIT_MESSAGE}"
git switch "${RUST_BRANCH}"
| google/coset | scripts/build-gh-pages.sh | Shell | apache-2.0 | 2,130 |
mkdir ~/src
cd ~/src/
#mv victor victor_old
mkdir ~/src/victor
cd ~/src/victor
git clone https://github.com/bayvictor/distributed-polling-system.git
# ./down.sh
cd distributed-polling-system/
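# Compare the repo's bin/ with ~/bin, then generate cpall.sh (copy files present only in ~/bin into bin/) and addall.sh (git add, commit and push them)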
diff bin ~/bin -dup >../diff.txt
cat ../diff.txt |grep -e ^Only >../test.txt
cat ../test.txt |grep home >../home.txt
cat ../home.txt|cut -d":" -f2|sed 's|^ |cp ~/bin/|g;s/$/ bin\/ /g' >../cpall.sh
chmod +x ../*.sh
chmod +x bin -R
chmod +x ~/bin -R
../cpall.sh
cat ../home.txt|cut -d":" -f2|sed 's|^ |git add bin/|g;' >../addall.sh
echo "git commit -m \"1st adding\"" >>../addall.sh
echo "git pull;git push" >>../addall.sh
bin/git_config.sh
../addall.sh
| bayvictor/distributed-polling-system | bin/curdir_git_sync_bin_dir.sh | Shell | apache-2.0 | 669 |
python -c "import socket; s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM); s.connect(('8.8.8.8', 80)); print(s.getsockname()[0]); s.close()"
| bayvictor/distributed-polling-system | bin/find_localhost_ip.sh | Shell | apache-2.0 | 147 |
#!/bin/sh
javac -Xlint:unchecked Jcp.java
| drednout/java_learning | jcp/build.sh | Shell | bsd-2-clause | 43 |
# Convert Google's zipped csv files to year-by-year
# Usage:
# ./ngram.sh IN START END OUT
# IN - folder where the zip files are kept. This script reads all of them
# START - First year to process
# END - Last year to process
# OUT - Output folder.
# e.g.
# ./ngram.sh ~/Downloads 1789 2009 year
IN=$1
START=$2
END=$3
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
OUT=$DIR/$4
for z in $(find $IN -name "*.zip")
do
unzip -p $z > $z.temp
for i in $(seq $START $END)
do
if [ "$i" = "$START" ]
then
>"$OUT/$i.gz"
fi
echo "$z for $i"
grep -P "\t$i\t" $z.temp | gzip >> "$OUT/$i.gz"
done
rm $z.temp
done
| organisciak/field-exam | ngrams/ngram.sh | Shell | bsd-2-clause | 635 |
#!/bin/sh
set -e
SLITE_LIST=`cat sippy_lite.list`
git checkout sippy_git_master
git pull
git branch -D sippy_git_master_toplevel
git subtree split --prefix=sippy --branch=sippy_git_master_toplevel
git checkout sippy_git_master_toplevel
mkdir -p sippy_lite/Math
mkdir -p sippy_lite/Time
for f in ${SLITE_LIST}
do
git mv ${f} sippy_lite/${f}
done
git rm .gitignore dictionary *.py
git commit -m "Move files in place."
git checkout master
git pull
git merge sippy_git_master_toplevel
git add sippy_lite/.gitignore rtp_cluster_client.py LICENSE
git commit
git push
| sippy/rtp_cluster | sippy_lite/update_sippy_lite.sh | Shell | bsd-2-clause | 565 |
#!/bin/sh
set -o errexit
set -o nounset
abort() {
echo "$@"
exit 1
}
ensure_root_dir() {
if [ -z "${ROOT:-}" ]; then
ROOT="$(pwd)"
export ROOT
if [ ! -e "${ROOT}/test/script/setup.sh" ]; then
abort "${ROOT} is not a valid test root."
fi
fi
}
find_tmp_dir() {
ensure_root_dir
TMP_DIR="${ROOT}/test/tmp"
export TMP_DIR
}
kill_ssh_agent() {
if [ -e "${TMP_DIR}/env" ]; then
eval "$(cat "${TMP_DIR}/env")"
if [ -n "${SSH_AGENT_PID:-}" ]; then
kill -9 "${SSH_AGENT_PID}" || true
fi
if [ -n "${SSH_AUTH_SOCK:-}" ]; then
rm -f "${SSH_AUTH_SOCK}"
fi
fi
}
kill_sshd() {
if [ -e "${TMP_DIR}/env" ]; then
eval "$(cat "${TMP_DIR}/env")"
if [ -n "${SSHD_PID:-}" ]; then
kill -9 "${SSHD_PID}" || true
fi
fi
}
ensure_root_dir
find_tmp_dir
if [ -d "${TMP_DIR}" ]; then
kill_ssh_agent
kill_sshd
rm -r "${TMP_DIR}"
fi
| conormcd/clj-libssh2 | test/script/teardown.sh | Shell | bsd-2-clause | 907 |
#!/bin/sh
# Copyright 2009-2012 Urban Airship Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE URBAN AIRSHIP INC ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL URBAN AIRSHIP INC OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
echo "Checking for file $SRCROOT/AirshipConfig.plist..."
if [ -f "$SRCROOT/AirshipConfig.plist" ];
then
echo "Found AirshipConfig.plist, and copying to $TARGET_BUILD_DIR/$EXECUTABLE_FOLDER_PATH"
cp "$SRCROOT/AirshipConfig.plist" "$TARGET_BUILD_DIR/$EXECUTABLE_FOLDER_PATH/"
else
echo "Did not find AirshipConfig.plist"
fi
| jamesstout/ios-library_LITE | PushSample/checkConfig.sh | Shell | bsd-2-clause | 1,664 |
#!/bin/bash
# Compile a language knowledge base from a grammar
# [20080707] (air)
# :: :::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: ::::::
# :: FIRST: Copy this script into your Project root folder ::::::
here="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# :: Combine the grammar files into a main source file
rm $here/src/Kade.gra
cat $here/src/*.gra >> $here/src/Kade.gra
# :: SECOND: Change the following line to point to your Logios installation ::::::
# :: or have the environment variable set appropriately ::::::
cd "$( dirname "${BASH_SOURCE[0]}" )"
cd ../../../Library_Logios
LOGIOS_ROOT=$(pwd)
# :: CHANGE THESE LABELS AS PER YOUR PROJECT; OUTPUT FILES WILL BE NAMED ACCORDINGLY ::::::
PROJECT="Kade"
INSTANCE="Kade"
# :: :::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: ::::::
# :: Compile language knowledge bases from the grammar
# :: --inpath and --outpath give you control over actual locations
INPATH=$here/src
OUTPATH=$here/Grammar
mkdir -p $OUTPATH
perl $LOGIOS_ROOT/Tools/MakeLanguage/make_language.pl \
--logios $LOGIOS_ROOT \
--inpath $INPATH --outpath $OUTPATH \
--project $PROJECT --instance $INSTANCE \
--force
#
rm $here/src/Kade.gra
| minigeek/MMDAgent | Release/AppData/Logios/CompileLanguage.sh | Shell | bsd-3-clause | 1,363 |
#!/bin/sh
sudo apt-get update
sudo apt-get upgrade
# These aren't necessary per se but I like having
# them around (20140125/straup)
# sudo apt-get install sysstat htop unzip
# sudo apt-get install tcsh emacs24-nox
# Chances are good you will have already installed
# git if you're reading this file (20140125/straup)
# sudo apt-get install git
sudo apt-get install make tesseract-ocr python-setuptools
sudo easy_install flask
sudo easy_install flask-cors
# Again, not strictly necessary but you'll probably
# install this sooner or later... (20140125/straup)
sudo apt-get install gunicorn
| cooperhewitt/label-whisperer | ubuntu/setup.sh | Shell | bsd-3-clause | 598 |
#!/bin/bash
browserify -t jadeify client/amp.js -o client/amp.bundle.js
node server.js
| benbenolson/NetFlow-Viewer | server/run.sh | Shell | bsd-3-clause | 88 |
#!/bin/bash
MD=$(which md5sum)
if [ "$MD" == "" ]; then
MD=$(which md5)
fi
if [ "$MD" == "" ]; then
echo "Missing MD5!"
exit 1
fi
VERBOSE=""
if [[ "$1" == "-v" || "$1" == "-vv" || "$1" == "-vvv" ]]; then
VERBOSE="$1"
shift
fi
if [ "$1" == "" ]; then
rm -f test/err
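# No argument given: re-invoke this script on every .mvp/.mvpt test script under src/test-scripts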
find src/test-scripts -name '*.mvp' -exec $0 $VERBOSE \{\} \;
find src/test-scripts -name '*.mvpt' -exec $0 $VERBOSE \{\} \;
if [ -e test/err ]; then
rm test/err
exit 1
fi
else
RET=0
mkdir -p test/run
echo "global_foo = \"bar\"" > test/run/.cgpiperc
echo "bar = \"baz\"" > test/run/global.incl
touch test/file1 test/file2
if [ "$(echo $1 | grep ".mvpt$")" != "" ]; then
CGPIPE_HOME=test/run dist/cgpipe $VERBOSE -nolog -f $1 > test/.testout 2> test/.testerr
else
CGPIPE_HOME=test/run $1 > test/.testout 2> test/.testerr
fi
if [ "$VERBOSE" = "-vvv" ]; then
cat test/.testerr
fi
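# Normalize both the test output and the recorded .good file (drop comments, blank lines, leading whitespace and error details) and compare their MD5 sums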
TEST=$(cat test/.testout | grep -v '^#' | grep -v '^$' | sed -e 's/^[[:blank:]]*//g' | sed -e 's/CGPIPE ERROR.*/CGPIPE ERROR/g' | $MD)
GOOD=$(cat $1.good | grep -v '^#' | grep -v '^$' |sed -e 's/^[[:blank:]]*//g' | $MD)
if [ "$TEST" != "$GOOD" ]; then
echo "$1 ERROR"
touch test/err
RET=1
else
echo "$1 OK"
fi
if [ "$VERBOSE" != "" ]; then
cat test/.testout | grep -v '^#' | grep -v '^$' | sed -e 's/^[[:blank:]]*//g' | sed -e 's/CGPIPE ERROR.*/CGPIPE ERROR/g' > test/.testout1
cat $1.good | grep -v '^#' | grep -v '^$' | sed -e 's/^[[:blank:]]*//g' | sed -e 's/CGPIPE ERROR.*/CGPIPE ERROR/g' > test/.testout2
echo "[EXPECTED]"
cat test/.testout2
echo ""
echo "[GOT]"
cat test/.testout1
if [ "$TEST" != "$GOOD" ]; then
echo ""
echo "[DIFF]"
diff -y test/.testout1 test/.testout2
fi
rm test/.testout1 test/.testout2
fi
rm test/file1 test/file2
rm test/.testout
rm test/.testerr
rm test/run/*
exit $RET
fi
| compgen-io/cgpipe | test.sh | Shell | bsd-3-clause | 2,073 |
# This script is for installing dependencies in Linux for testing
sudo apt-get update &&
sudo apt-get install -y python-software-properties python g++ make clang build-essential &&
# sudo add-apt-repository ppa:ubuntugis/ubuntugis-unstable -y &&
# sudo apt-get update &&
# sudo apt-get install -y postgresql-9.1 postgresql-server-dev-9.1 postgresql-plpython-9.1 postgresql-contrib &&
# sudo apt-get install -y libxml2-dev libgeos-dev libproj-dev libgdal-dev libfreexl-dev sqlite3 postgis &&
sudo add-apt-repository -y ppa:chris-lea/node.js &&
sudo apt-get install -y nodejs npm &&
sudo npm install -g node-gyp
# cd ~
# wget http://www.gaia-gis.it/gaia-sins/freexl-1.0.0e.tar.gz && tar -xvf freexl-1.0.0e.tar.gz
# wget http://download.osgeo.org/geos/geos-3.3.7.tar.bz2 && tar -xvf geos-3.3.7.tar.bz2
# wget http://download.osgeo.org/postgis/source/postgis-2.0.2.tar.gz && tar -xvf postgis-2.0.2.tar.gz
# wget http://download.osgeo.org/proj/proj-4.8.0.tar.gz && tar -xvf proj-4.8.0.tar.gz
# wget http://ftp.gnu.org/pub/gnu/libiconv/libiconv-1.14.tar.gz && tar -xvf libiconv-1.14.tar.gz
# wget http://www.gaia-gis.it/gaia-sins/libspatialite-sources/libspatialite-4.0.0.tar.gz && tar -xvf libspatialite-4.0.0.tar.gz
| zhm/node-spatialite | install_deps.sh | Shell | bsd-3-clause | 1,215 |
make -C libs/ilclient clean
make -C libs/vgfont clean
make -C hello_world clean
make -C hello_triangle clean
make -C hello_triangle2 clean
make -C hello_video clean
make -C hello_audio clean
make -C hello_font clean
make -C hello_dispmanx clean
make -C hello_tiger clean
make -C hello_encode clean
make -C hello_jpeg clean
make -C hello_videocube clean
make -C hello_teapot clean
make -C hello_fft clean
make -C libs/ilclient
make -C libs/vgfont
make -C hello_world
make -C hello_triangle
make -C hello_triangle2
make -C hello_video
make -C hello_audio
make -C hello_font
make -C hello_dispmanx
make -C hello_tiger
make -C hello_encode
make -C hello_jpeg
make -C hello_videocube
make -C hello_teapot
make -C hello_fft
| Razdroid/android_vendor_rpi_userland | host_applications/linux/apps/hello_pi/rebuild.sh | Shell | bsd-3-clause | 726 |
# Copyright (c) 2013 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
EXECUTABLES="bdftopcf${NACL_EXEEXT}"
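# -Dmain=nacl_main renames the port's main() so the NaCl CLI startup code (NACL_CLI_MAIN_LIB) can supply the real entry point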
NACLPORTS_CPPFLAGS+=" -Dmain=nacl_main"
NACLPORTS_LDFLAGS+=" -Dmain=nacl_main"
export LIBS="-Wl,--undefined=nacl_main -lz \
${NACL_CLI_MAIN_LIB} -lppapi_simple -lnacl_io -lppapi -l${NACL_CXX_LIB}"
if [ "${NACL_LIBC}" = "newlib" ]; then
NACLPORTS_CPPFLAGS+=" -I${NACLPORTS_INCLUDE}/glibc-compat"
export LIBS+=" -lglibc-compat"
fi
PublishStep() {
PublishByArchForDevEnv
}
| Schibum/naclports | ports/bdftopcf/build.sh | Shell | bsd-3-clause | 590 |
#!/bin/bash
# Creates the salt used to protect all applications from csrf attacks.
set -e -x
source ../kube/config.sh
source ../bash/ramdisk.sh
cd /tmp/ramdisk
head -c 32 /dev/urandom > salt.txt
kubectl create secret generic csrf-salt --from-file=salt.txt
cd -
| google/skia-buildbot | kube/create-csrf-salt-secret.sh | Shell | bsd-3-clause | 263 |
#!/bin/sh
# $FreeBSD$
# Copyright (c) 2009 Douglas Barton
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
. /etc/rc.subr
load_rc_config 'XXX'
usage () {
echo ''
echo 'Usage:'
echo "${0##*/} -e"
echo "${0##*/} -R"
echo "${0##*/} [-v] -l | -r"
echo "${0##*/} [-v] <rc.d script> start|stop|etc."
echo "${0##*/} -h"
echo ''
echo '-e Show services that are enabled'
echo "-R Stop and start enabled $local_startup services"
echo "-l List all scripts in /etc/rc.d and $local_startup"
echo '-r Show the results of boot time rcorder'
echo '-v Verbose'
echo ''
}
while getopts 'ehlrRv' COMMAND_LINE_ARGUMENT ; do
case "${COMMAND_LINE_ARGUMENT}" in
e) ENABLED=eopt ;;
h) usage ; exit 0 ;;
l) LIST=lopt ;;
r) RCORDER=ropt ;;
R) RESTART=Ropt ;;
v) VERBOSE=vopt ;;
*) usage ; exit 1 ;;
esac
done
shift $(( $OPTIND - 1 ))
if [ -n "$RESTART" ]; then
skip="-s nostart"
if [ `/sbin/sysctl -n security.jail.jailed` -eq 1 ]; then
skip="$skip -s nojail"
fi
[ -n "$local_startup" ] && find_local_scripts_new
files=`rcorder ${skip} ${local_rc} 2>/dev/null`
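# Stop the enabled local services in reverse dependency order, then start them again in rcorder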
for file in `reverse_list ${files}`; do
if grep -q ^rcvar $file; then
eval `grep ^name= $file`
eval `grep ^rcvar $file`
checkyesno $rcvar 2>/dev/null && run_rc_script ${file} stop
fi
done
for file in $files; do
if grep -q ^rcvar $file; then
eval `grep ^name= $file`
eval `grep ^rcvar $file`
checkyesno $rcvar 2>/dev/null && run_rc_script ${file} start
fi
done
exit 0
fi
if [ -n "$ENABLED" -o -n "$RCORDER" ]; then
# Copied from /etc/rc
skip="-s nostart"
if [ `/sbin/sysctl -n security.jail.jailed` -eq 1 ]; then
skip="$skip -s nojail"
fi
[ -n "$local_startup" ] && find_local_scripts_new
files=`rcorder ${skip} /etc/rc.d/* ${local_rc} 2>/dev/null`
fi
if [ -n "$ENABLED" ]; then
for file in $files; do
if grep -q ^rcvar $file; then
eval `grep ^name= $file`
eval `grep ^rcvar $file`
checkyesno $rcvar 2>/dev/null && echo $file
fi
done
exit 0
fi
if [ -n "$LIST" ]; then
for dir in /etc/rc.d $local_startup; do
[ -n "$VERBOSE" ] && echo "From ${dir}:"
cd $dir && for file in *; do echo $file; done
done
exit 0
fi
if [ -n "$RCORDER" ]; then
for file in $files; do
echo $file
if [ -n "$VERBOSE" ]; then
case "$file" in
*/${early_late_divider})
echo '========= Early/Late Divider =========' ;;
esac
fi
done
exit 0
fi
if [ $# -gt 1 ]; then
script=$1
shift
else
usage
exit 1
fi
cd /
for dir in /etc/rc.d $local_startup; do
if [ -x "$dir/$script" ]; then
[ -n "$VERBOSE" ] && echo "$script is located in $dir"
exec env -i HOME=/ PATH=/sbin:/bin:/usr/sbin:/usr/bin $dir/$script $*
fi
done
# If the script was not found
echo "$script does not exist in /etc/rc.d or the local startup"
echo "directories (${local_startup})"
exit 1
| jhbsz/OSI-OS | usr.sbin/service/service.sh | Shell | bsd-3-clause | 4,052 |
#!/bin/bash
if [ "$#" != "4" ]; then
echo "ID algo1 algo2 algo3"
exit
fi
id=$1
algo1=$2
algo2=$3
algo3=$4
list=../data/sequin.list
results=../results/sequin
abd="0.01 1.0 2.5 5.0 7.5 10"
#abd="0.01 1 2.5 5 7.5 10 25 50 75 100"
#echo "#accession algorithm aligner sensitivity(%) precision(%)"
for aa in `echo "tophat star hisat"`
do
cc=""
#for bb in `echo "1.0 1.5 2.0 2.5 3.0 3.5 4.0 4.5 5.0 5.5 6.0 6.5 7.0 7.5 8.0 8.5 9.0 9.5"`
for bb in `echo $abd`
do
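# Pull the intron-chain figures (fields 4 and 6 of the 'Intron chain' line in gffmul.stats) for each abundance cutoff; empty values default to 0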
x0=`cat $results/$id.$aa/$algo1.$bb/gffmul.stats | grep Query | grep mRNAs | head -n 1 | awk '{print $5}'`
x1=`cat $results/$id.$aa/$algo1.$bb/gffmul.stats | grep Matching | grep intron | grep chain | head -n 1 | awk '{print $4}'`
x2=`cat $results/$id.$aa/$algo1.$bb/gffmul.stats | grep Intron | grep chain | head -n 1 | awk '{print $4}'`
x3=`cat $results/$id.$aa/$algo1.$bb/gffmul.stats | grep Intron | grep chain | head -n 1 | awk '{print $6}'`
if [ "$x0" == "" ]; then x0="0"; fi
if [ "$x1" == "" ]; then x1="0"; fi
if [ "$x2" == "" ]; then x2="0"; fi
if [ "$x3" == "" ]; then x3="0"; fi
cc="$cc$x2 $x3 "
done
echo "$cc"
done
for aa in `echo "tophat star hisat"`
do
cc=""
for bb in `echo $abd`
do
x0=`cat $results/$id.$aa/$algo2.$bb/gffmul.stats | grep Query | grep mRNAs | head -n 1 | awk '{print $5}'`
x1=`cat $results/$id.$aa/$algo2.$bb/gffmul.stats | grep Matching | grep intron | grep chain | head -n 1 | awk '{print $4}'`
x2=`cat $results/$id.$aa/$algo2.$bb/gffmul.stats | grep Intron | grep chain | head -n 1 | awk '{print $4}'`
x3=`cat $results/$id.$aa/$algo2.$bb/gffmul.stats | grep Intron | grep chain | head -n 1 | awk '{print $6}'`
if [ "$x0" == "" ]; then x0="0"; fi
if [ "$x1" == "" ]; then x1="0"; fi
if [ "$x2" == "" ]; then x2="0"; fi
if [ "$x3" == "" ]; then x3="0"; fi
cc="$cc$x2 $x3 "
done
echo "$cc"
done
for aa in `echo "tophat star"`
do
cc=""
for bb in `echo $abd`
do
x0=`cat $results/$id.$aa/$algo3.$bb/gffmul.stats | grep Query | grep mRNAs | head -n 1 | awk '{print $5}'`
x1=`cat $results/$id.$aa/$algo3.$bb/gffmul.stats | grep Matching | grep intron | grep chain | head -n 1 | awk '{print $4}'`
x2=`cat $results/$id.$aa/$algo3.$bb/gffmul.stats | grep Intron | grep chain | head -n 1 | awk '{print $4}'`
x3=`cat $results/$id.$aa/$algo3.$bb/gffmul.stats | grep Intron | grep chain | head -n 1 | awk '{print $6}'`
if [ "$x0" == "" ]; then x0="0"; fi
if [ "$x1" == "" ]; then x1="0"; fi
if [ "$x2" == "" ]; then x2="0"; fi
if [ "$x3" == "" ]; then x3="0"; fi
cc="$cc$x2 $x3 "
done
echo "$cc"
done
| Kingsford-Group/scalloptest | plots/collect.sequin.roc.sh | Shell | bsd-3-clause | 2,563 |
#!/bin/bash
fw_depends mysql java sbt
cd play2-java-jooq-hikaricp
rm -rf target/ project/target/ project/project/
sbt stage
target/universal/stage/bin/play2-java-jooq-hikaricp -Dplay.server.provider=play.core.server.AkkaHttpServerProvider &
| valum-framework/FrameworkBenchmarks | frameworks/Java/play2-java/setup_java_jooq_hikaricp.sh | Shell | bsd-3-clause | 245 |
#!/bin/sh
set -e
echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
SWIFT_STDLIB_PATH="${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}"
install_framework()
{
if [ -r "${BUILT_PRODUCTS_DIR}/$1" ]; then
local source="${BUILT_PRODUCTS_DIR}/$1"
elif [ -r "${BUILT_PRODUCTS_DIR}/$(basename "$1")" ]; then
local source="${BUILT_PRODUCTS_DIR}/$(basename "$1")"
elif [ -r "$1" ]; then
local source="$1"
fi
local destination="${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
if [ -L "${source}" ]; then
echo "Symlinked..."
source="$(readlink "${source}")"
fi
# use filter instead of exclude so missing patterns don't throw errors
echo "rsync -av --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${destination}\""
rsync -av --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${destination}"
local basename
basename="$(basename -s .framework "$1")"
binary="${destination}/${basename}.framework/${basename}"
if ! [ -r "$binary" ]; then
binary="${destination}/${basename}"
fi
# Strip invalid architectures so "fat" simulator / device frameworks work on device
if [[ "$(file "$binary")" == *"dynamically linked shared library"* ]]; then
strip_invalid_archs "$binary"
fi
# Resign the code if required by the build settings to avoid unstable apps
code_sign_if_enabled "${destination}/$(basename "$1")"
# Embed linked Swift runtime libraries. No longer necessary as of Xcode 7.
if [ "${XCODE_VERSION_MAJOR}" -lt 7 ]; then
local swift_runtime_libs
swift_runtime_libs=$(xcrun otool -LX "$binary" | grep --color=never @rpath/libswift | sed -E s/@rpath\\/\(.+dylib\).*/\\1/g | uniq -u && exit ${PIPESTATUS[0]})
for lib in $swift_runtime_libs; do
echo "rsync -auv \"${SWIFT_STDLIB_PATH}/${lib}\" \"${destination}\""
rsync -auv "${SWIFT_STDLIB_PATH}/${lib}" "${destination}"
code_sign_if_enabled "${destination}/${lib}"
done
fi
}
# Signs a framework with the provided identity
code_sign_if_enabled() {
if [ -n "${EXPANDED_CODE_SIGN_IDENTITY}" -a "${CODE_SIGNING_REQUIRED}" != "NO" -a "${CODE_SIGNING_ALLOWED}" != "NO" ]; then
# Use the current code_sign_identity
echo "Code Signing $1 with Identity ${EXPANDED_CODE_SIGN_IDENTITY_NAME}"
echo "/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} ${OTHER_CODE_SIGN_FLAGS} --preserve-metadata=identifier,entitlements \"$1\""
/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} ${OTHER_CODE_SIGN_FLAGS} --preserve-metadata=identifier,entitlements "$1"
fi
}
# Strip invalid architectures
strip_invalid_archs() {
binary="$1"
# Get architectures for current file
archs="$(lipo -info "$binary" | rev | cut -d ':' -f1 | rev)"
stripped=""
for arch in $archs; do
if ! [[ "${VALID_ARCHS}" == *"$arch"* ]]; then
# Strip non-valid architectures in-place
lipo -remove "$arch" -output "$binary" "$binary" || exit 1
stripped="$stripped $arch"
fi
done
if [[ "$stripped" ]]; then
echo "Stripped $binary of architectures:$stripped"
fi
}
if [[ "$CONFIGURATION" == "Debug" ]]; then
install_framework "$BUILT_PRODUCTS_DIR/GTMOAuth2/GTMOAuth2.framework"
install_framework "$BUILT_PRODUCTS_DIR/GTMSessionFetcher/GTMSessionFetcher.framework"
install_framework "$BUILT_PRODUCTS_DIR/GoogleToolboxForMac/GoogleToolboxForMac.framework"
fi
if [[ "$CONFIGURATION" == "Release" ]]; then
install_framework "$BUILT_PRODUCTS_DIR/GTMOAuth2/GTMOAuth2.framework"
install_framework "$BUILT_PRODUCTS_DIR/GTMSessionFetcher/GTMSessionFetcher.framework"
install_framework "$BUILT_PRODUCTS_DIR/GoogleToolboxForMac/GoogleToolboxForMac.framework"
fi
| bryanro92/Recipe-Sharing | Pods/Target Support Files/Pods-Recipe-Sharing/Pods-Recipe-Sharing-frameworks.sh | Shell | mit | 3,973 |
#!/bin/bash
FOLDER=$1
cd ${FOLDER}
BRANCHES=$(git branch -a | wc -l);
TAGS=$(git tag -l | wc -l);
REVERTS=$(git log --oneline | grep 'revert' | wc -l);
echo "{\"branches\": \"$BRANCHES\", \"tags\": \"$TAGS\", \"reverts\": \"$REVERTS\"}";
| benas/gossed | examples/git/git-stats.sh | Shell | mit | 242 |
#!/bin/bash
set -e
/usr/local/bin/provision-soa-service sdk_service
/usr/local/bin/provision-soa-service sdk_certify
exec "$@"
| gudTECH/retailops-sdk | verify/docker-entrypoint.sh | Shell | mit | 129 |
#!/bin/bash -e
export REDIS_DOWNLOAD_URL='http://download.redis.io/releases/redis-stable.tar.gz'
export REDIS_SERVICE_NAME='redis'
export REDIS_USER_NAME='redis'
export REDIS_GROUP_NAME='redis'
export REDIS_PORT='6379'
export REDIS_INSTALL_BIN_FOLDER='/opt/redis/bin'
export REDIS_INSTALL_CONFIG_FOLDER='/opt/redis/config'
export REDIS_INSTALL_DATA_FOLDER='/opt/redis/data'
export REDIS_VM_OVER_COMMIT_MEMORY='1'
| gdbtek/linux-cookbooks | cookbooks/redis/attributes/default.bash | Shell | mit | 418 |
#!/bin/bash
# OCO data
Rscript oco-download/01.oco.download.R
Rscript oco-download/02.oco.process.R
# MODIS data
Rscript modis-download/01-modis.get.site.info.R
Rscript modis-download/02-modis.download.R
Rscript modis-download/03-modis.process.R
# Flux tower data
Rscript flux-download/01.download.flux.R
Rscript flux-download/02.process.flux.R
Rscript flux-download/03.plot.flux.R
| EcoForecast/GPP | w1.prepare.data.sh | Shell | mit | 386 |
#!/bin/bash
rm -r tmp
rm -r installs
rm lib/virtual-network-interface/vnicppcodegen
| madMAx43v3r/virtual-network-tools | clean_all.sh | Shell | mit | 86 |
#!/bin/sh
set -e
sudo -k
echo "This script requires superuser authority to configure stns yum repository:"
sudo sh <<'SCRIPT'
set -x
# import GPG key
gpgkey_path=`mktemp`
curl -fsS -o $gpgkey_path https://repo.stns.jp/gpg/GPG-KEY-stns
rpm --import $gpgkey_path
rm $gpgkey_path
# add config for stns yum repos
cat >/etc/yum.repos.d/stns.repo <<'EOF';
[stns]
name=stns
baseurl=https://repo.stns.jp/centos/$basearch/$releasever
gpgcheck=1
EOF
SCRIPT
echo 'done'
| pyama86/STNS | package/scripts/yum-repo.sh | Shell | mit | 482 |
# == Name
#
# mysql.user
#
# === Description
#
# Manages MySQL users
#
# === Parameters
#
# * state: The state of the resource. Required. Default: present.
# * user: The username of the mysql user. Unique.
# * host: The host of the mysql user. Required. Unique.
# * password: The password of the mysql user.
#
# Unintuitively, user and password are optional because MySQL allows blank usernames and blank passwords.
#
# === Example
#
# ```bash
# mysql.user --user root --password password
# ```
#
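# A second call shape, shown for illustration only (the user, host, and
# password values below are hypothetical):
#
# ```bash
# mysql.user --state present --user app --host localhost --password s3cret
# mysql.user --state absent --user app --host localhost
# ```
#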
mysql.user() {
# Declare the resource
waffles_resource="mysql.user"
# Check if all dependencies are installed
local _wrd=("mysql")
if ! waffles.resource.check_dependencies "${_wrd[@]}" ; then
return 2
fi
# Resource Options
local -A options
waffles.options.create_option state "present"
waffles.options.create_option user ""
waffles.options.create_option host "__required__"
waffles.options.create_option password ""
waffles.options.parse_options "$@"
local _options_rc=$?
if [[ $_options_rc != 0 ]]; then
return $_options_rc
fi
# Local Variables
local _name="${options[user]}@${options[host]}"
local password
# Internal Resource configuration
if [[ -z ${options[password]} ]]; then
password=""
else
password=$(mysql -NBe "select password('${options[password]}')")
fi
# Process the resource
waffles.resource.process $waffles_resource "$_name"
}
mysql.user.read() {
# TODO
#local _user_query="SELECT MAX_USER_CONNECTIONS, MAX_CONNECTIONS, MAX_QUESTIONS, MAX_UPDATES, PASSWORD /*!50508 , PLUGIN */ FROM mysql.user WHERE CONCAT(user, '@', host) = '${options[user]}@${options[host]}'"
local _user_query="SELECT count(*) FROM mysql.user WHERE CONCAT(user, '@', host) = '${options[user]}@${options[host]}'"
local _user_count=$(mysql -NBe "${_user_query}")
if [[ $_user_count == 0 ]]; then
waffles_resource_current_state="absent"
return
fi
local _password_query="SELECT PASSWORD FROM mysql.user WHERE CONCAT(user, '@', host) = '${options[user]}@${options[host]}'"
local _password=$(mysql -NBe "${_password_query}")
if [[ $_password != $password ]]; then
waffles_resource_current_state="update"
return
fi
waffles_resource_current_state="present"
}
mysql.user.create() {
exec.capture_error "mysql -NBe \"CREATE USER '${options[user]}'@'${options[host]}' IDENTIFIED BY PASSWORD '${password}'\""
}
mysql.user.update() {
exec.capture_error "mysql -NBe \"SET PASSWORD FOR '${options[user]}'@'${options[host]}' = '${password}'\""
}
mysql.user.delete() {
exec.capture_error "mysql -NBe \"DROP USER '${options[user]}'@'${options[host]}'\""
}
|
wffls/waffles
|
resources/mysql_user.sh
|
Shell
|
mit
| 2,614 |
#!/bin/bash
# $1 = strip executable
# $2 = JSON input file
$1 $2 > out1.json
$1 out1.json > out2.json
cmp -s out1.json out2.json
|
nncarlson/petaca
|
test/yajl_fort/strip-cmp.sh
|
Shell
|
mit
| 130 |
#! /bin/sh
rm -rf publish
node ./bin/nodeppt release ppts/demo.md -a
cd publish
mv demo.html index.html
git init
git add -A
date_str=`date "+DATE: %m/%d/%Y%nTIME: %H:%M:%S"`
git commit -m "build with nodeppt on $date_str"
#exit
echo 'push remote github'
git push -u [email protected]:ksky521/nodeppt.git master:gh-pages --force
|
zkyeu/vuePPT
|
gh-page.sh
|
Shell
|
mit
| 328 |
#!/bin/bash
echo "Now you can connect to http://localhost:9090"
cd dev-resources/private/browser
python -m SimpleHTTPServer 9090
|
Lambda-X/replumb
|
scripts/browser-repl.sh
|
Shell
|
epl-1.0
| 130 |
#!/bin/sh
perl <<EOF
print "Hi from perl!\n";
exit(0);
EOF
|
ggnull35/scbind
|
examples/perldemo.sh
|
Shell
|
gpl-2.0
| 60 |
#!/bin/sh
ifconfig eth0 0.0.0.0 down
ifconfig eth1 0.0.0.0 down
ifconfig eth2 0.0.0.0 down
ifconfig eth3 0.0.0.0 down
#ifconfig bond0 10.4.53.196 netmask 255.255.255.0 up
ifconfig bond0 192.168.0.5 netmask 255.255.255.0 up
ifenslave bond0 eth0 eth1 eth2 eth3
#eth2 eth3
ifconfig bond0 down
echo balance-alb > /sys/class/net/bond0/bonding/mode
ifconfig bond0 up
#ifconfig bond0 10.4.53.196 netmask 255.255.255.0 up
#echo +eth1 > /sys/class/net/bond0/bonding/slaves
#echo +eth2 > /sys/class/net/bond0/bonding/slaves
#echo +eth3 > /sys/class/net/bond0/bonding/slaves
#echo 2 > proc/irq/smp_affinity
|
yellowback/ubuntu-precise-armadaxp
|
tools/nas/bond_4link_setup.sh
|
Shell
|
gpl-2.0
| 603 |
#!/usr/bin/env bash
#
# This file is part of Zenodo.
# Copyright (C) 2015-2018 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
if [ -d "$VIRTUAL_ENV/var/instance/data" ]; then
rm -Rf $VIRTUAL_ENV/var/instance/data
fi
# Remove all data
zenodo db destroy --yes-i-know
zenodo db init
zenodo queues purge
zenodo index destroy --force --yes-i-know
# Initialize everything again
script_path=$(dirname "$0")
"$script_path/init.sh"
|
slint/zenodo
|
scripts/recreate.sh
|
Shell
|
gpl-2.0
| 1,272 |
#!/bin/sh
#
# Copyright (c) 2009 Red Hat, Inc.
#
test_description='Test updating submodules
This test verifies that "git submodule update" detaches the HEAD of the
submodule and "git submodule update --rebase/--merge" does not detach the HEAD.
'
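# For illustration only (not part of the test flow):
#
# git submodule update submodule # leaves the submodule on a detached HEAD
# git submodule update --rebase submodule # keeps the submodule branch checked out
#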
. ./test-lib.sh
compare_head()
{
sha_master=$(git rev-list --max-count=1 master)
sha_head=$(git rev-list --max-count=1 HEAD)
test "$sha_master" = "$sha_head"
}
test_expect_success 'setup a submodule tree' '
echo file > file &&
git add file &&
test_tick &&
git commit -m upstream &&
git clone . super &&
git clone super submodule &&
git clone super rebasing &&
git clone super merging &&
git clone super none &&
(cd super &&
git submodule add ../submodule submodule &&
test_tick &&
git commit -m "submodule" &&
git submodule init submodule
) &&
(cd submodule &&
echo "line2" > file &&
git add file &&
git commit -m "Commit 2"
) &&
(cd super &&
(cd submodule &&
git pull --rebase origin
) &&
git add submodule &&
git commit -m "submodule update"
) &&
(cd super &&
git submodule add ../rebasing rebasing &&
test_tick &&
git commit -m "rebasing"
) &&
(cd super &&
git submodule add ../merging merging &&
test_tick &&
git commit -m "rebasing"
) &&
(cd super &&
git submodule add ../none none &&
test_tick &&
git commit -m "none"
) &&
git clone . recursivesuper &&
( cd recursivesuper &&
git submodule add ../super super
)
'
test_expect_success 'submodule update detaching the HEAD ' '
(cd super/submodule &&
git reset --hard HEAD~1
) &&
(cd super &&
(cd submodule &&
compare_head
) &&
git submodule update submodule &&
cd submodule &&
! compare_head
)
'
test_expect_success 'submodule update from subdirectory' '
(cd super/submodule &&
git reset --hard HEAD~1
) &&
mkdir super/sub &&
(cd super/sub &&
(cd ../submodule &&
compare_head
) &&
git submodule update ../submodule &&
cd ../submodule &&
! compare_head
)
'
supersha1=$(git -C super rev-parse HEAD)
mergingsha1=$(git -C super/merging rev-parse HEAD)
nonesha1=$(git -C super/none rev-parse HEAD)
rebasingsha1=$(git -C super/rebasing rev-parse HEAD)
submodulesha1=$(git -C super/submodule rev-parse HEAD)
pwd=$(pwd)
cat <<EOF >expect
Submodule path '../super': checked out '$supersha1'
Submodule path '../super/merging': checked out '$mergingsha1'
Submodule path '../super/none': checked out '$nonesha1'
Submodule path '../super/rebasing': checked out '$rebasingsha1'
Submodule path '../super/submodule': checked out '$submodulesha1'
EOF
cat <<EOF >expect2
Cloning into '$pwd/recursivesuper/super/merging'...
Cloning into '$pwd/recursivesuper/super/none'...
Cloning into '$pwd/recursivesuper/super/rebasing'...
Cloning into '$pwd/recursivesuper/super/submodule'...
Submodule 'merging' ($pwd/merging) registered for path '../super/merging'
Submodule 'none' ($pwd/none) registered for path '../super/none'
Submodule 'rebasing' ($pwd/rebasing) registered for path '../super/rebasing'
Submodule 'submodule' ($pwd/submodule) registered for path '../super/submodule'
done.
done.
done.
done.
EOF
test_expect_success 'submodule update --init --recursive from subdirectory' '
git -C recursivesuper/super reset --hard HEAD^ &&
(cd recursivesuper &&
mkdir tmp &&
cd tmp &&
git submodule update --init --recursive ../super >../../actual 2>../../actual2
) &&
test_i18ncmp expect actual &&
sort actual2 >actual2.sorted &&
test_i18ncmp expect2 actual2.sorted
'
cat <<EOF >expect2
Submodule 'foo/sub' ($pwd/withsubs/../rebasing) registered for path 'sub'
EOF
test_expect_success 'submodule update --init from and of subdirectory' '
git init withsubs &&
(cd withsubs &&
mkdir foo &&
git submodule add "$(pwd)/../rebasing" foo/sub &&
(cd foo &&
git submodule deinit -f sub &&
git submodule update --init sub 2>../../actual2
)
) &&
test_i18ncmp expect2 actual2
'
test_expect_success 'submodule update does not fetch already present commits' '
(cd submodule &&
echo line3 >> file &&
git add file &&
test_tick &&
git commit -m "upstream line3"
) &&
(cd super/submodule &&
head=$(git rev-parse --verify HEAD) &&
echo "Submodule path ${SQ}submodule$SQ: checked out $SQ$head$SQ" > ../../expected &&
git reset --hard HEAD~1
) &&
(cd super &&
git submodule update > ../actual 2> ../actual.err
) &&
test_i18ncmp expected actual &&
test_must_be_empty actual.err
'
test_expect_success 'submodule update should fail due to local changes' '
(cd super/submodule &&
git reset --hard HEAD~1 &&
echo "local change" > file
) &&
(cd super &&
(cd submodule &&
compare_head
) &&
test_must_fail git submodule update submodule
)
'
test_expect_success 'submodule update should throw away changes with --force ' '
(cd super &&
(cd submodule &&
compare_head
) &&
git submodule update --force submodule &&
cd submodule &&
! compare_head
)
'
test_expect_success 'submodule update --force forcibly checks out submodules' '
(cd super &&
(cd submodule &&
rm -f file
) &&
git submodule update --force submodule &&
(cd submodule &&
test "$(git status -s file)" = ""
)
)
'
test_expect_success 'submodule update --remote should fetch upstream changes' '
(cd submodule &&
echo line4 >> file &&
git add file &&
test_tick &&
git commit -m "upstream line4"
) &&
(cd super &&
git submodule update --remote --force submodule &&
cd submodule &&
test "$(git log -1 --oneline)" = "$(GIT_DIR=../../submodule/.git git log -1 --oneline)"
)
'
test_expect_success 'submodule update --remote should fetch upstream changes with .' '
(
cd super &&
git config -f .gitmodules submodule."submodule".branch "." &&
git add .gitmodules &&
git commit -m "submodules: update from the respective superproject branch"
) &&
(
cd submodule &&
echo line4a >> file &&
git add file &&
test_tick &&
git commit -m "upstream line4a" &&
git checkout -b test-branch &&
test_commit on-test-branch
) &&
(
cd super &&
git submodule update --remote --force submodule &&
git -C submodule log -1 --oneline >actual &&
git -C ../submodule log -1 --oneline master >expect &&
test_cmp expect actual &&
git checkout -b test-branch &&
git submodule update --remote --force submodule &&
git -C submodule log -1 --oneline >actual &&
git -C ../submodule log -1 --oneline test-branch >expect &&
test_cmp expect actual &&
git checkout master &&
git branch -d test-branch &&
git reset --hard HEAD^
)
'
test_expect_success 'local config should override .gitmodules branch' '
(cd submodule &&
git checkout test-branch &&
echo line5 >> file &&
git add file &&
test_tick &&
git commit -m "upstream line5" &&
git checkout master
) &&
(cd super &&
git config submodule.submodule.branch test-branch &&
git submodule update --remote --force submodule &&
cd submodule &&
test "$(git log -1 --oneline)" = "$(GIT_DIR=../../submodule/.git git log -1 --oneline test-branch)"
)
'
test_expect_success 'submodule update --rebase staying on master' '
(cd super/submodule &&
git checkout master
) &&
(cd super &&
(cd submodule &&
compare_head
) &&
git submodule update --rebase submodule &&
cd submodule &&
compare_head
)
'
test_expect_success 'submodule update --merge staying on master' '
(cd super/submodule &&
git reset --hard HEAD~1
) &&
(cd super &&
(cd submodule &&
compare_head
) &&
git submodule update --merge submodule &&
cd submodule &&
compare_head
)
'
test_expect_success 'submodule update - rebase in .git/config' '
(cd super &&
git config submodule.submodule.update rebase
) &&
(cd super/submodule &&
git reset --hard HEAD~1
) &&
(cd super &&
(cd submodule &&
compare_head
) &&
git submodule update submodule &&
cd submodule &&
compare_head
)
'
test_expect_success 'submodule update - checkout in .git/config but --rebase given' '
(cd super &&
git config submodule.submodule.update checkout
) &&
(cd super/submodule &&
git reset --hard HEAD~1
) &&
(cd super &&
(cd submodule &&
compare_head
) &&
git submodule update --rebase submodule &&
cd submodule &&
compare_head
)
'
test_expect_success 'submodule update - merge in .git/config' '
(cd super &&
git config submodule.submodule.update merge
) &&
(cd super/submodule &&
git reset --hard HEAD~1
) &&
(cd super &&
(cd submodule &&
compare_head
) &&
git submodule update submodule &&
cd submodule &&
compare_head
)
'
test_expect_success 'submodule update - checkout in .git/config but --merge given' '
(cd super &&
git config submodule.submodule.update checkout
) &&
(cd super/submodule &&
git reset --hard HEAD~1
) &&
(cd super &&
(cd submodule &&
compare_head
) &&
git submodule update --merge submodule &&
cd submodule &&
compare_head
)
'
test_expect_success 'submodule update - checkout in .git/config' '
(cd super &&
git config submodule.submodule.update checkout
) &&
(cd super/submodule &&
git reset --hard HEAD^
) &&
(cd super &&
(cd submodule &&
compare_head
) &&
git submodule update submodule &&
cd submodule &&
! compare_head
)
'
test_expect_success 'submodule update - command in .git/config' '
(cd super &&
git config submodule.submodule.update "!git checkout"
) &&
(cd super/submodule &&
git reset --hard HEAD^
) &&
(cd super &&
(cd submodule &&
compare_head
) &&
git submodule update submodule &&
cd submodule &&
! compare_head
)
'
test_expect_success 'submodule update - command in .gitmodules is rejected' '
test_when_finished "git -C super reset --hard HEAD^" &&
git -C super config -f .gitmodules submodule.submodule.update "!false" &&
git -C super commit -a -m "add command to .gitmodules file" &&
git -C super/submodule reset --hard $submodulesha1^ &&
test_must_fail git -C super submodule update submodule
'
test_expect_success 'fsck detects command in .gitmodules' '
git init command-in-gitmodules &&
(
cd command-in-gitmodules &&
git submodule add ../submodule submodule &&
test_commit adding-submodule &&
git config -f .gitmodules submodule.submodule.update "!false" &&
git add .gitmodules &&
test_commit configuring-update &&
test_must_fail git fsck
)
'
cat << EOF >expect
Execution of 'false $submodulesha1' failed in submodule path 'submodule'
EOF
test_expect_success 'submodule update - command in .git/config catches failure' '
(cd super &&
git config submodule.submodule.update "!false"
) &&
(cd super/submodule &&
git reset --hard $submodulesha1^
) &&
(cd super &&
test_must_fail git submodule update submodule 2>../actual
) &&
test_i18ncmp actual expect
'
cat << EOF >expect
Execution of 'false $submodulesha1' failed in submodule path '../submodule'
EOF
test_expect_success 'submodule update - command in .git/config catches failure -- subdirectory' '
(cd super &&
git config submodule.submodule.update "!false"
) &&
(cd super/submodule &&
git reset --hard $submodulesha1^
) &&
(cd super &&
mkdir tmp && cd tmp &&
test_must_fail git submodule update ../submodule 2>../../actual
) &&
test_i18ncmp actual expect
'
test_expect_success 'submodule update - command run for initial population of submodule' '
cat >expect <<-EOF &&
Execution of '\''false $submodulesha1'\'' failed in submodule path '\''submodule'\''
EOF
rm -rf super/submodule &&
test_must_fail git -C super submodule update 2>actual &&
test_i18ncmp expect actual &&
git -C super submodule update --checkout
'
cat << EOF >expect
Execution of 'false $submodulesha1' failed in submodule path '../super/submodule'
Failed to recurse into submodule path '../super'
EOF
test_expect_success 'recursive submodule update - command in .git/config catches failure -- subdirectory' '
(cd recursivesuper &&
git submodule update --remote super &&
git add super &&
git commit -m "update to latest to have more than one commit in submodules"
) &&
git -C recursivesuper/super config submodule.submodule.update "!false" &&
git -C recursivesuper/super/submodule reset --hard $submodulesha1^ &&
(cd recursivesuper &&
mkdir -p tmp && cd tmp &&
test_must_fail git submodule update --recursive ../super 2>../../actual
) &&
test_i18ncmp actual expect
'
test_expect_success 'submodule init does not copy command into .git/config' '
test_when_finished "git -C super update-index --force-remove submodule1" &&
test_when_finished git config -f super/.gitmodules \
--remove-section submodule.submodule1 &&
(cd super &&
git ls-files -s submodule >out &&
H=$(cut -d" " -f2 out) &&
mkdir submodule1 &&
git update-index --add --cacheinfo 160000 $H submodule1 &&
git config -f .gitmodules submodule.submodule1.path submodule1 &&
git config -f .gitmodules submodule.submodule1.url ../submodule &&
git config -f .gitmodules submodule.submodule1.update !false &&
test_must_fail git submodule init submodule1 &&
test_expect_code 1 git config submodule.submodule1.update >actual &&
test_must_be_empty actual
)
'
test_expect_success 'submodule init picks up rebase' '
(cd super &&
git config -f .gitmodules submodule.rebasing.update rebase &&
git submodule init rebasing &&
test "rebase" = "$(git config submodule.rebasing.update)"
)
'
test_expect_success 'submodule init picks up merge' '
(cd super &&
git config -f .gitmodules submodule.merging.update merge &&
git submodule init merging &&
test "merge" = "$(git config submodule.merging.update)"
)
'
test_expect_success 'submodule update --merge - ignores --merge for new submodules' '
test_config -C super submodule.submodule.update checkout &&
(cd super &&
rm -rf submodule &&
git submodule update submodule &&
git status -s submodule >expect &&
rm -rf submodule &&
git submodule update --merge submodule &&
git status -s submodule >actual &&
test_cmp expect actual
)
'
test_expect_success 'submodule update --rebase - ignores --rebase for new submodules' '
test_config -C super submodule.submodule.update checkout &&
(cd super &&
rm -rf submodule &&
git submodule update submodule &&
git status -s submodule >expect &&
rm -rf submodule &&
git submodule update --rebase submodule &&
git status -s submodule >actual &&
test_cmp expect actual
)
'
test_expect_success 'submodule update ignores update=merge config for new submodules' '
(cd super &&
rm -rf submodule &&
git submodule update submodule &&
git status -s submodule >expect &&
rm -rf submodule &&
git config submodule.submodule.update merge &&
git submodule update submodule &&
git status -s submodule >actual &&
git config --unset submodule.submodule.update &&
test_cmp expect actual
)
'
test_expect_success 'submodule update ignores update=rebase config for new submodules' '
(cd super &&
rm -rf submodule &&
git submodule update submodule &&
git status -s submodule >expect &&
rm -rf submodule &&
git config submodule.submodule.update rebase &&
git submodule update submodule &&
git status -s submodule >actual &&
git config --unset submodule.submodule.update &&
test_cmp expect actual
)
'
test_expect_success 'submodule init picks up update=none' '
(cd super &&
git config -f .gitmodules submodule.none.update none &&
git submodule init none &&
test "none" = "$(git config submodule.none.update)"
)
'
test_expect_success 'submodule update - update=none in .git/config' '
(cd super &&
git config submodule.submodule.update none &&
(cd submodule &&
git checkout master &&
compare_head
) &&
git diff --name-only >out &&
grep ^submodule$ out &&
git submodule update &&
git diff --name-only >out &&
grep ^submodule$ out &&
(cd submodule &&
compare_head
) &&
git config --unset submodule.submodule.update &&
git submodule update submodule
)
'
test_expect_success 'submodule update - update=none in .git/config but --checkout given' '
(cd super &&
git config submodule.submodule.update none &&
(cd submodule &&
git checkout master &&
compare_head
) &&
git diff --name-only >out &&
grep ^submodule$ out &&
git submodule update --checkout &&
git diff --name-only >out &&
! grep ^submodule$ out &&
(cd submodule &&
! compare_head
) &&
git config --unset submodule.submodule.update
)
'
test_expect_success 'submodule update --init skips submodule with update=none' '
(cd super &&
git add .gitmodules &&
git commit -m ".gitmodules"
) &&
git clone super cloned &&
(cd cloned &&
git submodule update --init &&
test_path_exists submodule/.git &&
test_path_is_missing none/.git
)
'
test_expect_success 'submodule update continues after checkout error' '
(cd super &&
git reset --hard HEAD &&
git submodule add ../submodule submodule2 &&
git submodule init &&
git commit -am "new_submodule" &&
(cd submodule2 &&
git rev-parse --verify HEAD >../expect
) &&
(cd submodule &&
test_commit "update_submodule" file
) &&
(cd submodule2 &&
test_commit "update_submodule2" file
) &&
git add submodule &&
git add submodule2 &&
git commit -m "two_new_submodule_commits" &&
(cd submodule &&
echo "" > file
) &&
git checkout HEAD^ &&
test_must_fail git submodule update &&
(cd submodule2 &&
git rev-parse --verify HEAD >../actual
) &&
test_cmp expect actual
)
'
test_expect_success 'submodule update continues after recursive checkout error' '
(cd super &&
git reset --hard HEAD &&
git checkout master &&
git submodule update &&
(cd submodule &&
git submodule add ../submodule subsubmodule &&
git submodule init &&
git commit -m "new_subsubmodule"
) &&
git add submodule &&
git commit -m "update_submodule" &&
(cd submodule &&
(cd subsubmodule &&
test_commit "update_subsubmodule" file
) &&
git add subsubmodule &&
test_commit "update_submodule_again" file &&
(cd subsubmodule &&
test_commit "update_subsubmodule_again" file
) &&
test_commit "update_submodule_again_again" file
) &&
(cd submodule2 &&
git rev-parse --verify HEAD >../expect &&
test_commit "update_submodule2_again" file
) &&
git add submodule &&
git add submodule2 &&
git commit -m "new_commits" &&
git checkout HEAD^ &&
(cd submodule &&
git checkout HEAD^ &&
(cd subsubmodule &&
echo "" > file
)
) &&
test_must_fail git submodule update --recursive &&
(cd submodule2 &&
git rev-parse --verify HEAD >../actual
) &&
test_cmp expect actual
)
'
test_expect_success 'submodule update exit immediately in case of merge conflict' '
(cd super &&
git checkout master &&
git reset --hard HEAD &&
(cd submodule &&
(cd subsubmodule &&
git reset --hard HEAD
)
) &&
git submodule update --recursive &&
(cd submodule &&
test_commit "update_submodule_2" file
) &&
(cd submodule2 &&
test_commit "update_submodule2_2" file
) &&
git add submodule &&
git add submodule2 &&
git commit -m "two_new_submodule_commits" &&
(cd submodule &&
git checkout master &&
test_commit "conflict" file &&
echo "conflict" > file
) &&
git checkout HEAD^ &&
(cd submodule2 &&
git rev-parse --verify HEAD >../expect
) &&
git config submodule.submodule.update merge &&
test_must_fail git submodule update &&
(cd submodule2 &&
git rev-parse --verify HEAD >../actual
) &&
test_cmp expect actual
)
'
test_expect_success 'submodule update exit immediately after recursive rebase error' '
(cd super &&
git checkout master &&
git reset --hard HEAD &&
(cd submodule &&
git reset --hard HEAD &&
git submodule update --recursive
) &&
(cd submodule &&
test_commit "update_submodule_3" file
) &&
(cd submodule2 &&
test_commit "update_submodule2_3" file
) &&
git add submodule &&
git add submodule2 &&
git commit -m "two_new_submodule_commits" &&
(cd submodule &&
git checkout master &&
test_commit "conflict2" file &&
echo "conflict" > file
) &&
git checkout HEAD^ &&
(cd submodule2 &&
git rev-parse --verify HEAD >../expect
) &&
git config submodule.submodule.update rebase &&
test_must_fail git submodule update &&
(cd submodule2 &&
git rev-parse --verify HEAD >../actual
) &&
test_cmp expect actual
)
'
test_expect_success 'add different submodules to the same path' '
(cd super &&
git submodule add ../submodule s1 &&
test_must_fail git submodule add ../merging s1
)
'
test_expect_success 'submodule add places git-dir in superprojects git-dir' '
(cd super &&
mkdir deeper &&
git submodule add ../submodule deeper/submodule &&
(cd deeper/submodule &&
git log > ../../expected
) &&
(cd .git/modules/deeper/submodule &&
git log > ../../../../actual
) &&
test_cmp expected actual
)
'
test_expect_success 'submodule update places git-dir in superprojects git-dir' '
(cd super &&
git commit -m "added submodule"
) &&
git clone super super2 &&
(cd super2 &&
git submodule init deeper/submodule &&
git submodule update &&
(cd deeper/submodule &&
git log > ../../expected
) &&
(cd .git/modules/deeper/submodule &&
git log > ../../../../actual
) &&
test_cmp expected actual
)
'
test_expect_success 'submodule add places git-dir in superprojects git-dir recursive' '
(cd super2 &&
(cd deeper/submodule &&
git submodule add ../submodule subsubmodule &&
(cd subsubmodule &&
git log > ../../../expected
) &&
git commit -m "added subsubmodule" &&
git push origin :
) &&
(cd .git/modules/deeper/submodule/modules/subsubmodule &&
git log > ../../../../../actual
) &&
git add deeper/submodule &&
git commit -m "update submodule" &&
git push origin : &&
test_cmp expected actual
)
'
test_expect_success 'submodule update places git-dir in superprojects git-dir recursive' '
mkdir super_update_r &&
(cd super_update_r &&
git init --bare
) &&
mkdir subsuper_update_r &&
(cd subsuper_update_r &&
git init --bare
) &&
mkdir subsubsuper_update_r &&
(cd subsubsuper_update_r &&
git init --bare
) &&
git clone subsubsuper_update_r subsubsuper_update_r2 &&
(cd subsubsuper_update_r2 &&
test_commit "update_subsubsuper" file &&
git push origin master
) &&
git clone subsuper_update_r subsuper_update_r2 &&
(cd subsuper_update_r2 &&
test_commit "update_subsuper" file &&
git submodule add ../subsubsuper_update_r subsubmodule &&
git commit -am "subsubmodule" &&
git push origin master
) &&
git clone super_update_r super_update_r2 &&
(cd super_update_r2 &&
test_commit "update_super" file &&
git submodule add ../subsuper_update_r submodule &&
git commit -am "submodule" &&
git push origin master
) &&
rm -rf super_update_r2 &&
git clone super_update_r super_update_r2 &&
(cd super_update_r2 &&
git submodule update --init --recursive >actual &&
test_i18ngrep "Submodule path .submodule/subsubmodule.: checked out" actual &&
(cd submodule/subsubmodule &&
git log > ../../expected
) &&
(cd .git/modules/submodule/modules/subsubmodule &&
git log > ../../../../../actual
) &&
test_cmp expected actual
)
'
test_expect_success 'submodule add properly re-creates deeper level submodules' '
(cd super &&
git reset --hard master &&
rm -rf deeper/ &&
git submodule add --force ../submodule deeper/submodule
)
'
test_expect_success 'submodule update properly revives a moved submodule' '
(cd super &&
H=$(git rev-parse --short HEAD) &&
git commit -am "pre move" &&
H2=$(git rev-parse --short HEAD) &&
git status >out &&
sed "s/$H/XXX/" out >expect &&
H=$(cd submodule2 && git rev-parse HEAD) &&
git rm --cached submodule2 &&
rm -rf submodule2 &&
mkdir -p "moved/sub module" &&
git update-index --add --cacheinfo 160000 $H "moved/sub module" &&
git config -f .gitmodules submodule.submodule2.path "moved/sub module" &&
git commit -am "post move" &&
git submodule update &&
git status > out &&
sed "s/$H2/XXX/" out >actual &&
test_cmp expect actual
)
'
test_expect_success SYMLINKS 'submodule update can handle symbolic links in pwd' '
mkdir -p linked/dir &&
ln -s linked/dir linkto &&
(cd linkto &&
git clone "$TRASH_DIRECTORY"/super_update_r2 super &&
(cd super &&
git submodule update --init --recursive
)
)
'
test_expect_success 'submodule update clone shallow submodule' '
test_when_finished "rm -rf super3" &&
first=$(git -C cloned rev-parse HEAD:submodule) &&
second=$(git -C submodule rev-parse HEAD) &&
commit_count=$(git -C submodule rev-list --count $first^..$second) &&
git clone cloned super3 &&
pwd=$(pwd) &&
(
cd super3 &&
sed -e "s#url = ../#url = file://$pwd/#" <.gitmodules >.gitmodules.tmp &&
mv -f .gitmodules.tmp .gitmodules &&
git submodule update --init --depth=$commit_count &&
git -C submodule log --oneline >out &&
test_line_count = 1 out
)
'
test_expect_success 'submodule update clone shallow submodule outside of depth' '
test_when_finished "rm -rf super3" &&
git clone cloned super3 &&
pwd=$(pwd) &&
(
cd super3 &&
sed -e "s#url = ../#url = file://$pwd/#" <.gitmodules >.gitmodules.tmp &&
mv -f .gitmodules.tmp .gitmodules &&
# Some protocol versions (e.g. 2) support fetching
# unadvertised objects, so restrict this test to v0.
test_must_fail env GIT_TEST_PROTOCOL_VERSION=0 \
git submodule update --init --depth=1 2>actual &&
test_i18ngrep "Direct fetching of that commit failed." actual &&
git -C ../submodule config uploadpack.allowReachableSHA1InWant true &&
git submodule update --init --depth=1 >actual &&
git -C submodule log --oneline >out &&
test_line_count = 1 out
)
'
test_expect_success 'submodule update --recursive drops module name before recursing' '
(cd super2 &&
(cd deeper/submodule/subsubmodule &&
git checkout HEAD^
) &&
git submodule update --recursive deeper/submodule >actual &&
test_i18ngrep "Submodule path .deeper/submodule/subsubmodule.: checked out" actual
)
'
test_expect_success 'submodule update can be run in parallel' '
(cd super2 &&
GIT_TRACE=$(pwd)/trace.out git submodule update --jobs 7 &&
grep "7 tasks" trace.out &&
git config submodule.fetchJobs 8 &&
GIT_TRACE=$(pwd)/trace.out git submodule update &&
grep "8 tasks" trace.out &&
GIT_TRACE=$(pwd)/trace.out git submodule update --jobs 9 &&
grep "9 tasks" trace.out
)
'
test_expect_success 'git clone passes the parallel jobs config on to submodules' '
test_when_finished "rm -rf super4" &&
GIT_TRACE=$(pwd)/trace.out git clone --recurse-submodules --jobs 7 . super4 &&
grep "7 tasks" trace.out &&
rm -rf super4 &&
git config --global submodule.fetchJobs 8 &&
GIT_TRACE=$(pwd)/trace.out git clone --recurse-submodules . super4 &&
grep "8 tasks" trace.out &&
rm -rf super4 &&
GIT_TRACE=$(pwd)/trace.out git clone --recurse-submodules --jobs 9 . super4 &&
grep "9 tasks" trace.out &&
rm -rf super4
'
test_done
|
brunosantiagovazquez/git
|
t/t7406-submodule-update.sh
|
Shell
|
gpl-2.0
| 26,863 |
#!/bin/sh
URL="https://codeload.github.com";
AUTHOR="philwareham";
REPO="textpattern-classic-admin-theme";
EXTRACT="textpattern/admin-themes/classic";
TAG="master";
if [ ! -z "$1" ]; then
TAG="$1";
fi
echo "Get repo: $REPO :: $TAG";
echo "-------------------------------------------------------------------------------";
rm -rf $EXTRACT/*;
curl $URL/$AUTHOR/$REPO/tar.gz/$TAG | tar xz --strip=3 --directory=$EXTRACT $REPO-$TAG/dist/classic
|
jools-r/textpattern
|
.github/get-classic-admin-theme.sh
|
Shell
|
gpl-2.0
| 448 |
#!/bin/bash
if [ -n "$1" ]; then
host="$1"
else
echo "Usage: $0 container-name"
exit 1
fi
IPADDR=`docker inspect "--format={{.NetworkSettings.IPAddress}}" "$host"`
if [ -z "$IPADDR" ]; then
echo "Failed to find IP address for $host"
exit 1
else
echo "$IPADDR"
fi
ssh -t -l root "$IPADDR"
|
apergos/docker-lvs-nginx
|
ssh-to-instance.sh
|
Shell
|
gpl-2.0
| 316 |
#!/usr/bin/env bash
# vim: set sw=4 et sts=4 tw=80 :
# Copyright 2009 Ali Polatel <[email protected]>
# Distributed under the terms of the GNU General Public License v2
. test-lib.bash
clean_files+=( "lucifer.sam" )
start_test "t22-renameat-second-atfdcwd-deny"
SYDBOX_WRITE="${cwd}/see.emily.play" sydbox -- ./t22_renameat_second_atfdcwd
if [[ 0 == $? ]]; then
die "failed to deny rename"
elif [[ -f lucifer.sam ]]; then
die "file exists, failed to deny rename"
fi
end_test
start_test "t22-renameat-second-atfdcwd-write"
SYDBOX_WRITE="${cwd}" sydbox -- ./t22_renameat_second_atfdcwd
if [[ 0 != $? ]]; then
die "failed to allow renameat"
elif [[ ! -f lucifer.sam ]]; then
die "file doesn't exist, failed to allow renameat"
fi
end_test
|
larsuhartmann/sydbox
|
tests/progtests/t22-renameat-second-atfdcwd.bash
|
Shell
|
gpl-2.0
| 755 |
#!/bin/bash
# Deployer for Travis-CI
# Default Variables
#
# Here are all of the user-set variables used by Deployer.
# See the "Cross-platform deployment" page on SRB2 Wiki for documentation.
# Core Parameters
: ${DPL_ENABLED} # Enable Deployer behavior; must be set for any deployment activity
: ${DPL_TAG_ENABLED} # Trigger Deployer for all tag releases
: ${DPL_JOB_ENABLE_ALL} # Enable all jobs for deployment
: ${DPL_TERMINATE_TESTS} # Terminate all build test jobs (used in .travis.yml)
: ${DPL_TRIGGER} # Use a [word] in the commit message to trigger Deployer
: ${DPL_JOBNAMES} # Trigger Deployer by job name
: ${DPL_OSNAMES} # Trigger Deployer by OS name (osx,linux)
: ${DPL_BRANCHES} # Trigger Deployer by git branch name
# Job Parameters
: ${_DPL_JOB_ENABLED} # Enable Deployer for this specific job. DPL_ENABLED must be set too.
: ${_DPL_JOB_NAME} # Identifier for the job, used for logging and trigger word matching
: ${_DPL_FTP_TARGET} # Deploy to FTP
: ${_DPL_DPUT_TARGET} # Deploy to DPUT
: ${_DPL_PACKAGE_SOURCE} # Build packages into a Source distribution. Linux only.
: ${_DPL_PACKAGE_BINARY} # Build packages into a Binary distribution.
: ${_DPL_PACKAGE_MAIN:=1} # Build main installation package. Linux only; OS X assumes this.
: ${_DPL_PACKAGE_ASSET} # Build asset installation package. Linux only.
# Asset File Parameters
: ${ASSET_ARCHIVE_PATH:=https://github.com/mazmazz/SRB2/releases/download/SRB2_assets_220/SRB2-v220-assets.7z}
: ${ASSET_ARCHIVE_OPTIONAL_PATH:=https://github.com/mazmazz/SRB2/releases/download/SRB2_assets_220/SRB2-v220-optional-assets.7z}
: ${ASSET_FILES_HASHED:=srb2.pk3 zones.pk3 player.dta} # POST v2.2 NOTE: Don't forget to add patch.pk3!
: ${ASSET_FILES_DOCS:=README.txt LICENSE.txt LICENSE-3RD-PARTY.txt README-SDL.txt}
: ${ASSET_FILES_OPTIONAL_GET:=0}
# FTP Parameters
: ${DPL_FTP_PROTOCOL}
: ${DPL_FTP_USER}
: ${DPL_FTP_PASS}
: ${DPL_FTP_HOSTNAME}
: ${DPL_FTP_PORT}
: ${DPL_FTP_PATH}
# DPUT Parameters
: ${DPL_DPUT_DOMAIN:=ppa.launchpad.net}
: ${DPL_DPUT_METHOD:=sftp}
: ${DPL_DPUT_INCOMING}
: ${DPL_DPUT_LOGIN:=anonymous}
: ${DPL_SSH_KEY_PRIVATE} # Base64-encoded private key file. Used to sign repository uploads
: ${DPL_SSH_KEY_PASSPHRASE} # Decodes the private key file.
# Package Parameters
: ${PACKAGE_NAME:=srb2}
: ${PACKAGE_VERSION:=2.2.0}
: ${PACKAGE_SUBVERSION} # Highly recommended to set this to reflect the distro series target (e.g., ~18.04bionic)
: ${PACKAGE_REVISION} # Defaults to UTC timestamp
: ${PACKAGE_INSTALL_PATH:=/usr/games/SRB2}
: ${PACKAGE_LINK_PATH:=/usr/games}
: ${PACKAGE_DISTRO:=trusty}
: ${PACKAGE_URGENCY:=high}
: ${PACKAGE_NAME_EMAIL:=Sonic Team Junior <[email protected]>}
: ${PACKAGE_GROUP_NAME_EMAIL:=Sonic Team Junior <[email protected]>}
: ${PACKAGE_WEBSITE:=<http://www.srb2.org>}
: ${PACKAGE_ASSET_MINVERSION:=2.1.26} # Number this the version BEFORE the actual required version, because we do a > check
: ${PACKAGE_ASSET_MAXVERSION:=2.2.1} # Number this the version AFTER the actual required version, because we do a < check
: ${PROGRAM_NAME:=Sonic Robo Blast 2}
: ${PROGRAM_VENDOR:=Sonic Team Junior}
: ${PROGRAM_VERSION:=2.2.0}
: ${PROGRAM_DESCRIPTION:=A free 3D Sonic the Hedgehog fangame closely inspired by the original Sonic games on the Sega Genesis.}
: ${PROGRAM_FILENAME:=srb2}
: ${DPL_PGP_KEY_PRIVATE} # Base64-encoded private key file. Used to sign Debian packages
: ${DPL_PGP_KEY_PASSPHRASE} # Decodes the private key file.
# Export Asset and Package Parameters for envsubst templating
export ASSET_ARCHIVE_PATH="${ASSET_ARCHIVE_PATH}"
export ASSET_ARCHIVE_OPTIONAL_PATH="${ASSET_ARCHIVE_OPTIONAL_PATH}"
export ASSET_FILES_HASHED="${ASSET_FILES_HASHED}"
export ASSET_FILES_DOCS="${ASSET_FILES_DOCS}"
export ASSET_FILES_OPTIONAL_GET="${ASSET_FILES_OPTIONAL_GET}"
export PACKAGE_NAME="${PACKAGE_NAME}"
export PACKAGE_VERSION="${PACKAGE_VERSION}"
export PACKAGE_SUBVERSION="${PACKAGE_SUBVERSION}" # in case we have this
export PACKAGE_REVISION="${PACKAGE_REVISION}"
export PACKAGE_ASSET_MINVERSION="${PACKAGE_ASSET_MINVERSION}"
export PACKAGE_ASSET_MAXVERSION="${PACKAGE_ASSET_MAXVERSION}"
export PACKAGE_INSTALL_PATH="${PACKAGE_INSTALL_PATH}"
export PACKAGE_LINK_PATH="${PACKAGE_LINK_PATH}"
export PACKAGE_DISTRO="${PACKAGE_DISTRO}"
export PACKAGE_URGENCY="${PACKAGE_URGENCY}"
export PACKAGE_NAME_EMAIL="${PACKAGE_NAME_EMAIL}"
export PACKAGE_GROUP_NAME_EMAIL="${PACKAGE_GROUP_NAME_EMAIL}"
export PACKAGE_WEBSITE="${PACKAGE_WEBSITE}"
export PROGRAM_NAME="${PROGRAM_NAME}"
export PROGRAM_VERSION="${PROGRAM_VERSION}"
export PROGRAM_DESCRIPTION="${PROGRAM_DESCRIPTION}"
export PROGRAM_FILENAME="${PROGRAM_FILENAME}"
# This file is called in debian_template.sh, so mark our completion so we don't run it again
__DEBIAN_PARAMETERS_INITIALIZED=1
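# For illustration only: the ": ${VAR:=default}" assignments above only take
# effect when a variable is unset, so any parameter can be overridden from the
# environment (e.g. in .travis.yml) before this file is sourced. The values
# below are hypothetical:
#
# export DPL_ENABLED=1
# export DPL_TRIGGER=deployer # a commit message containing [deployer] triggers Deployer
# export PACKAGE_SUBVERSION=~18.04bionic
# export _DPL_PACKAGE_BINARY=1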
|
STJr/SRB2
|
deployer/travis/deployer_defaults.sh
|
Shell
|
gpl-2.0
| 4,965 |
: $XConsortium: hostinfo.sh /main/2 1996/05/10 16:42:59 drk $
:
# hostinfo [ cpu debug name rating regress type ... ]
#
# print info for the current host on one line in argument order
#
# some of this is a stretch but we have to standardize on something
# if you don't like the results then change it here
#
# to generate a type regression record for the current host:
#
# hostinfo regress type < /dev/null
#
# to test a file of type regression records:
#
# hostinfo regress type < hosttype.tst
#
# lib/hostinfo/typemap is a file of <pattern> <value> pairs
# if the generated type matches a <pattern> then the type
# is changed to the corresponding <value>
#
# @(#)hostinfo ([email protected]) 05/09/95
#
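# examples (output is illustrative only):
#
# hostinfo # prints the host type, e.g. "sol.sun4"
# hostinfo name cpu rating # prints name, cpu count and rating, in that order
#
# a typemap entry is a <pattern> <value> pair, e.g. (made-up values):
#
# sgi.mips2 sgi.mips
#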
command=hostinfo
args=$*
debug=
ifs=$IFS
magic=4400000
map=
regress=
shell=`(eval 'x=$((0+0))ksh; echo ${x#0}') 2>/dev/null`
something=
PATH=$PATH:/usr/kvm:/usr/ccs/bin:/usr/local/bin:/usr/add-on/gnu/bin:/usr/add-on/GNU/bin:/opt/gnu/bin:/opt/GNU/bin
export PATH
# validate the args
for info in $args
do case $info in
debug) debug=1
;;
regress)regress=1
;;
cpu|name|rating|type)
something=1
;;
*) echo "$0: $info: unknown info name" >&2
echo "Usage: $0 [ cpu debug name rating regress type ... ]" >&2
exit 1
;;
esac
done
case $regress$something in
"") set "$@" type ;;
esac
case $debug in
"") exec 2>/dev/null
;;
*) PS4='+$LINENO+ '
set -x
;;
esac
# compute the info
output=
for info in $args
do case $info in
cpu) cpu=1
set /usr/kvm/mpstat 'cpu[0-9]' \
/usr/etc/cpustatus 'enable' \
/usr/alliant/showsched 'CE' \
prtconf 'cpu-unit'
while :
do case $# in
0) break ;;
esac
i=`$1 2>/dev/null | tr ' ' '
' | grep -c "^$2"`
case $i in
[1-9]*) cpu=$i
break
;;
esac
shift 2
done
case $cpu in
1) set \
\
hinv \
'/^[0-9][0-9]* .* Processors*$/' \
'/[ ].*//' \
\
/usr/bin/hostinfo \
'/^[0-9][0-9]* .* physically available\.*$/' \
'/[ ].*//'
while :
do case $# in
0) break ;;
esac
i=`$1 2>/dev/null | sed -e "${2}!d" -e "s${3}"`
case $i in
[1-9]*) cpu=$i
break
;;
esac
shift 3
done
;;
esac
output="$output $cpu"
;;
name) name=`hostname || uname -n || cat /etc/whoami || echo local`
output="$output $name"
;;
rating) cd /tmp
tmp=hi$$
trap 'rm -f $tmp.*' 0 1 2
cat > $tmp.c <<!
main()
{
register unsigned long i;
register unsigned long j;
register unsigned long k = 0;
for (i = 0; i < 5000; i++)
for (j = 0; j < 50000; j++)
k += j;
return k == 0;
}
!
rating=1
if cc -o $tmp.exe -O $tmp.c ||
gcc -o $tmp.exe -O $tmp.c
then set "" `{ time ./$tmp.exe; } 2>&1`
while :
do shift
case $# in
0) break ;;
esac
case $1 in
*[uU]*) case $1 in
[uU]*) shift
;;
*) IFS=${ifs}uU
set $1
IFS=$ifs
;;
esac
case $shell in
ksh) IFS=${ifs}mMsS.
set $1
IFS=$ifs
;;
*) m=`echo $1 | tr '[mMsS.]' ' '`
set $m
;;
esac
case $# in
1) m=0 s=$1 f=0 ;;
2) m=0 s=$1 f=$2 ;;
*) m=$1 s=$2 f=$3 ;;
esac
case $shell in
ksh) i="$(( $magic / ( ( $m * 60 + $s ) * 100 + $f ) ))"
j="$(( ( $i % 10 ) / 10 ))"
i="$(( i / 100 ))"
if (( $i >= 10 ))
then if (( $j >= 5 ))
then i="$(( $i + 1 ))"
fi
j=
else j=.$j
fi
;;
*) i=`expr $magic / \( \( $m \* 60 + $s \) \* 100 + $f \)`
j=`expr \( $i % 10 \) / 10`
i=`expr $i / 100`
if expr $i \>= 10 >/dev/null
then if expr $j \>= 5 >/dev/null
then i=`expr $i + 1`
fi
j=
else j=.$j
fi
;;
esac
rating=$i$j
break
;;
esac
done
fi
output="$output $rating"
;;
type) IFS=:
set /:$PATH
IFS=$ifs
shift
f=../lib/hostinfo/typemap
for i
do case $i in
"") i=. ;;
esac
if test -f $i/$f
then map="`grep -v '^#' $i/$f` $map"
fi
done
while :
do case $regress in
?*) read expected host arch mach os sys rel ver ||
case $regress in
1) regress=0 ;;
*) break ;;
esac
;;
esac
case $regress in
""|0) set "" \
`hostname || uname -n || cat /etc/whoami || echo local` \
`{ arch || uname -m || att uname -m || uname -s || att uname -s || echo unknown ;} | sed "s/[ ]/-/g"` \
`{ mach || machine || uname -p || att uname -p || echo unknown ;} | sed -e "s/[ ]/-/g"` \
`uname -a || att uname -a || echo unknown $host unknown unknown unknown unknown unknown`
expected=$1 host=$2 arch=$3 mach=$4 os=$5 sys=$6 rel=$7 ver=$8
;;
esac
type=unknown
case $regress in
?*) regress=hostname
case $host in
*.*) regress="$regress.domain" ;;
esac
regress="$regress $arch $mach $os hostname"
case $sys in
*.*) regress="$regress.domain" ;;
esac
regress="$regress $rel $ver"
;;
esac
case $host in
*.*) case $shell in
ksh) host=${host%%.*} ;;
*) host=`echo $host | sed -e 's/\..*//'` ;;
esac
;;
esac
case $mach in
unknown)
mach=
;;
r[3-9]000)
case $shell in
ksh) mach="mips$((${mach#r}/1000-2))"
;;
*) mach=`echo $mach | sed -e 's/^.//'`
mach=mips`expr $mach / 1000 - 2`
;;
esac
;;
esac
case $os in
$host|unknown)
set \
\
/NextDeveloper -d next
while :
do case $# in
0) break ;;
esac
if test $2 $1
then os=$3
break
fi
shift 3
done
;;
esac
case $os in
AIX*|aix*)
type=ibm.risc
;;
HP-UX) case $arch in
9000/[78]*)
type=hp.pa
;;
*) type=hp.ux
;;
esac
;;
IRIX*|irix*)
type=sgi.mips
case $arch in
[a-zA-Z][a-zA-Z][0-9][0-9]*|[a-zA-Z][a-zA-Z]1[3-9]*)
type=${type}2
;;
[a-zA-Z][a-zA-Z][0-9]*)
type=${type}1
;;
esac
;;
OSx*|SMP*|pyramid)
type=pyr
;;
[Ss]ol*)
type=sol.sun4
;;
[Ss]un*)
case $shell in
ksh) x=${arch#sun?}
type=${arch%$x}
;;
*) type=`echo $arch | sed -e 's/\(sun.\).*/\1/'`
;;
esac
case $rel in
[0-4]*) ;;
*) case $type in
'') case $mach in
sparc*) type=sun4 ;;
*) type=$mach ;;
esac
;;
esac
type=sol.$type
;;
esac
;;
$host) type=$arch
case $type in
*.*|*[0-9]*86|*68*)
;;
*) case $mach in
*[0-9]*86|*68*|mips)
type=$type.$mach
;;
esac
;;
esac
;;
unknown)
case $arch in
?*) case $arch in
sun*) mach= ;;
esac
type=$arch
case $mach in
?*) type=$type.$mach ;;
esac
;;
esac
;;
*) case $ver in
FTX*|ftx*)
case $mach in
*[0-9][a-zA-Z]*)
case $shell in
ksh) mach="${mach%%+([a-zA-Z])}" ;;
*) mach=`echo $mach | sed -e 's/[a-zA-Z]*$//'` ;;
esac
;;
esac
type=stratus.$mach
;;
*) case $shell in
ksh) type=${os%[0-9]*}
type=${type%%[!A-Za-z_0-9.]*}
;;
*) type=`echo $os | sed -e 's/[0-9].*//'`
;;
esac
case $arch in
'') case $mach in
?*) type=$type.$mach ;;
esac
;;
*) type=$type.$arch ;;
esac
;;
esac
esac
case $type in
[0-9]*) case $mach in
?*) type=$mach ;;
esac
case $type in
*/MC) type=ncr.$type ;;
esac
;;
*.*) ;;
*[0-9]*86|*68*)
case $rel in
[34].[0-9]*)
type=att.$type
;;
esac
;;
[a-z]*[0-9])
;;
[a-z]*) case $mach in
$type) case $ver in
Fault*|fault*|FAULT*)
type=ft.$type
;;
esac
;;
?*) type=$type.$mach
;;
esac
;;
esac
case $shell in
ksh) type=${type%%[-+/]*} ;;
*) type=`echo $type | sed -e 's/[-+/].*//'` ;;
esac
case $type in
*.*) case $shell in
ksh) lhs=${type%.*}
rhs=${type#*.}
;;
*) lhs=`echo $type | sed -e 's/\..*//'`
rhs=`echo $type | sed -e 's/.*\.//'`
;;
esac
case $rhs in
[0-9]*86)
rhs=i$rhs
;;
68*) rhs=m$rhs
;;
esac
case $rhs in
i[2-9]86) rhs=i386 ;;
esac
case $lhs in
$rhs) type=$lhs ;;
*) type=$lhs.$rhs ;;
esac
;;
esac
case $shell in
*) type=`echo $type | tr '[A-Z]' '[a-z]'` ;;
esac
# last chance mapping
set "" "" $map
while :
do case $# in
[012]) break ;;
esac
shift 2
eval " case \$type in
$1) type=\$2; break ;;
esac"
done
case $regress in
"") break ;;
esac
case $expected in
""|$type) echo $type $regress ;;
*) echo FAIL $expected $type $regress ;;
esac
done
output="$output $type"
;;
esac
done
case $regress in
"") echo $output ;;
esac
|
sTeeLM/MINIME
|
toolkit/srpm/SOURCES/cde-2.2.4/programs/dtksh/ksh93/src/lib/lib0ast/hostinfo.sh
|
Shell
|
gpl-2.0
| 8,761 |
#!/bin/sh
# Clear out daemon
# Startup Items are deprecated; should this be removed?
# The daemon is already launched using launchctl and this folder doesn't appear to be created anymore.
# https://developer.apple.com/library/mac/documentation/MacOSX/Conceptual/BPSystemStartup/Chapters/StartupItems.html
if [ -d /Library/StartupItems/360ControlDaemon ]; then
/bin/rm -r /Library/StartupItems/360ControlDaemon
fi
if [ -f /Library/LaunchDaemons/com.mice.360Daemon.plist ]; then
launchctl stop com.mice.360Daemon
launchctl unload /Library/LaunchDaemons/com.mice.360Daemon.plist
/bin/rm /Library/LaunchDaemons/com.mice.360Daemon.plist
fi
# this folder doesn't appear to be created in recent versions either
if [ -d /Library/Application\ Support/MICE/360Daemon ]; then
/bin/rm -r /Library/Application\ Support/MICE/360Daemon
fi
if [ -d /Library/Application\ Support/MICE/360Daemon.app ]; then
/bin/rm -r /Library/Application\ Support/MICE/360Daemon.app
fi
# Remove preference pane
if [ -d /Library/PreferencePanes/Pref360Control.prefPane ]; then
/bin/rm -r /Library/PreferencePanes/Pref360Control.prefPane
fi
# Remove drivers
if [ -d /System/Library/Extensions/360Controller.kext ]; then
kextunload /System/Library/Extensions/360Controller.kext
/bin/rm -r /System/Library/Extensions/360Controller.kext
fi
if [ -d /System/Library/Extensions/Wireless360Controller.kext ]; then
kextunload /System/Library/Extensions/Wireless360Controller.kext
/bin/rm -r /System/Library/Extensions/Wireless360Controller.kext
fi
if [ -d /System/Library/Extensions/WirelessGamingReceiver.kext ]; then
kextunload /System/Library/Extensions/WirelessGamingReceiver.kext
/bin/rm -r /System/Library/Extensions/WirelessGamingReceiver.kext
fi
# Mavericks and later
if [ -d /Library/Extensions/360Controller.kext ]; then
kextunload /Library/Extensions/360Controller.kext
/bin/rm -r /Library/Extensions/360Controller.kext
fi
if [ -d /Library/Extensions/Wireless360Controller.kext ]; then
kextunload /Library/Extensions/Wireless360Controller.kext
/bin/rm -r /Library/Extensions/Wireless360Controller.kext
fi
if [ -d /Library/Extensions/WirelessGamingReceiver.kext ]; then
kextunload /Library/Extensions/WirelessGamingReceiver.kext
/bin/rm -r /Library/Extensions/WirelessGamingReceiver.kext
fi
# Remove bluetooth driver
if [ -d /Library/Extensions/XboxOneBluetooth.kext ]; then
kextunload /Library/Extensions/XboxOneBluetooth.kext
/bin/rm -r /Library/Extensions/XboxOneBluetooth.kext
fi
exit 0
|
360Controller/360Controller
|
Install360Controller/Scripts/upgrade.sh
|
Shell
|
gpl-2.0
| 2,536 |
USE_RAMDISK=YES \
MFSEXPORTS_EXTRA_OPTIONS="allcanchangequota,ignoregid" \
MOUNT_EXTRA_CONFIG="mfscachemode=NEVER" \
setup_local_empty_lizardfs info
cd "${info[mount0]}"
gid1=$(id -g lizardfstest_1)
gid2=$(id -g lizardfstest)
mfssetquota -g $gid1 0 0 3 8 .
# exceed quota by creating 1 directory and some files (8 inodes in total):
sudo -nu lizardfstest_1 mkdir dir_$gid1
for i in 2 3 4; do
verify_quota "Group $gid1 -- 0 0 0 $((i-1)) 3 8" lizardfstest_1
sudo -nu lizardfstest_1 touch dir_$gid1/$i
done
for i in 5 6; do
# after exceeding soft limit - changed into +:
verify_quota "Group $gid1 -+ 0 0 0 $((i-1)) 3 8" lizardfstest_1
sudo -nu lizardfstest_1 touch dir_$gid1/$i
done
# soft links do affect usage and are checked against limits:
sudo -nu lizardfstest_1 ln -s dir_$gid1/4 dir_$gid1/soft1
verify_quota "Group $gid1 -+ 0 0 0 7 3 8" lizardfstest_1
# snapshots are allowed if none of the uids/gids of the files residing
# in a directory has reached its limit:
sudo -nu lizardfstest_1 $(which mfsmakesnapshot) dir_$gid1 snapshot
# sudo does not necessarily pass '$PATH', even if -E is used, that's
# why a workaround with 'which' was used above
verify_quota "Group $gid1 -+ 0 0 0 14 3 8" lizardfstest_1
# check if quota can't be exceeded further:
expect_failure sudo -nu lizardfstest_1 touch dir_$gid1/file
expect_failure sudo -nu lizardfstest_1 mkdir dir2_$gid1
expect_failure sudo -nu lizardfstest_1 ln -s dir_$gid1/4 dir_$gid1/soft2
expect_failure sudo -nu lizardfstest_1 $(which mfsmakesnapshot) dir_$gid1 snapshot2
verify_quota "Group $gid1 -+ 0 0 0 14 3 8" lizardfstest_1
# hard links don't affect usage and are not checked against limits:
sudo -nu lizardfstest_1 ln dir_$gid1/4 hard
verify_quota "Group $gid1 -+ 0 0 0 14 3 8" lizardfstest_1
# check if chgrp is properly handled
sudo -nu lizardfstest_1 chgrp -R $gid2 dir_$gid1
verify_quota "Group $gid1 -+ 0 0 0 7 3 8" lizardfstest_1
verify_quota "Group $gid2 -- 0 0 0 7 0 0" lizardfstest
# check if quota can't be exceeded by one:
sudo -nu lizardfstest_1 touch dir_$gid1/file1
expect_failure sudo -nu lizardfstest_1 touch dir_$gid1/file2
verify_quota "Group $gid1 -+ 0 0 0 8 3 8" lizardfstest_1
# It would be nice to test chown as well, but I don't know how to do that without using the superuser
|
cloudweavers/lizardfs
|
tests/test_suites/ShortSystemTests/test_quota_inodes.sh
|
Shell
|
gpl-3.0
| 2,266 |
#!/bin/bash
/usr/local/bin/demo-worker -a stateline:5555
|
traitecoevo/stateline
|
docker/config/launch_worker.sh
|
Shell
|
gpl-3.0
| 58 |
#!/bin/sh
export CFLAGS="-Os"
TARCH=mips TARGET_ARCH=mips HOST_COMPILER=mipsel-linux-android "$(dirname "$0")/android-build.sh"
|
xiangshouding/ShadowVPN
|
dist-build/android-mips.sh
|
Shell
|
gpl-3.0
| 128 |
#!/bin/bash
gnuplot test_g/Hist-L=32_PP=16-8/temp/*.plot
convert -delay 10 -loop 0 test_g/Hist-L=32_PP=16-8/graphs/{1..200}.jpg animate-Hist-L=32_PP=16-8.gif
|
nlare/Landau-Wang-omp
|
plot_test_hist_graph-L=32_PP=16-8.sh
|
Shell
|
gpl-3.0
| 158 |
#!/bin/sh
#
# Copyright (c) 2013-2015 by The SeedStack authors. All rights reserved.
#
# This file is part of SeedStack, An enterprise-oriented full development stack.
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
(
cd doc/target/site/apidocs
git init
git config user.name "Travis-CI"
git config user.email "[email protected]"
git add .
git commit -m "Built for gh-pages of http://seedstack.github.io/seed"
git push --force --quiet "https://${GITHUB_TOKEN}@github.com/seedstack/seed" master:gh-pages > /dev/null 2>&1
)
|
ydautremay/seed
|
deploy_ghpages.sh
|
Shell
|
mpl-2.0
| 688 |
# http://stackoverflow.com/a/5148851
if [[ `git status --porcelain` ]]; then
exit 1
fi
|
ttanner/mucmiete
|
require-clean-working-tree.sh
|
Shell
|
agpl-3.0
| 89 |
#!/bin/bash
mkdir -p "${TOMCAT_HOME}"
pushd "${TOMCAT_HOME}" >/dev/null
tar -xf "${DOCKER_SRC}/apache-tomcat-${TOMCAT_VERSION}.tar.gz" --strip-components=1
ln -s "${TOMCAT_HOME}/" "/var/lib/tomcat${TOMCAT_MAJOR}"
ln -s "${TOMCAT_HOME}/conf/" "/etc/tomcat${TOMCAT_MAJOR}"
ln -s "${TOMCAT_HOME}/logs/" "/var/log/tomcat${TOMCAT_MAJOR}"
ln -s "${TOMCAT_HOME}/work/" "/var/cache/tomcat${TOMCAT_MAJOR}"
popd >/dev/null
|
inn1983/docker-builder
|
www-servers/tomcat/7.0.57/build.sh
|
Shell
|
lgpl-3.0
| 418 |
#!/bin/sh
#
# create_core_pkg.sh
#
# part of pfSense (https://www.pfsense.org)
# Copyright (c) 2004-2013 BSD Perimeter
# Copyright (c) 2013-2016 Electric Sheep Fencing
# Copyright (c) 2014-2021 Rubicon Communications, LLC (Netgate)
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
export PATH=/usr/local/bin:/usr/local/sbin:/usr/bin:/usr/sbin:/bin:/sbin
scripts_path=$(dirname $(realpath $0))
if [ ! -f "${scripts_path}/common.subr" ]; then
echo >&2 "ERROR: common.subr is missing"
exit 1
fi
. ${scripts_path}/common.subr
usage() {
cat >&2 <<END
Usage: $(basename $0) -t template -d destdir [-h]
Options:
-t template -- Path to package template directory
-f flavor -- package flavor
-v version -- package version
-r root -- root directory containing package files
-s search -- search path
-F filter -- filter pattern to exclude files from plist
-d destdir -- Destination directory to create package
-a ABI -- Package ABI
-A ALTABI -- Package ALTABI (aka arch)
-h -- Show this help and exit
Environment:
TMPDIR -- Temporary directory (default: /tmp)
PRODUCT_NAME -- Product name (default: pfSense)
PRODUCT_URL -- Product URL (default: https://www.pfsense.org)
END
exit 1
}
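# Illustrative invocation (the paths, version and ABI strings below are made up):
#
# create_core_pkg.sh -t ./core-pkg-templates/base -v 2.6.0 -r /tmp/install-root \
# -d /tmp/core-packages -a FreeBSD:12:amd64 -A freebsd:12:x86:64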
while getopts s:t:f:v:r:F:d:ha:A: opt; do
case "$opt" in
t)
template=$OPTARG
;;
f)
flavor=$OPTARG
;;
v)
version=$OPTARG
;;
r)
root=$OPTARG
;;
s)
findroot=$OPTARG
;;
F)
filter=$OPTARG
;;
d)
destdir=$OPTARG
;;
a)
ABI=$OPTARG
;;
A)
ALTABI=$OPTARG
;;
*)
usage
;;
esac
done
[ -z "$template" ] \
&& err "template directory is not defined"
[ -e $template -a ! -d $template ] \
&& err "template path is not a directory"
[ -z "$destdir" ] \
&& err "destination directory is not defined"
[ -e $destdir -a ! -d $destdir ] \
&& err "destination path already exists and is not a directory"
: ${TMPDIR=/tmp}
: ${PRODUCT_NAME=pfSense}
: ${PRODUCT_URL=http://www.pfsense.org/}
[ -d $destdir ] \
|| mkdir -p ${destdir}
template_path=$(realpath ${template})
template_name=$(basename ${template})
template_metadir=${template_path}/metadir
template_licensedir=${template_path}/_license
[ -d ${template_metadir} ] \
|| err "template directory not found for package ${template_name}"
scratchdir=$(mktemp -d -q ${TMPDIR}/${template_name}.XXXXXXX)
[ -n "${scratchdir}" -a -d ${scratchdir} ] \
|| err "error creating temporary directory"
trap "force_rm ${scratchdir}" 1 2 15 EXIT
metadir=${scratchdir}/${template_name}_metadir
run "Copying metadata for package ${template_name}" \
"cp -r ${template_metadir} ${metadir}"
manifest=${metadir}/+MANIFEST
plist=${scratchdir}/${template_name}_plist
exclude_plist=${scratchdir}/${template_name}_exclude_plist
if [ -f "${template_path}/pkg-plist" ]; then
cp ${template_path}/pkg-plist ${plist}
else
if [ -n "${filter}" ]; then
filter="-name ${filter}"
fi
if [ -z "${findroot}" ]; then
findroot="."
fi
for froot in ${findroot}; do
(cd ${root} \
&& find ${froot} ${filter} -type f -or -type l \
| sed 's,^.,,' \
| sort -u \
) >> ${plist}
done
fi
if [ -f "${template_path}/exclude_plist" ]; then
cp ${template_path}/exclude_plist ${exclude_plist}
else
touch ${exclude_plist}
fi
sed \
-i '' \
-e "s,%%PRODUCT_NAME%%,${PRODUCT_NAME},g" \
-e "s,%%PRODUCT_URL%%,${PRODUCT_URL},g" \
-e "s,%%FLAVOR%%,${flavor:+-}${flavor},g" \
-e "s,%%FLAVOR_DESC%%,${flavor:+ (${flavor})},g" \
-e "s,%%VERSION%%,${version},g" \
${metadir}/* \
${plist} \
${exclude_plist}
if [ -f "${exclude_plist}" ]; then
sort -u ${exclude_plist} > ${plist}.exclude
mv ${plist} ${plist}.tmp
comm -23 ${plist}.tmp ${plist}.exclude > ${plist}
rm -f ${plist}.tmp ${plist}.exclude
fi
# Add license information
if [ -d "${template_licensedir}" ]; then
portname=$(sed '/^name: /!d; s,^[^"]*",,; s,",,' ${manifest})
licenses_dir="/usr/local/share/licenses/${portname}-${version}"
mkdir -p ${root}${licenses_dir}
for f in ${template_licensedir}/*; do
cp ${f} ${licenses_dir}
echo "${licenses_dir}/$(basename ${f})" >> ${plist}
done
fi
# Force desired ABI and arch
[ -n "${ABI}" ] \
&& echo "abi: ${ABI}" >> ${manifest}
[ -n "${ALTABI}" ] \
&& echo "arch: ${ALTABI}" >> ${manifest}
run "Creating core package ${template_name}" \
"pkg create -o ${destdir} -p ${plist} -r ${root} -m ${metadir}"
force_rm ${scratchdir}
trap "-" 1 2 15 EXIT
|
NewEraCracker/pfsense
|
build/scripts/create_core_pkg.sh
|
Shell
|
apache-2.0
| 4,935 |
#!/bin/bash
source "$(dirname "${BASH_SOURCE}")/../../hack/lib/init.sh"
trap os::test::junit::reconcile_output EXIT
# Cleanup cluster resources created by this test
(
set +e
oc delete project/example project/ui-test-project project/recreated-project
oc delete sa/router -n default
oc delete node/fake-node
oc delete groups/shortoutputgroup
oc delete groups/group1
oc delete groups/cascaded-group
oc delete groups/orphaned-group
oc delete users/cascaded-user
oc delete users/orphaned-user
oc delete identities/anypassword:orphaned-user
oc delete identities/anypassword:cascaded-user
oadm policy reconcile-cluster-roles --confirm --additive-only=false
oadm policy reconcile-cluster-role-bindings --confirm --additive-only=false
) &>/dev/null
project="$( oc project -q )"
defaultimage="openshift/origin-\${component}:latest"
USE_IMAGES=${USE_IMAGES:-$defaultimage}
os::test::junit::declare_suite_start "cmd/admin"
# This test validates admin level commands including system policy
os::test::junit::declare_suite_start "cmd/admin/start"
# Check failure modes of various system commands
os::cmd::expect_failure_and_text 'openshift start network' 'kubeconfig must be set'
os::cmd::expect_failure_and_text 'openshift start network --config=${NODECONFIG} --enable=kubelet' 'the following components are not recognized: kubelet'
os::cmd::expect_failure_and_text 'openshift start network --config=${NODECONFIG} --enable=kubelet,other' 'the following components are not recognized: kubelet, other'
os::cmd::expect_failure_and_text 'openshift start network --config=${NODECONFIG} --disable=other' 'the following components are not recognized: other'
os::cmd::expect_failure_and_text 'openshift start network --config=${NODECONFIG} --disable=dns,proxy,plugins' 'at least one node component must be enabled \(dns, plugins, proxy\)'
os::cmd::expect_failure_and_text 'openshift start node' 'kubeconfig must be set'
os::cmd::expect_failure_and_text 'openshift start node --config=${NODECONFIG} --disable=other' 'the following components are not recognized: other'
os::cmd::expect_failure_and_text 'openshift start node --config=${NODECONFIG} --disable=dns,kubelet,proxy,plugins' 'at least one node component must be enabled \(dns, kubelet, plugins, proxy\)'
os::cmd::expect_failure_and_text 'openshift start --write-config=/tmp/test --hostname=""' 'error: --hostname must have a value'
os::test::junit::declare_suite_end
os::test::junit::declare_suite_start "cmd/admin/manage-node"
# Test admin manage-node operations
os::cmd::expect_success_and_text 'openshift admin manage-node --help' 'Manage nodes'
# create a node object to mess with
os::cmd::expect_success "echo 'apiVersion: v1
kind: Node
metadata:
labels:
kubernetes.io/hostname: fake-node
name: fake-node
spec:
externalID: fake-node
status:
conditions:
- lastHeartbeatTime: 2015-09-08T16:58:02Z
lastTransitionTime: 2015-09-04T11:49:06Z
reason: kubelet is posting ready status
status: \"True\"
type: Ready
allocatable:
cpu: \"4\"
memory: 8010948Ki
pods: \"110\"
capacity:
cpu: \"4\"
memory: 8010948Ki
pods: \"110\"
' | oc create -f -"
os::cmd::expect_success_and_text 'oadm manage-node --selector= --schedulable=true' 'Ready'
os::cmd::expect_success_and_not_text 'oadm manage-node --selector= --schedulable=true' 'SchedulingDisabled'
os::cmd::expect_success_and_not_text 'oc get node -o yaml' 'unschedulable: true'
os::cmd::expect_success_and_text 'oadm manage-node --selector= --schedulable=false' 'SchedulingDisabled'
os::cmd::expect_success_and_text 'oc get node -o yaml' 'unschedulable: true'
echo "manage-node: ok"
os::test::junit::declare_suite_end
os::test::junit::declare_suite_start "cmd/admin/certs"
# check create-master-certs validation
os::cmd::expect_failure_and_text 'oadm ca create-master-certs --hostnames=example.com --master=' 'master must be provided'
os::cmd::expect_failure_and_text 'oadm ca create-master-certs --hostnames=example.com --master=example.com' 'master must be a valid URL'
os::cmd::expect_failure_and_text 'oadm ca create-master-certs --hostnames=example.com --master=https://example.com --public-master=example.com' 'public master must be a valid URL'
# check encrypt/decrypt of plain text
os::cmd::expect_success "echo -n 'secret data 1' | oadm ca encrypt --genkey='${ARTIFACT_DIR}/secret.key' --out='${ARTIFACT_DIR}/secret.encrypted'"
os::cmd::expect_success_and_text "oadm ca decrypt --in='${ARTIFACT_DIR}/secret.encrypted' --key='${ARTIFACT_DIR}/secret.key'" '^secret data 1$'
# create a file with trailing whitespace
echo "data with newline" > "${ARTIFACT_DIR}/secret.whitespace.data"
os::cmd::expect_success_and_text "oadm ca encrypt --key='${ARTIFACT_DIR}/secret.key' --in='${ARTIFACT_DIR}/secret.whitespace.data' --out='${ARTIFACT_DIR}/secret.whitespace.encrypted'" 'Warning.*whitespace'
os::cmd::expect_success "oadm ca decrypt --key='${ARTIFACT_DIR}/secret.key' --in='${ARTIFACT_DIR}/secret.whitespace.encrypted' --out='${ARTIFACT_DIR}/secret.whitespace.decrypted'"
os::cmd::expect_success "diff '${ARTIFACT_DIR}/secret.whitespace.data' '${ARTIFACT_DIR}/secret.whitespace.decrypted'"
# create a binary file
echo "hello" | gzip > "${ARTIFACT_DIR}/secret.data"
# encrypt using file and pipe input/output
os::cmd::expect_success "oadm ca encrypt --key='${ARTIFACT_DIR}/secret.key' --in='${ARTIFACT_DIR}/secret.data' --out='${ARTIFACT_DIR}/secret.file-in-file-out.encrypted'"
os::cmd::expect_success "oadm ca encrypt --key='${ARTIFACT_DIR}/secret.key' --in='${ARTIFACT_DIR}/secret.data' > '${ARTIFACT_DIR}/secret.file-in-pipe-out.encrypted'"
os::cmd::expect_success "oadm ca encrypt --key='${ARTIFACT_DIR}/secret.key' < '${ARTIFACT_DIR}/secret.data' > '${ARTIFACT_DIR}/secret.pipe-in-pipe-out.encrypted'"
# decrypt using all three methods
os::cmd::expect_success "oadm ca decrypt --key='${ARTIFACT_DIR}/secret.key' --in='${ARTIFACT_DIR}/secret.file-in-file-out.encrypted' --out='${ARTIFACT_DIR}/secret.file-in-file-out.decrypted'"
os::cmd::expect_success "oadm ca decrypt --key='${ARTIFACT_DIR}/secret.key' --in='${ARTIFACT_DIR}/secret.file-in-pipe-out.encrypted' > '${ARTIFACT_DIR}/secret.file-in-pipe-out.decrypted'"
os::cmd::expect_success "oadm ca decrypt --key='${ARTIFACT_DIR}/secret.key' < '${ARTIFACT_DIR}/secret.pipe-in-pipe-out.encrypted' > '${ARTIFACT_DIR}/secret.pipe-in-pipe-out.decrypted'"
# verify lossless roundtrip
os::cmd::expect_success "diff '${ARTIFACT_DIR}/secret.data' '${ARTIFACT_DIR}/secret.file-in-file-out.decrypted'"
os::cmd::expect_success "diff '${ARTIFACT_DIR}/secret.data' '${ARTIFACT_DIR}/secret.file-in-pipe-out.decrypted'"
os::cmd::expect_success "diff '${ARTIFACT_DIR}/secret.data' '${ARTIFACT_DIR}/secret.pipe-in-pipe-out.decrypted'"
echo "certs: ok"
os::test::junit::declare_suite_end
os::test::junit::declare_suite_start "cmd/admin/groups"
os::cmd::expect_success_and_text 'oadm groups new shortoutputgroup -o name' 'group/shortoutputgroup'
os::cmd::expect_failure_and_text 'oadm groups new shortoutputgroup' 'groups.user.openshift.io "shortoutputgroup" already exists'
os::cmd::expect_failure_and_text 'oadm groups new errorgroup -o blah' 'error: output format "blah" not recognized'
os::cmd::expect_failure_and_text 'oc get groups/errorgroup' 'groups.user.openshift.io "errorgroup" not found'
os::cmd::expect_success_and_text 'oadm groups new group1 foo bar' 'group1.*foo, bar'
os::cmd::expect_success_and_text 'oc get groups/group1 --no-headers' 'foo, bar'
os::cmd::expect_success 'oadm groups add-users group1 baz'
os::cmd::expect_success_and_text 'oc get groups/group1 --no-headers' 'baz'
os::cmd::expect_success 'oadm groups remove-users group1 bar'
os::cmd::expect_success_and_not_text 'oc get groups/group1 --no-headers' 'bar'
echo "groups: ok"
os::test::junit::declare_suite_end
os::test::junit::declare_suite_start "cmd/admin/admin-scc"
os::cmd::expect_success 'oadm policy who-can get pods'
os::cmd::expect_success 'oadm policy who-can get pods -n default'
os::cmd::expect_success 'oadm policy who-can get pods --all-namespaces'
# check to make sure that the resource arg conforms to resource rules
os::cmd::expect_success_and_text 'oadm policy who-can get Pod' "Resource: pods"
os::cmd::expect_success_and_text 'oadm policy who-can get PodASDF' "Resource: PodASDF"
os::cmd::expect_success_and_text 'oadm policy who-can get hpa.autoscaling -n default' "Resource: horizontalpodautoscalers.autoscaling"
os::cmd::expect_success_and_text 'oadm policy who-can get hpa.v1.autoscaling -n default' "Resource: horizontalpodautoscalers.autoscaling"
os::cmd::expect_success_and_text 'oadm policy who-can get hpa.extensions -n default' "Resource: horizontalpodautoscalers.extensions"
os::cmd::expect_success_and_text 'oadm policy who-can get hpa -n default' "Resource: horizontalpodautoscalers.autoscaling"
os::cmd::expect_success 'oadm policy add-role-to-group cluster-admin system:unauthenticated'
os::cmd::expect_success 'oadm policy add-role-to-user cluster-admin system:no-user'
os::cmd::expect_success 'oadm policy add-role-to-user admin -z fake-sa'
os::cmd::expect_success_and_text 'oc get rolebinding/admin -o jsonpath={.subjects}' 'fake-sa'
os::cmd::expect_success 'oadm policy remove-role-from-user admin -z fake-sa'
os::cmd::expect_success_and_not_text 'oc get rolebinding/admin -o jsonpath={.subjects}' 'fake-sa'
os::cmd::expect_success 'oadm policy add-role-to-user admin -z fake-sa'
os::cmd::expect_success_and_text 'oc get rolebinding/admin -o jsonpath={.subjects}' 'fake-sa'
os::cmd::expect_success "oadm policy remove-role-from-user admin system:serviceaccount:$(oc project -q):fake-sa"
os::cmd::expect_success_and_not_text 'oc get rolebinding/admin -o jsonpath={.subjects}' 'fake-sa'
os::cmd::expect_success 'oadm policy remove-role-from-group cluster-admin system:unauthenticated'
os::cmd::expect_success 'oadm policy remove-role-from-user cluster-admin system:no-user'
os::cmd::expect_success 'oadm policy remove-group system:unauthenticated'
os::cmd::expect_success 'oadm policy remove-user system:no-user'
os::cmd::expect_success 'oadm policy add-cluster-role-to-group cluster-admin system:unauthenticated'
os::cmd::expect_success 'oadm policy remove-cluster-role-from-group cluster-admin system:unauthenticated'
os::cmd::expect_success 'oadm policy add-cluster-role-to-user cluster-admin system:no-user'
os::cmd::expect_success 'oadm policy remove-cluster-role-from-user cluster-admin system:no-user'
os::cmd::expect_success 'oadm policy add-scc-to-user privileged fake-user'
os::cmd::expect_success_and_text 'oc get scc/privileged -o yaml' 'fake-user'
os::cmd::expect_success 'oadm policy add-scc-to-user privileged -z fake-sa'
os::cmd::expect_success_and_text 'oc get scc/privileged -o yaml' "system:serviceaccount:$(oc project -q):fake-sa"
os::cmd::expect_success 'oadm policy add-scc-to-group privileged fake-group'
os::cmd::expect_success_and_text 'oc get scc/privileged -o yaml' 'fake-group'
os::cmd::expect_success 'oadm policy remove-scc-from-user privileged fake-user'
os::cmd::expect_success_and_not_text 'oc get scc/privileged -o yaml' 'fake-user'
os::cmd::expect_success 'oadm policy remove-scc-from-user privileged -z fake-sa'
os::cmd::expect_success_and_not_text 'oc get scc/privileged -o yaml' "system:serviceaccount:$(oc project -q):fake-sa"
os::cmd::expect_success 'oadm policy remove-scc-from-group privileged fake-group'
os::cmd::expect_success_and_not_text 'oc get scc/privileged -o yaml' 'fake-group'
echo "admin-scc: ok"
os::test::junit::declare_suite_end
os::test::junit::declare_suite_start "cmd/admin/reconcile-cluster-roles"
os::cmd::expect_success 'oc delete clusterrole/cluster-status --cascade=false'
os::cmd::expect_failure 'oc get clusterrole/cluster-status'
os::cmd::expect_success 'oadm policy reconcile-cluster-roles'
os::cmd::expect_failure 'oc get clusterrole/cluster-status'
os::cmd::expect_success 'oadm policy reconcile-cluster-roles --confirm --loglevel=8'
os::cmd::expect_success 'oc get clusterrole/cluster-status'
# check the reconcile again with a specific cluster role name
os::cmd::expect_success 'oc delete clusterrole/cluster-status --cascade=false'
os::cmd::expect_failure 'oc get clusterrole/cluster-status'
os::cmd::expect_success 'oadm policy reconcile-cluster-roles cluster-admin --confirm'
os::cmd::expect_failure 'oc get clusterrole/cluster-status'
os::cmd::expect_success 'oadm policy reconcile-cluster-roles clusterrole/cluster-status --confirm'
os::cmd::expect_success 'oc get clusterrole/cluster-status'
# test reconciliation protection by replacing the basic-user role with one that has missing default permissions, and extra non-default permissions
os::cmd::expect_success 'oc replace --force -f ./test/testdata/basic-user-with-groups-without-projectrequests.yaml'
# 1. mark the role as protected, and ensure the role is skipped by reconciliation
os::cmd::expect_success 'oc annotate clusterrole/basic-user openshift.io/reconcile-protect=true'
os::cmd::expect_success_and_text 'oadm policy reconcile-cluster-roles basic-user --additive-only=false --confirm' 'skipped: clusterrole/basic-user'
# 2. unmark the role as protected, and ensure reconcile expects to remove extra permissions, and put back removed permissions
os::cmd::expect_success 'oc annotate clusterrole/basic-user openshift.io/reconcile-protect=false --overwrite'
os::cmd::expect_success_and_text 'oc get clusterrole/basic-user -o jsonpath="{.rules[*].resources}"' 'groups'
os::cmd::expect_success_and_not_text 'oc get clusterrole/basic-user -o jsonpath="{.rules[*].resources}"' 'projectrequests'
os::cmd::expect_success_and_not_text 'oadm policy reconcile-cluster-roles basic-user -o jsonpath="{.items[*].rules[*].resources}" --additive-only=false' 'groups'
os::cmd::expect_success_and_text 'oadm policy reconcile-cluster-roles basic-user -o jsonpath="{.items[*].rules[*].resources}" --additive-only=false' 'projectrequests'
# reconcile updates the role
os::cmd::expect_success_and_text 'oadm policy reconcile-cluster-roles basic-user --additive-only=false --confirm' 'clusterrole/basic-user'
# a second reconcile doesn't need to update the role
os::cmd::expect_success_and_not_text 'oadm policy reconcile-cluster-roles basic-user --additive-only=false --confirm' 'clusterrole/basic-user'
# test label/annotation reconciliation by replacing the basic-user role with one that has custom labels, annotations, and permissions
os::cmd::expect_success 'oc replace --force -f ./test/testdata/basic-user-with-annotations-labels-groups-without-projectrequests.yaml'
# display shows customized labels/annotations
os::cmd::expect_success_and_text 'oadm policy reconcile-cluster-roles' 'custom-label'
os::cmd::expect_success_and_text 'oadm policy reconcile-cluster-roles' 'custom-annotation'
os::cmd::expect_success_and_text 'oadm policy reconcile-cluster-roles --additive-only --confirm' 'clusterrole/basic-user'
# reconcile preserves added rules, labels, and annotations
os::cmd::expect_success_and_text 'oc get clusterroles/basic-user -o json' 'custom-label'
os::cmd::expect_success_and_text 'oc get clusterroles/basic-user -o json' 'custom-annotation'
os::cmd::expect_success_and_text 'oc get clusterroles/basic-user -o json' 'groups'
os::cmd::expect_success 'oadm policy reconcile-cluster-roles --additive-only=false --confirm'
os::cmd::expect_success_and_not_text 'oc get clusterroles/basic-user -o yaml' 'groups'
echo "admin-reconcile-cluster-roles: ok"
os::test::junit::declare_suite_end
os::test::junit::declare_suite_start "cmd/admin/reconcile-cluster-role-bindings"
# Ensure a removed binding gets re-added
os::cmd::expect_success 'oc delete clusterrolebinding/cluster-status-binding'
os::cmd::expect_failure 'oc get clusterrolebinding/cluster-status-binding'
os::cmd::expect_success 'oadm policy reconcile-cluster-role-bindings'
os::cmd::expect_failure 'oc get clusterrolebinding/cluster-status-binding'
os::cmd::expect_success 'oadm policy reconcile-cluster-role-bindings --confirm'
os::cmd::expect_success 'oc get clusterrolebinding/cluster-status-binding'
# Customize a binding
os::cmd::expect_success 'oc replace --force -f ./test/testdata/basic-users-binding.json'
# display shows customized labels/annotations
os::cmd::expect_success_and_text 'oadm policy reconcile-cluster-role-bindings' 'custom-label'
os::cmd::expect_success_and_text 'oadm policy reconcile-cluster-role-bindings' 'custom-annotation'
os::cmd::expect_success 'oadm policy reconcile-cluster-role-bindings --confirm'
# Ensure a customized binding's subjects, labels, annotations are retained by default
os::cmd::expect_success_and_text 'oc get clusterrolebindings/basic-users -o json' 'custom-label'
os::cmd::expect_success_and_text 'oc get clusterrolebindings/basic-users -o json' 'custom-annotation'
os::cmd::expect_success_and_text 'oc get clusterrolebindings/basic-users -o json' 'custom-user'
# Ensure a customized binding's roleref is corrected
os::cmd::expect_success_and_not_text 'oc get clusterrolebindings/basic-users -o json' 'cluster-status'
# Ensure --additive-only=false removes customized users from the binding
os::cmd::expect_success 'oadm policy reconcile-cluster-role-bindings --additive-only=false --confirm'
os::cmd::expect_success_and_not_text 'oc get clusterrolebindings/basic-users -o json' 'custom-user'
# check the reconcile again with a specific cluster role name
os::cmd::expect_success 'oc delete clusterrolebinding/basic-users'
os::cmd::expect_failure 'oc get clusterrolebinding/basic-users'
os::cmd::expect_success 'oadm policy reconcile-cluster-role-bindings cluster-admin --confirm'
os::cmd::expect_failure 'oc get clusterrolebinding/basic-users'
os::cmd::expect_success 'oadm policy reconcile-cluster-role-bindings basic-user --confirm'
os::cmd::expect_success 'oc get clusterrolebinding/basic-users'
echo "admin-reconcile-cluster-role-bindings: ok"
os::test::junit::declare_suite_end
os::test::junit::declare_suite_start "cmd/admin/role-reapers"
os::cmd::expect_success "oc process -f test/extended/testdata/roles/policy-roles.yaml -v NAMESPACE='${project}' | oc create -f -"
os::cmd::expect_success "oc get rolebinding/basic-users"
os::cmd::expect_success "oc delete role/basic-user"
os::cmd::expect_failure "oc get rolebinding/basic-users"
os::cmd::expect_success "oc create -f test/extended/testdata/roles/policy-clusterroles.yaml"
os::cmd::expect_success "oc get clusterrolebinding/basic-users2"
os::cmd::expect_success "oc delete clusterrole/basic-user2"
os::cmd::expect_failure "oc get clusterrolebinding/basic-users2"
os::cmd::expect_success "oc policy add-role-to-user edit foo"
os::cmd::expect_success "oc get rolebinding/edit"
os::cmd::expect_success "oc delete clusterrole/edit"
os::cmd::expect_failure "oc get rolebinding/edit"
os::cmd::expect_success "oadm policy reconcile-cluster-roles --confirm"
os::cmd::expect_success "oadm policy reconcile-cluster-role-bindings --confirm"
echo "admin-role-reapers: ok"
os::test::junit::declare_suite_end
os::test::junit::declare_suite_start "cmd/admin/role-selectors"
os::cmd::expect_success "oc create -f test/extended/testdata/roles/policy-clusterroles.yaml"
os::cmd::expect_success "oc get clusterrole/basic-user2"
os::cmd::expect_success "oc label clusterrole/basic-user2 foo=bar"
os::cmd::expect_success_and_not_text "oc get clusterroles --selector=foo=bar" "No resources found"
os::cmd::expect_success_and_text "oc get clusterroles --selector=foo=unknown" "No resources found"
os::cmd::expect_success "oc get clusterrolebinding/basic-users2"
os::cmd::expect_success "oc label clusterrolebinding/basic-users2 foo=bar"
os::cmd::expect_success_and_not_text "oc get clusterrolebindings --selector=foo=bar" "No resources found"
os::cmd::expect_success_and_text "oc get clusterroles --selector=foo=unknown" "No resources found"
os::cmd::expect_success "oc delete clusterrole/basic-user2"
os::test::junit::declare_suite_end
echo "admin-role-selectors: ok"
os::test::junit::declare_suite_start "cmd/admin/ui-project-commands"
# Test the commands the UI projects page tells users to run
# These should match what is described in projects.html
os::cmd::expect_success 'oadm new-project ui-test-project --admin="createuser"'
os::cmd::expect_success 'oadm policy add-role-to-user admin adduser -n ui-test-project'
# Make sure project can be listed by oc (after auth cache syncs)
os::cmd::try_until_text 'oc get projects' 'ui\-test\-project'
# Make sure users got added
os::cmd::expect_success_and_text "oc describe policybinding ':default' -n ui-test-project" 'createuser'
os::cmd::expect_success_and_text "oc describe policybinding ':default' -n ui-test-project" 'adduser'
echo "ui-project-commands: ok"
os::test::junit::declare_suite_end
os::test::junit::declare_suite_start "cmd/admin/new-project"
# Test deleting and recreating a project
os::cmd::expect_success 'oadm new-project recreated-project --admin="createuser1"'
os::cmd::expect_success 'oc delete project recreated-project'
os::cmd::try_until_failure 'oc get project recreated-project'
os::cmd::expect_success 'oadm new-project recreated-project --admin="createuser2"'
os::cmd::expect_success_and_text "oc describe policybinding ':default' -n recreated-project" 'createuser2'
echo "new-project: ok"
os::test::junit::declare_suite_end
os::test::junit::declare_suite_start "cmd/admin/router"
# Test running a router
os::cmd::expect_failure_and_text 'oadm router --dry-run' 'does not exist'
os::cmd::expect_success "oadm policy add-scc-to-user privileged system:serviceaccount:default:router"
os::cmd::expect_success_and_text "oadm router -o yaml --service-account=router -n default" 'image:.*\-haproxy\-router:'
os::cmd::expect_success "oadm router --images='${USE_IMAGES}' --service-account=router -n default"
os::cmd::expect_success_and_text 'oadm router -n default' 'service exists'
os::cmd::expect_success_and_text 'oc get dc/router -o yaml -n default' 'readinessProbe'
echo "router: ok"
os::test::junit::declare_suite_end
os::test::junit::declare_suite_start "cmd/admin/registry"
# Test running a registry as a daemonset
os::cmd::expect_success "oc delete clusterrolebinding/registry-registry-role"
os::cmd::expect_failure_and_text 'oadm registry --daemonset --dry-run' 'does not exist'
os::cmd::expect_success_and_text "oadm registry --daemonset -o yaml" 'DaemonSet'
os::cmd::expect_success "oadm registry --daemonset --images='${USE_IMAGES}'"
os::cmd::expect_success_and_text 'oadm registry --daemonset' 'service exists'
os::cmd::try_until_text 'oc get ds/docker-registry --template="{{.status.desiredNumberScheduled}}"' '1'
# clean up so we can test non-daemonset
os::cmd::expect_success "oadm registry --daemonset -o yaml | oc delete -f -"
echo "registry daemonset: ok"
# Test running a registry
os::cmd::expect_failure_and_text 'oadm registry --dry-run' 'does not exist'
os::cmd::expect_success_and_text "oadm registry -o yaml" 'image:.*\-docker\-registry'
os::cmd::expect_success "oadm registry --images='${USE_IMAGES}'"
os::cmd::expect_success_and_text 'oadm registry' 'service exists'
os::cmd::expect_success_and_text 'oc describe svc/docker-registry' 'Session Affinity:\s*ClientIP'
os::cmd::expect_success_and_text 'oc get dc/docker-registry -o yaml' 'readinessProbe'
os::cmd::expect_success_and_text 'oc env --list dc/docker-registry' 'REGISTRY_MIDDLEWARE_REPOSITORY_OPENSHIFT_ENFORCEQUOTA=false'
echo "registry: ok"
os::test::junit::declare_suite_end
os::test::junit::declare_suite_start "cmd/admin/apply"
workingdir=$(mktemp -d)
os::cmd::expect_success "oadm registry -o yaml > ${workingdir}/oadm_registry.yaml"
os::util::sed "s/5000/6000/g" ${workingdir}/oadm_registry.yaml
os::cmd::expect_success "oc apply -f ${workingdir}/oadm_registry.yaml"
os::cmd::expect_success_and_text 'oc get dc/docker-registry -o yaml' '6000'
echo "apply: ok"
os::test::junit::declare_suite_end
os::test::junit::declare_suite_start "cmd/admin/build-chain"
# Test building a dependency tree
os::cmd::expect_success 'oc process -f examples/sample-app/application-template-stibuild.json -l build=sti | oc create -f -'
# Test both the type/name resource syntax and the fact that istag/origin-ruby-sample:latest is
# still not created, but because a buildConfig points to it we get back its graph of deps.
os::cmd::expect_success_and_text 'oadm build-chain istag/origin-ruby-sample' 'istag/origin-ruby-sample:latest'
os::cmd::expect_success_and_text 'oadm build-chain ruby-22-centos7 -o dot' 'digraph "ruby-22-centos7:latest"'
os::cmd::expect_success 'oc delete all -l build=sti'
echo "ex build-chain: ok"
os::test::junit::declare_suite_end
os::test::junit::declare_suite_start "cmd/admin/complex-scenarios"
# Make sure no one commits data with allocated values that could flake
os::cmd::expect_failure 'grep -r "clusterIP.*172" test/testdata/app-scenarios'
os::cmd::expect_success 'oadm new-project example --admin="createuser"'
os::cmd::expect_success 'oc project example'
os::cmd::try_until_success 'oc get serviceaccount default'
os::cmd::expect_success 'oc create -f test/testdata/app-scenarios'
os::cmd::expect_success 'oc status'
os::cmd::expect_success_and_text 'oc status -o dot' '"example"'
echo "complex-scenarios: ok"
os::test::junit::declare_suite_end
os::test::junit::declare_suite_start "cmd/admin/reconcile-security-context-constraints"
# Test reconciling SCCs
os::cmd::expect_success 'oc delete scc/restricted'
os::cmd::expect_failure 'oc get scc/restricted'
os::cmd::expect_success 'oadm policy reconcile-sccs'
os::cmd::expect_failure 'oc get scc/restricted'
os::cmd::expect_success 'oadm policy reconcile-sccs --confirm'
os::cmd::expect_success 'oc get scc/restricted'
os::cmd::expect_success 'oadm policy add-scc-to-user restricted my-restricted-user'
os::cmd::expect_success_and_text 'oc get scc/restricted -o yaml' 'my-restricted-user'
os::cmd::expect_success 'oadm policy reconcile-sccs --confirm'
os::cmd::expect_success_and_text 'oc get scc/restricted -o yaml' 'my-restricted-user'
os::cmd::expect_success 'oadm policy remove-scc-from-group restricted system:authenticated'
os::cmd::expect_success_and_not_text 'oc get scc/restricted -o yaml' 'system:authenticated'
os::cmd::expect_success 'oadm policy reconcile-sccs --confirm'
os::cmd::expect_success_and_text 'oc get scc/restricted -o yaml' 'system:authenticated'
os::cmd::expect_success 'oc label scc/restricted foo=bar'
os::cmd::expect_success_and_text 'oc get scc/restricted -o yaml' 'foo: bar'
os::cmd::expect_success 'oadm policy reconcile-sccs --confirm --additive-only=true'
os::cmd::expect_success_and_text 'oc get scc/restricted -o yaml' 'foo: bar'
os::cmd::expect_success 'oadm policy reconcile-sccs --confirm --additive-only=false'
os::cmd::expect_success_and_not_text 'oc get scc/restricted -o yaml' 'foo: bar'
os::cmd::expect_success 'oc annotate scc/restricted topic="my-foo-bar"'
os::cmd::expect_success_and_text 'oc get scc/restricted -o yaml' 'topic: my-foo-bar'
os::cmd::expect_success 'oadm policy reconcile-sccs --confirm --additive-only=true'
os::cmd::expect_success_and_text 'oc get scc/restricted -o yaml' 'topic: my-foo-bar'
os::cmd::expect_success 'oadm policy reconcile-sccs --confirm --additive-only=false'
os::cmd::expect_success_and_not_text 'oc get scc/restricted -o yaml' 'topic: my-foo-bar'
echo "reconcile-scc: ok"
os::test::junit::declare_suite_end
os::test::junit::declare_suite_start "cmd/admin/policybinding-required"
# Admin can't bind local roles without cluster-admin permissions
os::cmd::expect_success "oc create -f test/extended/testdata/roles/empty-role.yaml -n '${project}'"
os::cmd::expect_success "oc delete 'policybinding/${project}:default' -n '${project}'"
os::cmd::expect_success 'oadm policy add-role-to-user admin local-admin -n '${project}''
os::cmd::try_until_text "oc policy who-can get policybindings -n '${project}'" "local-admin"
os::cmd::expect_success 'oc login -u local-admin -p pw'
os::cmd::expect_failure 'oc policy add-role-to-user empty-role other --role-namespace='${project}''
os::cmd::expect_success 'oc login -u system:admin'
os::cmd::expect_success "oc create policybinding '${project}' -n '${project}'"
os::cmd::expect_success 'oc login -u local-admin -p pw'
os::cmd::expect_success 'oc policy add-role-to-user empty-role other --role-namespace='${project}' -n '${project}''
os::cmd::expect_success 'oc login -u system:admin'
os::cmd::expect_success "oc delete role/empty-role -n '${project}'"
echo "policybinding-required: ok"
os::test::junit::declare_suite_end
os::test::junit::declare_suite_start "cmd/admin/user-group-cascade"
# Create test users/identities and groups
os::cmd::expect_success 'oc login -u cascaded-user -p pw'
os::cmd::expect_success 'oc login -u orphaned-user -p pw'
os::cmd::expect_success 'oc login -u system:admin'
os::cmd::expect_success_and_text 'oadm groups new cascaded-group cascaded-user orphaned-user' 'cascaded-group.*cascaded-user, orphaned-user'
os::cmd::expect_success_and_text 'oadm groups new orphaned-group cascaded-user orphaned-user' 'orphaned-group.*cascaded-user, orphaned-user'
# Add roles, sccs to users/groups
os::cmd::expect_success 'oadm policy add-scc-to-user restricted cascaded-user orphaned-user'
os::cmd::expect_success 'oadm policy add-scc-to-group restricted cascaded-group orphaned-group'
os::cmd::expect_success 'oadm policy add-role-to-user cluster-admin cascaded-user orphaned-user -n default'
os::cmd::expect_success 'oadm policy add-role-to-group cluster-admin cascaded-group orphaned-group -n default'
os::cmd::expect_success 'oadm policy add-cluster-role-to-user cluster-admin cascaded-user orphaned-user'
os::cmd::expect_success 'oadm policy add-cluster-role-to-group cluster-admin cascaded-group orphaned-group'
# Delete users
os::cmd::expect_success 'oc delete user cascaded-user'
os::cmd::expect_success 'oc delete user orphaned-user --cascade=false'
# Verify all identities remain
os::cmd::expect_success 'oc get identities/anypassword:cascaded-user'
os::cmd::expect_success 'oc get identities/anypassword:orphaned-user'
# Verify orphaned user references are left
os::cmd::expect_success_and_text "oc get clusterrolebindings/cluster-admins --output-version=v1 --template='{{.subjects}}'" 'orphaned-user'
os::cmd::expect_success_and_text "oc get rolebindings/cluster-admin --output-version=v1 --template='{{.subjects}}' -n default" 'orphaned-user'
os::cmd::expect_success_and_text "oc get scc/restricted --output-version=v1 --template='{{.users}}'" 'orphaned-user'
os::cmd::expect_success_and_text "oc get group/cascaded-group --output-version=v1 --template='{{.users}}'" 'orphaned-user'
# Verify cascaded user references are removed
os::cmd::expect_success_and_not_text "oc get clusterrolebindings/cluster-admins --output-version=v1 --template='{{.subjects}}'" 'cascaded-user'
os::cmd::expect_success_and_not_text "oc get rolebindings/cluster-admin --output-version=v1 --template='{{.subjects}}' -n default" 'cascaded-user'
os::cmd::expect_success_and_not_text "oc get scc/restricted --output-version=v1 --template='{{.users}}'" 'cascaded-user'
os::cmd::expect_success_and_not_text "oc get group/cascaded-group --output-version=v1 --template='{{.users}}'" 'cascaded-user'
# Delete groups
os::cmd::expect_success 'oc delete group cascaded-group'
os::cmd::expect_success 'oc delete group orphaned-group --cascade=false'
# Verify orphaned group references are left
os::cmd::expect_success_and_text "oc get clusterrolebindings/cluster-admins --output-version=v1 --template='{{.subjects}}'" 'orphaned-group'
os::cmd::expect_success_and_text "oc get rolebindings/cluster-admin --output-version=v1 --template='{{.subjects}}' -n default" 'orphaned-group'
os::cmd::expect_success_and_text "oc get scc/restricted --output-version=v1 --template='{{.groups}}'" 'orphaned-group'
# Verify cascaded group references are removed
os::cmd::expect_success_and_not_text "oc get clusterrolebindings/cluster-admins --output-version=v1 --template='{{.subjects}}'" 'cascaded-group'
os::cmd::expect_success_and_not_text "oc get rolebindings/cluster-admin --output-version=v1 --template='{{.subjects}}' -n default" 'cascaded-group'
os::cmd::expect_success_and_not_text "oc get scc/restricted --output-version=v1 --template='{{.groups}}'" 'cascaded-group'
echo "user-group-cascade: ok"
os::test::junit::declare_suite_end
os::test::junit::declare_suite_start "cmd/admin/serviceaccounts"
# create a new service account
os::cmd::expect_success_and_text 'oc create serviceaccount my-sa-name' 'serviceaccount "my-sa-name" created'
os::cmd::expect_success 'oc get sa my-sa-name'
# extract token and ensure it links us back to the service account
os::cmd::expect_success_and_text 'oc get user/~ --token="$( oc sa get-token my-sa-name )"' 'system:serviceaccount:.+:my-sa-name'
# add a new token and ensure it links us back to the service account
os::cmd::expect_success_and_text 'oc get user/~ --token="$( oc sa new-token my-sa-name )"' 'system:serviceaccount:.+:my-sa-name'
# add a new labeled token and ensure the label stuck
os::cmd::expect_success 'oc sa new-token my-sa-name --labels="mykey=myvalue,myotherkey=myothervalue"'
os::cmd::expect_success_and_text 'oc get secrets --selector="mykey=myvalue"' 'my-sa-name'
os::cmd::expect_success_and_text 'oc get secrets --selector="myotherkey=myothervalue"' 'my-sa-name'
os::cmd::expect_success_and_text 'oc get secrets --selector="mykey=myvalue,myotherkey=myothervalue"' 'my-sa-name'
echo "serviceacounts: ok"
os::test::junit::declare_suite_end
# user creation
os::test::junit::declare_suite_start "cmd/admin/user-creation"
os::cmd::expect_success 'oc create user test-cmd-user'
os::cmd::expect_success 'oc create identity test-idp:test-uid'
os::cmd::expect_success 'oc create useridentitymapping test-idp:test-uid test-cmd-user'
os::cmd::expect_success_and_text 'oc describe identity test-idp:test-uid' 'test-cmd-user'
os::cmd::expect_success_and_text 'oc describe user test-cmd-user' 'test-idp:test-uid'
os::test::junit::declare_suite_end
# images
os::test::junit::declare_suite_start "cmd/admin/images"
# import image and check its information
os::cmd::expect_success "oc create -f ${OS_ROOT}/test/testdata/stable-busybox.yaml"
os::cmd::expect_success_and_text "oadm top images" "sha256:a59906e33509d14c036c8678d687bd4eec81ed7c4b8ce907b888c607f6a1e0e6\W+default/busybox \(latest\)\W+<none>\W+<none>\W+yes\W+0\.65MiB"
os::cmd::expect_success_and_text "oadm top imagestreams" "default/busybox\W+0.65MiB\W+1\W+1"
os::cmd::expect_success "oc delete is/busybox -n default"
# log in as an image-pruner and test that oadm prune images works against the atomic binary
os::cmd::expect_success "oadm policy add-cluster-role-to-user system:image-pruner pruner --config='${MASTER_CONFIG_DIR}/admin.kubeconfig'"
os::cmd::expect_success "oc login --server=${KUBERNETES_MASTER} --certificate-authority='${MASTER_CONFIG_DIR}/ca.crt' -u pruner -p anything"
os::cmd::expect_success_and_text "oadm prune images" "Dry run enabled - no modifications will be made. Add --confirm to remove images"
echo "images: ok"
os::test::junit::declare_suite_end
os::test::junit::declare_suite_end
|
tdawson/origin
|
test/cmd/admin.sh
|
Shell
|
apache-2.0
| 35,549 |
#!/bin/sh -xe
# Simple integration test. Make sure to activate virtualenv beforehand
# (source venv/bin/activate) and that a Boulder test instance is
# running (see ./boulder-start.sh).
#
# Environment variables:
# SERVER: Passed as "letsencrypt --server" argument. Boulder
# monolithic defaults to :4000, AMQP defaults to :4300. This
# script defaults to monolithic.
#
# Note: this script is called by Boulder integration test suite!
. ./tests/integration/_common.sh
export PATH="/usr/sbin:$PATH" # /usr/sbin/nginx
common() {
letsencrypt_test \
--authenticator standalone \
--installer null \
"$@"
}
common --domains le1.wtf auth
common --domains le2.wtf run
export CSR_PATH="${root}/csr.der" KEY_PATH="${root}/key.pem" \
OPENSSL_CNF=examples/openssl.cnf
./examples/generate-csr.sh le3.wtf
common auth --csr "$CSR_PATH" \
--cert-path "${root}/csr/cert.pem" \
--chain-path "${root}/csr/chain.pem"
openssl x509 -in "${root}/csr/0000_cert.pem" -text
openssl x509 -in "${root}/csr/0000_chain.pem" -text
common --domain le3.wtf install \
--cert-path "${root}/csr/cert.pem" \
--key-path "${root}/csr/key.pem"
# the following assumes that Boulder issues certificates for less than
# 10 years, otherwise renewal will not take place
cat <<EOF > "$root/conf/renewer.conf"
renew_before_expiry = 10 years
deploy_before_expiry = 10 years
EOF
letsencrypt-renewer $store_flags
dir="$root/conf/archive/le1.wtf"
for x in cert chain fullchain privkey;
do
latest="$(ls -1t $dir/ | grep -e "^${x}" | head -n1)"
live="$(readlink -f "$root/conf/live/le1.wtf/${x}.pem")"
[ "${dir}/${latest}" = "$live" ] # renewer fails this test
done
if type nginx;
then
. ./letsencrypt-nginx/tests/boulder-integration.sh
fi
|
PeterMosmans/letsencrypt
|
tests/boulder-integration.sh
|
Shell
|
apache-2.0
| 1,802 |
#!/usr/bin/env bash
# This script sets up a go workspace locally and builds all go components.
source "$(dirname "${BASH_SOURCE}")/lib/init.sh"
function cleanup() {
return_code=$?
os::util::describe_return_code "${return_code}"
exit "${return_code}"
}
trap "cleanup" EXIT
platform="$(os::build::host_platform)"
build_targets=("$@")
if [[ -z "$@" ]]; then
if [[ "${platform}" == linux/* ]]; then
build_targets=(vendor/k8s.io/kubernetes/cmd/hyperkube)
else
build_targets=(vendor/k8s.io/kubernetes/cmd/hyperkube)
fi
fi
OS_BUILD_PLATFORMS=("${OS_BUILD_PLATFORMS[@]:-${platform}}")
os::build::build_binaries "${build_targets[@]}"
os::build::place_bins "${build_targets[@]}"
os::build::make_openshift_binary_symlinks
|
sdminonne/origin
|
hack/build-go.sh
|
Shell
|
apache-2.0
| 733 |
#!/bin/bash
# FILE=01
# NODE=Act_1
#
# FILE=02
# NODE=5
#
# FILE=03
# NODE=A
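# Usage (inferred from the positional assignments below):
#   find.fixed.length.paths.sh <path_length> <file_suffix> <start_node>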
LENGTH=$1
FILE=$2
NODE=$3
perl -Ilib scripts/find.fixed.length.paths.pl \
-allow_cycles 0 \
-input_file data/fixed.length.paths.in.$FILE.gv \
-output_file out/fixed.length.paths.out.$FILE.gv \
-max notice \
-path_length $LENGTH \
-report_paths 1 \
-start_node $NODE
dot -Tsvg data/fixed.length.paths.in.$FILE.gv > html/fixed.length.paths.in.$FILE.svg
dot -Tsvg out/fixed.length.paths.out.$FILE.gv > html/fixed.length.paths.out.$FILE.svg
# $DR is my web server's doc root.
PM=Perl-modules/html/graphviz2.marpa.pathutils
cp html/fixed.length.paths.* $DR/$PM
|
ronsavage/GraphViz2-Marpa-PathUtils
|
scripts/find.fixed.length.paths.sh
|
Shell
|
artistic-2.0
| 647 |
#!/bin/bash
cd `dirname $0`
. ./script_root.sh
. ./addhistory.sh
addhistory $0 "$@"
# NOTICE: cirrus uses this script too.
# mount EFI-SYSLINUX partition
root=`rootdev`
#root=/dev/sda3
syslinux=`echo ${root}|sed -e 's/\(3\|5\)/12/g'`
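# Purely textual rewrite: e.g. /dev/sda3 or /dev/sda5 becomes /dev/sda12, the EFI-SYSLINUX partition.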
echo mount ${syslinux} | tee /dev/tty1 | logger -t myscript
mkdir /tmp/mnt
mount ${syslinux} /tmp/mnt
if [ 0 -ne $? ]; then
exit 1
fi
cd /tmp/mnt
# rewrite kernel parameter
cfgdirlist=(/boot /tmp/mnt)
for cfgdir in ${cfgdirlist[@]}; do
cfglist=(root.A.cfg root.B.cfg usb.A.cfg)
cd ${cfgdir}
for cfg in ${cfglist[@]}; do
content=`cat syslinux/${cfg}`
content_new=`echo "${content}" | sed -e "s/i915.modeset=0/i915.modeset=1/g" -e "s/radeon.modeset=1/radeon.modeset=0/g" -e "s/nouveau.modeset=1//g"`
if [ "${content}" != "${content_new}" ]; then
echo modify ${cfg} | tee /dev/tty1 | logger -t myscript
echo "${content_new}" > syslinux/${cfg}
fi
done
done
#umount EFI-SYSLINUX
cd ${script_root}
umount /tmp/mnt
echo umount ${syslinux} | tee /dev/tty1 | logger -t myscript
# disable vesa
${script_root}/disable_vesa.sh nohistory
|
crosbuilder/CustomBuilds
|
script_dev/i915_modeset.sh
|
Shell
|
bsd-3-clause
| 1,105 |
#!/bin/bash
pip install numpy
pip install scipy
pip install Cython
pip install numexpr
pip install tables
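# GDAL's Python bindings compile native code, so point CFLAGS at the GDAL headers for this
# one install and clear it again afterwards.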
export CFLAGS=-I/usr/include/gdal
pip install GDAL==1.8.1
export CFLAGS=
pip install -r ga.renci.org.txt
|
hydroshare/hydroshare2
|
requirements/install.sh
|
Shell
|
bsd-3-clause
| 212 |
#!/bin/bash
function shapeupdatetotemplate {
# local declaration of values
dim=${DIM}
template=${TEMPLATE}
templatename=${TEMPLATENAME}
outputname=${OUTPUTNAME}
gradientstep=-${GRADIENTSTEP}
# debug only
# echo $dim
# echo ${template}
# echo ${templatename}
# echo ${outputname}
# echo ${outputname}*formed.nii*
# echo ${gradientstep}
# We find the average warp to the template and apply its inverse to the template image
# This keeps the template shape stable over multiple iterations of template building
echo
echo "--------------------------------------------------------------------------------------"
echo " shapeupdatetotemplate 1"
echo "--------------------------------------------------------------------------------------"
${ANTSPATH}AverageImages $dim ${template} 1 ${outputname}*formed.nii.gz
echo
echo "--------------------------------------------------------------------------------------"
echo " shapeupdatetotemplate 2"
echo "--------------------------------------------------------------------------------------"
${ANTSPATH}AverageImages $dim ${templatename}warp.nii.gz 0 `ls ${outputname}*Warp.nii.gz | grep -v "InverseWarp"`
echo
echo "--------------------------------------------------------------------------------------"
echo " shapeupdatetotemplate 3"
echo "--------------------------------------------------------------------------------------"
${ANTSPATH}MultiplyImages $dim ${templatename}warp.nii.gz ${gradientstep} ${templatename}warp.nii.gz
echo
echo "--------------------------------------------------------------------------------------"
echo " shapeupdatetotemplate 4"
echo "--------------------------------------------------------------------------------------"
rm -f ${templatename}Affine.txt
echo
echo "--------------------------------------------------------------------------------------"
echo " shapeupdatetotemplate 5"
echo "--------------------------------------------------------------------------------------"
# Averaging and inversion code --- both are 1st order estimates.
if [ ${dim} -eq 2 ] ; then
ANTSAverage2DAffine ${templatename}Affine.txt ${outputname}*Affine.txt
elif [ ${dim} -eq 3 ] ; then
ANTSAverage3DAffine ${templatename}Affine.txt ${outputname}*Affine.txt
fi
${ANTSPATH}WarpImageMultiTransform ${dim} ${templatename}warp.nii.gz ${templatename}warp.nii.gz -i ${templatename}Affine.txt -R ${template}
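    # WarpImageMultiTransform composes the listed transforms, so repeating the gradient-scaled
    # average warp four times applies a larger step of the same update to the template (together
    # with the inverse of the averaged affine), mirroring the template-update step used by
    # ANTs' buildtemplateparallel.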
${ANTSPATH}WarpImageMultiTransform ${dim} ${template} ${template} -i ${templatename}Affine.txt ${templatename}warp.nii.gz ${templatename}warp.nii.gz ${templatename}warp.nii.gz ${templatename}warp.nii.gz -R ${template}
echo
echo "--------------------------------------------------------------------------------------"
echo " shapeupdatetotemplate 6"
${ANTSPATH}MeasureMinMaxMean ${dim} ${templatename}warp.nii.gz ${templatename}warplog.txt 1
}
function ANTSAverage2DAffine {
OUTNM=${templatename}Affine.txt
FLIST=${outputname}*Affine.txt
NFILES=0
PARAM1=0
PARAM2=0
PARAM3=0
PARAM4=0
PARAM5=0
PARAM6=0
PARAM7=0
PARAM8=0
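    # Each ITK affine file stores "Parameters:" (4 matrix entries followed by the translation)
    # and "FixedParameters:" (the rotation center). Below, every column is summed across all
    # ${outputname}*Affine.txt files with awk and divided by NFILES to get the mean; the
    # translation terms (PARAM5/PARAM6) are then forced to zero so only the matrix part and the
    # center are averaged. The 3-D variant further down follows the same pattern.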
LL=` head -n 4 $FLIST | grep Paramet | cut -d ' ' -f 2 `
for x in $LL ; do PARAM1=` awk -v a=$PARAM1 -v b=$x 'BEGIN{print (a + b)}' ` ; let NFILES=$NFILES+1 ; done
PARAM1=` awk -v a=$PARAM1 -v b=$NFILES 'BEGIN{print (a / b)}' `
LL=` head -n 4 $FLIST | grep Paramet | cut -d ' ' -f 3 `
for x in $LL ; do PARAM2=` awk -v a=$PARAM2 -v b=$x 'BEGIN{print (a + b)}' ` ; done
PARAM2=` awk -v a=$PARAM2 -v b=$NFILES 'BEGIN{print (a / b)}' `
LL=` head -n 4 $FLIST | grep Paramet | cut -d ' ' -f 4 `
for x in $LL ; do PARAM3=` awk -v a=$PARAM3 -v b=$x 'BEGIN{print (a + b)}' ` ; done
PARAM3=` awk -v a=$PARAM3 -v b=$NFILES 'BEGIN{print (a / b)}' `
LL=` head -n 4 $FLIST | grep Paramet | cut -d ' ' -f 5 `
for x in $LL ; do PARAM4=` awk -v a=$PARAM4 -v b=$x 'BEGIN{print (a + b)}' ` ; done
PARAM4=` awk -v a=$PARAM4 -v b=$NFILES 'BEGIN{print (a / b)}' `
LL=` head -n 4 $FLIST | grep Paramet | cut -d ' ' -f 6 `
for x in $LL ; do PARAM5=` awk -v a=$PARAM5 -v b=$x 'BEGIN{print (a + b)}' ` ; done
PARAM5=0 # ` awk -v a=$PARAM5 -v b=$NFILES 'BEGIN{print (a / b)}' `
LL=` head -n 4 $FLIST | grep Paramet | cut -d ' ' -f 7 `
for x in $LL ; do PARAM6=` awk -v a=$PARAM6 -v b=$x 'BEGIN{print (a + b)}' ` ; done
PARAM6=0 # ` awk -v a=$PARAM6 -v b=$NFILES 'BEGIN{print (a / b)}' `
LL=` cat $FLIST | grep FixedParamet | cut -d ' ' -f 2 `
for x in $LL ; do PARAM7=` awk -v a=$PARAM7 -v b=$x 'BEGIN{print (a + b)}' ` ; done
PARAM7=` awk -v a=$PARAM7 -v b=$NFILES 'BEGIN{print (a / b)}' `
LL=` cat $FLIST | grep FixedParamet | cut -d ' ' -f 3 `
for x in $LL ; do PARAM8=` awk -v a=$PARAM8 -v b=$x 'BEGIN{print (a + b)}' ` ; done
PARAM8=` awk -v a=$PARAM8 -v b=$NFILES 'BEGIN{print (a / b)}' `
echo "# Insight Transform File V1.0 " > $OUTNM
echo "# Transform 0 " >> $OUTNM
echo "Transform: MatrixOffsetTransformBase_double_2_2 " >> $OUTNM
echo "Parameters: $PARAM1 $PARAM2 $PARAM3 $PARAM4 $PARAM5 $PARAM6 " >> $OUTNM
echo "FixedParameters: $PARAM7 $PARAM8 " >> $OUTNM
}
function ANTSAverage3DAffine {
OUTNM=${templatename}Affine.txt
FLIST=${outputname}*Affine.txt
NFILES=0
PARAM1=0
PARAM2=0
PARAM3=0
PARAM4=0
PARAM5=0
PARAM6=0
PARAM7=0
PARAM8=0
PARAM9=0
PARAM10=0
PARAM11=0
PARAM12=0
PARAM13=0
PARAM14=0
PARAM15=0
LL=` head -n 4 $FLIST | grep Paramet | cut -d ' ' -f 2 `
for x in $LL ; do PARAM1=` awk -v a=$PARAM1 -v b=$x 'BEGIN{print (a + b)}' ` ; let NFILES=$NFILES+1 ; done
PARAM1=` awk -v a=$PARAM1 -v b=$NFILES 'BEGIN{print (a / b)}' `
LL=` head -n 4 $FLIST | grep Paramet | cut -d ' ' -f 3 `
for x in $LL ; do PARAM2=` awk -v a=$PARAM2 -v b=$x 'BEGIN{print (a + b)}' ` ; done
PARAM2=` awk -v a=$PARAM2 -v b=$NFILES 'BEGIN{print (a / b)}' `
LL=` head -n 4 $FLIST | grep Paramet | cut -d ' ' -f 4 `
for x in $LL ; do PARAM3=` awk -v a=$PARAM3 -v b=$x 'BEGIN{print (a + b)}' ` ; done
PARAM3=` awk -v a=$PARAM3 -v b=$NFILES 'BEGIN{print (a / b)}' `
LL=` head -n 4 $FLIST | grep Paramet | cut -d ' ' -f 5 `
for x in $LL ; do PARAM4=` awk -v a=$PARAM4 -v b=$x 'BEGIN{print (a + b)}' ` ; done
PARAM4=` awk -v a=$PARAM4 -v b=$NFILES 'BEGIN{print (a / b)}' `
LL=` head -n 4 $FLIST | grep Paramet | cut -d ' ' -f 6 `
for x in $LL ; do PARAM5=` awk -v a=$PARAM5 -v b=$x 'BEGIN{print (a + b)}' ` ; done
PARAM5=` awk -v a=$PARAM5 -v b=$NFILES 'BEGIN{print (a / b)}' `
LL=` head -n 4 $FLIST | grep Paramet | cut -d ' ' -f 7 `
for x in $LL ; do PARAM6=` awk -v a=$PARAM6 -v b=$x 'BEGIN{print (a + b)}' ` ; done
PARAM6=` awk -v a=$PARAM6 -v b=$NFILES 'BEGIN{print (a / b)}' `
LL=` head -n 4 $FLIST | grep Paramet | cut -d ' ' -f 8 `
for x in $LL ; do PARAM7=` awk -v a=$PARAM7 -v b=$x 'BEGIN{print (a + b)}' ` ; done
PARAM7=` awk -v a=$PARAM7 -v b=$NFILES 'BEGIN{print (a / b)}' `
LL=` head -n 4 $FLIST | grep Paramet | cut -d ' ' -f 9 `
for x in $LL ; do PARAM8=` awk -v a=$PARAM8 -v b=$x 'BEGIN{print (a + b)}' ` ; done
PARAM8=` awk -v a=$PARAM8 -v b=$NFILES 'BEGIN{print (a / b)}' `
LL=` head -n 4 $FLIST | grep Paramet | cut -d ' ' -f 10 `
for x in $LL ; do PARAM9=` awk -v a=$PARAM9 -v b=$x 'BEGIN{print (a + b)}' ` ; done
PARAM9=` awk -v a=$PARAM9 -v b=$NFILES 'BEGIN{print (a / b)}' `
LL=` head -n 4 $FLIST | grep Paramet | cut -d ' ' -f 11 `
for x in $LL ; do PARAM10=` awk -v a=$PARAM10 -v b=$x 'BEGIN{print (a + b)}' ` ; done
PARAM10=0 # ` awk -v a=$PARAM10 -v b=$NFILES 'BEGIN{print (a / b)}' `
LL=` head -n 4 $FLIST | grep Paramet | cut -d ' ' -f 12 `
for x in $LL ; do PARAM11=` awk -v a=$PARAM11 -v b=$x 'BEGIN{print (a + b)}' ` ; done
PARAM11=0 # ` awk -v a=$PARAM11 -v b=$NFILES 'BEGIN{print (a / b)}' `
LL=` head -n 4 $FLIST | grep Paramet | cut -d ' ' -f 13 `
for x in $LL ; do PARAM12=` awk -v a=$PARAM12 -v b=$x 'BEGIN{print (a + b)}' ` ; done
PARAM12=0 # ` awk -v a=$PARAM12 -v b=$NFILES 'BEGIN{print (a / b)}' `
# origin params below
LL=` cat $FLIST | grep FixedParamet | cut -d ' ' -f 2 `
for x in $LL ; do PARAM13=` awk -v a=$PARAM13 -v b=$x 'BEGIN{print (a + b)}' ` ; done
PARAM13=` awk -v a=$PARAM13 -v b=$NFILES 'BEGIN{print (a / b)}' `
LL=` cat $FLIST | grep FixedParamet | cut -d ' ' -f 3 `
for x in $LL ; do PARAM14=` awk -v a=$PARAM14 -v b=$x 'BEGIN{print (a + b)}' ` ; done
PARAM14=` awk -v a=$PARAM14 -v b=$NFILES 'BEGIN{print (a / b)}' `
LL=` cat $FLIST | grep FixedParamet | cut -d ' ' -f 4 `
for x in $LL ; do PARAM15=` awk -v a=$PARAM15 -v b=$x 'BEGIN{print (a + b)}' ` ; done
PARAM15=` awk -v a=$PARAM15 -v b=$NFILES 'BEGIN{print (a / b)}' `
echo "# Insight Transform File V1.0 " > $OUTNM
echo "# Transform 0 " >> $OUTNM
echo "Transform: MatrixOffsetTransformBase_double_3_3 " >> $OUTNM
echo "Parameters: $PARAM1 $PARAM2 $PARAM3 $PARAM4 $PARAM5 $PARAM6 $PARAM7 $PARAM8 $PARAM9 $PARAM10 $PARAM11 $PARAM12 " >> $OUTNM
echo "FixedParameters: $PARAM13 $PARAM14 $PARAM15 " >> $OUTNM
}
DIM=3
TEMPLATE=t2template.nii.gz
TEMPLATENAME=t2template
OUTPUTNAME=t2
GRADIENTSTEP=0.1
shapeupdatetotemplate ${DIM} ${TEMPLATE} ${TEMPLATENAME} ${OUTPUTNAME} ${GRADIENTSTEP}
|
bkandel/AtroposMin
|
Scripts/shapeupdatetotemplate.sh
|
Shell
|
bsd-3-clause
| 9,606 |
ghc -odir ../../manualDist/ -hidir ../../manualDist/ -o ../../manualDist/bin/CurveProject.exe -O2 --make Main.hs -package-db ../../.cabal-sandbox/i386-windows-ghc-7.6.3-packages.conf.d/ ; cp ../css/*.css ../../manualDist/bin/css
|
ocramz/CurveProject
|
src/manualCompilation.sh
|
Shell
|
bsd-3-clause
| 229 |
#!/usr/bin/env bash
__scriptpath=$(cd "$(dirname "$0")"; pwd -P)
__init_tools_log=$__scriptpath/init-tools.log
__PACKAGES_DIR=$__scriptpath/packages
__TOOLRUNTIME_DIR=$__scriptpath/Tools
__DOTNET_PATH=$__TOOLRUNTIME_DIR/dotnetcli
__DOTNET_CMD=$__DOTNET_PATH/dotnet
if [ -z "$__BUILDTOOLS_SOURCE" ]; then __BUILDTOOLS_SOURCE=https://dotnet.myget.org/F/dotnet-buildtools/api/v3/index.json; fi
export __BUILDTOOLS_USE_CSPROJ=true
__BUILD_TOOLS_PACKAGE_VERSION=$(cat $__scriptpath/BuildToolsVersion.txt)
__DOTNET_TOOLS_VERSION=$(cat $__scriptpath/DotnetCLIVersion.txt)
__BUILD_TOOLS_PATH=$__PACKAGES_DIR/microsoft.dotnet.buildtools/$__BUILD_TOOLS_PACKAGE_VERSION/lib
__INIT_TOOLS_RESTORE_PROJECT=$__scriptpath/init-tools.msbuild
__INIT_TOOLS_DONE_MARKER_DIR=$__TOOLRUNTIME_DIR/$__BUILD_TOOLS_PACKAGE_VERSION
__INIT_TOOLS_DONE_MARKER=$__INIT_TOOLS_DONE_MARKER_DIR/done
if [ -z "$__DOTNET_PKG" ]; then
if [ "$(uname -m | grep "i[3456]86")" = "i686" ]; then
echo "Warning: build not supported on 32 bit Unix"
fi
OSName=$(uname -s)
case $OSName in
Darwin)
OS=OSX
__DOTNET_PKG=dotnet-sdk-${__DOTNET_TOOLS_VERSION}-osx-x64
ulimit -n 2048
            # Format x.y.z as a single integer with three digits for each part
VERSION=`sw_vers -productVersion| sed -e 's/\./ /g' | xargs printf "%03d%03d%03d"`
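            # e.g. "10.12.6" becomes 010012006, so the comparison below works across all three parts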
if [ "$VERSION" -lt 010012000 ]; then
                echo "error: macOS version $(sw_vers -productVersion) is too old. 10.12 or later is required."
exit 1
fi
;;
Linux)
__DOTNET_PKG=dotnet-sdk-${__DOTNET_TOOLS_VERSION}-linux-x64
OS=Linux
;;
*)
echo "Unsupported OS '$OSName' detected. Downloading linux-x64 tools."
OS=Linux
__DOTNET_PKG=dotnet-sdk-${__DOTNET_TOOLS_VERSION}-linux-x64
;;
esac
fi
if [ ! -e $__INIT_TOOLS_DONE_MARKER ]; then
__PATCH_CLI_NUGET_FRAMEWORKS=0
if [ -e $__TOOLRUNTIME_DIR ]; then rm -rf -- $__TOOLRUNTIME_DIR; fi
echo "Running: $__scriptpath/init-tools.sh" > $__init_tools_log
if [ ! -e $__DOTNET_PATH ]; then
mkdir -p "$__DOTNET_PATH"
if [ -n "$DOTNET_TOOLSET_DIR" ] && [ -d "$DOTNET_TOOLSET_DIR/$__DOTNET_TOOLS_VERSION" ]; then
echo "Copying $DOTNET_TOOLSET_DIR/$__DOTNET_TOOLS_VERSION to $__DOTNET_PATH" >> $__init_tools_log
cp -r $DOTNET_TOOLSET_DIR/$__DOTNET_TOOLS_VERSION/* $__DOTNET_PATH
elif [ -n "$DOTNET_TOOL_DIR" ] && [ -d "$DOTNET_TOOL_DIR" ]; then
echo "Copying $DOTNET_TOOL_DIR to $__DOTNET_PATH" >> $__init_tools_log
cp -r $DOTNET_TOOL_DIR/* $__DOTNET_PATH
else
echo "Installing dotnet cli..."
__DOTNET_LOCATION="https://dotnetcli.azureedge.net/dotnet/Sdk/${__DOTNET_TOOLS_VERSION}/${__DOTNET_PKG}.tar.gz"
            # curl has HTTPS CA trust issues less often than wget, so let's try that first.
echo "Installing '${__DOTNET_LOCATION}' to '$__DOTNET_PATH/dotnet.tar'" >> $__init_tools_log
which curl > /dev/null 2> /dev/null
if [ $? -ne 0 ]; then
wget -q -O $__DOTNET_PATH/dotnet.tar ${__DOTNET_LOCATION}
else
curl --retry 10 -sSL --create-dirs -o $__DOTNET_PATH/dotnet.tar ${__DOTNET_LOCATION}
fi
cd $__DOTNET_PATH
tar -xf $__DOTNET_PATH/dotnet.tar
cd $__scriptpath
__PATCH_CLI_NUGET_FRAMEWORKS=1
fi
fi
if [ -n "$BUILD_TOOLS_TOOLSET_DIR" ] && [ -d "$BUILD_TOOLS_TOOLSET_DIR/$__BUILD_TOOLS_PACKAGE_VERSION" ]; then
echo "Copying $BUILD_TOOLS_TOOLSET_DIR/$__BUILD_TOOLS_PACKAGE_VERSION to $__TOOLRUNTIME_DIR" >> $__init_tools_log
cp -r $BUILD_TOOLS_TOOLSET_DIR/$__BUILD_TOOLS_PACKAGE_VERSION/* $__TOOLRUNTIME_DIR
elif [ -n "$BUILD_TOOLS_TOOL_DIR" ] && [ -d "$BUILD_TOOLS_TOOL_DIR" ]; then
echo "Copying $BUILD_TOOLS_TOOL_DIR to $__TOOLRUNTIME_DIR" >> $__init_tools_log
cp -r $BUILD_TOOLS_TOOL_DIR/* $__TOOLRUNTIME_DIR
else
if [ ! -e $__BUILD_TOOLS_PATH ]; then
echo "Restoring BuildTools version $__BUILD_TOOLS_PACKAGE_VERSION..."
echo "Running: $__DOTNET_CMD restore \"$__INIT_TOOLS_RESTORE_PROJECT\" --no-cache --packages $__PACKAGES_DIR --source $__BUILDTOOLS_SOURCE /p:BuildToolsPackageVersion=$__BUILD_TOOLS_PACKAGE_VERSION" >> $__init_tools_log
$__DOTNET_CMD restore "$__INIT_TOOLS_RESTORE_PROJECT" --no-cache --packages $__PACKAGES_DIR --source $__BUILDTOOLS_SOURCE /p:BuildToolsPackageVersion=$__BUILD_TOOLS_PACKAGE_VERSION >> $__init_tools_log
if [ ! -e "$__BUILD_TOOLS_PATH/init-tools.sh" ]; then echo "ERROR: Could not restore build tools correctly. See '$__init_tools_log' for more details."1>&2; fi
fi
echo "Initializing BuildTools..."
echo "Running: $__BUILD_TOOLS_PATH/init-tools.sh $__scriptpath $__DOTNET_CMD $__TOOLRUNTIME_DIR $__PACKAGES_DIR" >> $__init_tools_log
# Executables restored with .NET Core 2.0 do not have executable permission flags. https://github.com/NuGet/Home/issues/4424
chmod +x $__BUILD_TOOLS_PATH/init-tools.sh
$__BUILD_TOOLS_PATH/init-tools.sh $__scriptpath $__DOTNET_CMD $__TOOLRUNTIME_DIR $__PACKAGES_DIR >> $__init_tools_log
if [ "$?" != "0" ]; then
echo "ERROR: An error occured when trying to initialize the tools. Please check '$__init_tools_log' for more details."1>&2
exit 1
fi
fi
echo "Making all .sh files executable under Tools."
# Executables restored with .NET Core 2.0 do not have executable permission flags. https://github.com/NuGet/Home/issues/4424
ls $__scriptpath/Tools/*.sh | xargs chmod +x
ls $__scriptpath/Tools/scripts/docker/*.sh | xargs chmod +x
Tools/crossgen.sh $__scriptpath/Tools
mkdir -p $__INIT_TOOLS_DONE_MARKER_DIR
touch $__INIT_TOOLS_DONE_MARKER
echo "Done initializing tools."
else
echo "Tools are already initialized"
fi
|
ericstj/wcf
|
init-tools.sh
|
Shell
|
mit
| 6,072 |
#!/bin/bash
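# Usage (inferred): decompress_zip.sh <archive.zip> <argument forwarded to decompress_all.sh>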
DIR=$(mktemp -d)
unzip -j $1 -d $DIR/
bash decompress_all.sh $DIR $2
rm -rf $DIR
|
WangXiang10/GWB_PRL16
|
include/probImage/probimage/ProbImage/decompress_zip.sh
|
Shell
|
mit
| 93 |
#!/bin/sh
# Copyright (C) 2008 Red Hat, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
. lib/inittest
aux prepare_vg 6
aux lvmconf 'allocation/maximise_cling = 0'
aux lvmconf 'allocation/mirror_logs_require_separate_pvs = 1'
# 3-way, disk log
# multiple failures, full replace
lvcreate -aey --mirrorlog disk --type mirror -m 2 --ignoremonitoring --nosync -L 1 -n 3way $vg "$dev1" "$dev2" "$dev3" "$dev4":0-1
aux disable_dev "$dev1" "$dev2"
lvconvert -y --repair $vg/3way 2>&1 | tee 3way.out
lvs -a -o +devices $vg | not grep unknown
not grep "WARNING: Failed" 3way.out
vgreduce --removemissing $vg
check mirror $vg 3way
aux enable_dev "$dev1" "$dev2"
vgremove -ff $vg
# 3-way, disk log
# multiple failures, partial replace
vgcreate $vg "$dev1" "$dev2" "$dev3" "$dev4" "$dev5"
lvcreate -aey --mirrorlog disk --type mirror -m 2 --ignoremonitoring --nosync -L 1 -n 3way $vg "$dev1" "$dev2" "$dev3" "$dev4"
aux disable_dev "$dev1" "$dev2"
lvconvert -y --repair $vg/3way 2>&1 | tee 3way.out
grep "WARNING: Failed" 3way.out
lvs -a -o +devices $vg | not grep unknown
vgreduce --removemissing $vg
check mirror $vg 3way
aux enable_dev "$dev1" "$dev2"
vgremove -ff $vg
vgcreate $vg "$dev1" "$dev2" "$dev3"
lvcreate -aey --mirrorlog disk --type mirror -m 1 --ignoremonitoring --nosync -l 1 -n 2way $vg "$dev1" "$dev2" "$dev3"
aux disable_dev "$dev1"
lvconvert -y --repair $vg/2way 2>&1 | tee 2way.out
grep "WARNING: Failed" 2way.out
lvs -a -o +devices $vg | not grep unknown
vgreduce --removemissing $vg
check mirror $vg 2way
aux enable_dev "$dev1" "$dev2"
vgremove -ff $vg
# FIXME - exclusive activation for mirrors should work here
# conversion of inactive cluster logs is also unsupported
test -e LOCAL_CLVMD && exit 0
# Test repair of inactive mirror with log failure
# Replacement should fail, but convert should succeed (switch to corelog)
vgcreate $vg "$dev1" "$dev2" "$dev3" "$dev4"
lvcreate -aey --type mirror -m 2 --ignoremonitoring -l 2 -n mirror2 $vg "$dev1" "$dev2" "$dev3" "$dev4":0
vgchange -a n $vg
pvremove -ff -y "$dev4"
lvconvert -y --repair $vg/mirror2
check mirror $vg mirror2
vgs $vg
vgremove -ff $vg
if kernel_at_least 3 0 0; then
# 2-way, mirrored log
# Double log failure, full replace
vgcreate $vg "$dev1" "$dev2" "$dev3" "$dev4" "$dev5" "$dev6"
lvcreate -aey --mirrorlog mirrored --type mirror -m 1 --ignoremonitoring --nosync -L 1 -n 2way $vg \
"$dev1" "$dev2" "$dev3":0 "$dev4":0
aux disable_dev "$dev3" "$dev4"
lvconvert -y --repair $vg/2way 2>&1 | tee 2way.out
lvs -a -o +devices $vg | not grep unknown
not grep "WARNING: Failed" 2way.out
vgreduce --removemissing $vg
check mirror $vg 2way
aux enable_dev "$dev3" "$dev4"
vgremove -ff $vg
fi
# 3-way, mirrored log
# Single log failure, replace
vgcreate $vg "$dev1" "$dev2" "$dev3" "$dev4" "$dev5" "$dev6"
lvcreate -aey --mirrorlog mirrored --type mirror -m 2 --ignoremonitoring --nosync -L 1 -n 3way $vg \
"$dev1" "$dev2" "$dev3" "$dev4":0 "$dev5":0
aux disable_dev "$dev4"
lvconvert -y --repair $vg/3way 2>&1 | tee 3way.out
lvs -a -o +devices $vg | not grep unknown
not grep "WARNING: Failed" 3way.out
vgreduce --removemissing $vg
check mirror $vg 3way
aux enable_dev "$dev4"
vgremove -ff $vg
|
vgmoose/lvm
|
test/shell/lvconvert-repair-replace.sh
|
Shell
|
gpl-2.0
| 3,569 |
#!/bin/bash
# (C)2016-2020 LGB Gabor Lenart
# https://github.com/lgblgblgb/xemu
set -e
if [ "$1" = "" -o "$2" = "" -o "$3" = "" ]; then
echo "BAD USAGE"
exit 1
fi
DIR="$1"
OUT="$2"
cd $DIR
rm -fr .zip
mkdir -p .zip
CMD="zip ../$OUT"
while [ "$3" != "" ]; do
file=`basename "$3"`
if [ ! -f "$file" ]; then
echo "FILE NOT FOUND (in $DIR): $file"
exit 1
fi
outfile=`echo "$file" | awk -F. '
{
if (NF < 2) {
print
} else {
if (NF == 2 && ($NF == "native" || $NF == "osx")) {
print $1
} else if (NF == 2 && ($NF == "win32" || $NF == "win64")) {
print $1 ".exe"
} else {
print $0
}
}
}'`
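# Illustrative renames produced by the awk mapping above (file names are assumptions):
#   prog.native -> prog
#   prog.osx -> prog
#   prog.win64 -> prog.exe
#   README -> README (no dot, copied unchanged)
#   a.b.native -> a.b.native (more than one dot, copied unchanged)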
echo "$file -> .zip/$outfile"
cp -a $file .zip/$outfile
CMD="$CMD $outfile"
shift
done
echo "cd .zip"
cd .zip
echo $CMD
$CMD
cd ..
rm -fr .zip
exit 0
|
lgblgblgb/xclcd
|
build/zipper.sh
|
Shell
|
gpl-2.0
| 792 |
#!/bin/sh
rm -f ~/.vimrc
rm -rf ~/.vim
mv -f ~/.vimrc_old ~/.vimrc
mv -f ~/.vim_old ~/.vim
|
pegasuslw/vi_config
|
vim/uninstall.sh
|
Shell
|
gpl-2.0
| 91 |
#!/bin/bash
# Given base.png and base32.png, makes the various sizes of icons needed for
# OS X and Windows, and puts them into .icns and .ico files, respectively.
# To run this script, make sure you have png2icns (icnsutils) and imagemagick
# installed. On Debian, that means you should run:
# sudo aptitude install icnsutils imagemagick
ICN_PREFIX=idigbio-marks_idigbio-mark-
ICN_16="${ICN_PREFIX}16.png"
ICN_48="${ICN_PREFIX}48.png"
ICN_128="${ICN_PREFIX}128.png"
ICN_605="${ICN_PREFIX}605.png"
echo "Setting Up"
rm -rf osx_icon win_icon
mkdir -p osx_icon win_icon
echo "Rescaling and Copying OS X Icons"
convert $ICN_605 -resize 512x512 osx_icon/icn512.png
convert $ICN_605 -resize 256x256 osx_icon/icn256.png
cp $ICN_128 osx_icon/icn128.png
cp $ICN_48 osx_icon/icn48.png
convert $ICN_48 -resize 32x32 osx_icon/icn32.png
cp $ICN_16 osx_icon/icn16.png
echo "Building OS X .icns File"
png2icns osx_icon/icon.icns osx_icon/*.png > /dev/null # quiet, you!
echo "Rescaling and Converting Windows Icons"
convert $ICN_605 -resize 256x256 win_icon/icn256.bmp
convert $ICN_48 win_icon/icn48.bmp
convert $ICN_48 -resize 32x32 win_icon/icn32.bmp
convert $ICN_16 win_icon/icn16.bmp
echo "Building Windows .ico File"
convert win_icon/*.bmp win_icon/icon.ico
echo "Cleaning Up"
rm osx_icon/*.png win_icon/*.bmp
|
skoppisetty/idigbio-appliance
|
packaging/icons/build_icons.sh
|
Shell
|
gpl-3.0
| 1,314 |
#!/bin/sh
export CC="afl-clang"
if [ -d /usr/lib/afl ]; then
export AFL_PATH=/usr/lib/afl
fi
echo 'int main(){return 0;}' > .a.c
[ -z "${CC}" ] && CC=gcc
${CC} ${CFLAGS} ${LDFLAGS} -o .a.out .a.c
RET=$?
rm -f .a.out .a.c
if [ $RET != 0 ]; then
echo "Your compiler doesn't supports AFL"
exit 1
fi
exec sys/install.sh $@
|
bhootravi/radare2
|
sys/afl.sh
|
Shell
|
gpl-3.0
| 322 |
function remove_instance () {
echo "========================================"
echo " Remove any running instances if any of ${TARGET_IMAGE} virsh domain."
echo "========================================"
set +e
ssh -o StrictHostKeyChecking=no root@"${PROVISIONING_HOST}" virsh destroy ${TARGET_IMAGE}
ssh -o StrictHostKeyChecking=no root@"${PROVISIONING_HOST}" virsh undefine ${TARGET_IMAGE}
ssh -o StrictHostKeyChecking=no root@"${PROVISIONING_HOST}" virsh vol-delete --pool default /var/lib/libvirt/images/${TARGET_IMAGE}.img
set -e
}
function setup_instance () {
# Provision the instance using satellite6 base image as the source image.
ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@"${PROVISIONING_HOST}" \
snap-guest -b "${SOURCE_IMAGE}" -t "${TARGET_IMAGE}" --hostname "${SERVER_HOSTNAME}" \
-m "${VM_RAM}" -c "${VM_CPU}" -d "${VM_DOMAIN}" -f -n bridge="${BRIDGE}" --static-ipaddr "${IPADDR}" \
--static-netmask "${NETMASK}" --static-gateway "${GATEWAY}"
# Wait 60 seconds for the instance and its services to come up
sleep 60
# Restart Satellite6 service for a clean state of the running instance.
ssh -o StrictHostKeyChecking=no root@"${SERVER_HOSTNAME}" 'katello-service restart'
}
if [[ "${SATELLITE_DISTRIBUTION}" != *"UPSTREAM"* ]]; then
    # The provisioning job's TARGET_IMAGE becomes the SOURCE_IMAGE for Tier and RHAI jobs.
# source-image at this stage for example: qe-sat63-rhel7-base
export SOURCE_IMAGE="${TARGET_IMAGE}"
# target-image at this stage for example: qe-sat63-rhel7-tier1
export TARGET_IMAGE="${TARGET_IMAGE%%-base}-${ENDPOINT}"
remove_instance
setup_instance
fi
|
sghai/robottelo-ci
|
scripts/satellite6-automation-instances.sh
|
Shell
|
gpl-3.0
| 1,729 |
#!/bin/sh
# Copyright (c) 2010-2020 The Open Source Geospatial Foundation and others.
# Licensed under the GNU LGPL.
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 2.1 of the License,
# or any later version. This library is distributed in the hope that
# it will be useful, but WITHOUT ANY WARRANTY, without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Lesser General Public License for more details, either
# in the "LICENSE.LGPL.txt" file distributed with this software or at
# web page "http://www.fsf.org/licenses/lgpl.html".
# About:
# =====
# This script will install GpsPrune in Ubuntu
# GpsPrune is an application for viewing and post-processing GPS data
# Homepage: http://activityworkshop.net/software/prune/
#
./diskspace_probe.sh "`basename $0`" begin
####
# live disc's username is "user"
if [ -z "$USER_NAME" ] ; then
USER_NAME="user"
fi
USER_HOME="/home/$USER_NAME"
apt-get -q update
apt-get install --assume-yes gpsprune
if [ $? -ne 0 ] ; then
echo 'ERROR: Package install failed!'
exit 1
fi
cp /usr/share/applications/gpsprune.desktop "$USER_HOME/Desktop/"
echo 'Downloading demo data ...'
mkdir -p /usr/local/share/data/vector/gpx
wget -c --progress=dot:mega \
"http://download.osgeo.org/livedvd/data/gpsprune/test_trk2.gpx" \
-O /usr/local/share/data/vector/gpx/test_trk2.gpx
####
./diskspace_probe.sh "`basename $0`" end
|
astroidex/OSGeoLive
|
bin/install_gpsprune.sh
|
Shell
|
lgpl-2.1
| 1,573 |
#! /bin/sh
#
# Copyright (c) 2001, 2003, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
# @test 1.1, 02/14/01
# @author Ram Marti
# @bug 4399067
# @summary Subject.doAs(null, action) does not clear the executing
#
# ${TESTJAVA} is pointing to the jre
#
# set platform-dependent variables
OS=`uname -s`
case "$OS" in
SunOS )
PS=":"
FS="/"
RM="/bin/rm -f"
;;
Linux )
PS=":"
FS="/"
RM="/bin/rm -f"
;;
Windows* )
PS=";"
FS="\\"
RM="rm"
;;
* )
echo "Unrecognized system!"
exit 1;
;;
esac
# remove any leftover built class
cd ${TESTCLASSES}${FS}
${RM} Test.class
${TESTJAVA}${FS}bin${FS}javac -d ${TESTCLASSES}${FS} ${TESTSRC}${FS}Test.java
WD=`pwd`
cd ${TESTSRC}${FS}
cd $WD
echo $WD
${TESTJAVA}${FS}bin${FS}java -classpath "${TESTCLASSES}${FS}" \
-Djava.security.manager \
-Djava.security.policy=${TESTSRC}${FS}policy \
Test
exit $?
|
andreagenso/java2scala
|
test/J2s/java/openjdk-6-src-b27/jdk/test/javax/security/auth/Subject/doAs/Test.sh
|
Shell
|
apache-2.0
| 1,861 |
modpath=$HOME/modules/
git clone https://github.com/ekarlso/puppet-vswitch $modpath/vswitch
git clone https://github.com/cprice-puppet/puppetlabs-inifile $modpath/inifile
git clone https://github.com/EmilienM/openstack-quantum-puppet $modpath/quantum
git clone git://github.com/puppetlabs/puppetlabs-stdlib.git $modpath/stdlib
git clone -b folsom git://github.com/puppetlabs/puppetlabs-keystone.git $modpath/keystone
puppet apply --modulepath $modpath $(dirname $0)/quantum.pp --debug
|
gwdg/puppet-neutron
|
examples/install_sample.sh
|
Shell
|
apache-2.0
| 485 |
#!/bin/bash
set -e
function usage() {
echo "Confirms that no unexpected, generated files exist in the source repository"
echo
echo "Usage: $0 <timestamp file>"
echo
echo "<timestamp file>: any file newer than this one will be considered an error unless it is already exempted"
return 1
}
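# Illustrative invocation (the timestamp path is an assumption; DIST_DIR must be
# exported because unexpected files get copied under $DIST_DIR/new_files below):
#   touch /tmp/build_start.timestamp
#   DIST_DIR=/tmp/dist ./busytown/impl/verify_no_caches_in_source_repo.sh /tmp/build_start.timestamp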
# parse arguments
# a file whose timestamp is the oldest acceptable timestamp for source files
COMPARE_TO_FILE="$1"
if [ "$COMPARE_TO_FILE" == "" ]; then
usage
fi
# get script path
SCRIPT_DIR="$(cd $(dirname $0) && pwd)"
SOURCE_DIR="$(cd $SCRIPT_DIR/../.. && pwd)"
# confirm that no files in the source repo were unexpectedly created (other than known exemptions)
function checkForGeneratedFilesInSourceRepo() {
# Paths that are still expected to be generated and that we have to allow
# If you need add or remove an exemption here, update cleanBuild.sh too
EXEMPT_PATHS=".gradle buildSrc/.gradle local.properties reports build"
# put "./" in front of each path to match the output from 'find'
EXEMPT_PATHS="$(echo " $EXEMPT_PATHS" | sed 's| | ./|g')"
# build a `find` argument for skipping descending into the exempt paths
EXEMPTIONS_ARGUMENT="$(echo $EXEMPT_PATHS | sed 's/ /\n/g' | sed 's|\(.*\)|-path \1 -prune -o|g' | xargs echo)"
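# Illustrative expansion, derived from the EXEMPT_PATHS above: EXEMPTIONS_ARGUMENT becomes
#   -path ./.gradle -prune -o -path ./buildSrc/.gradle -prune -o -path ./local.properties -prune -o
#   -path ./reports -prune -o -path ./build -prune -o
# so 'find' skips descending into each exempt path before applying the -newer test below.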
# Search for files that were created or updated more recently than the build start.
# Unfortunately we can't also include directories because the `test` task seems to update
# the modification time in several projects
GENERATED_FILES="$(cd $SOURCE_DIR && find . $EXEMPTIONS_ARGUMENT -newer $COMPARE_TO_FILE -type f)"
UNEXPECTED_GENERATED_FILES=""
for f in $GENERATED_FILES; do
exempt=false
for exemption in $EXEMPT_PATHS; do
if [ "$f" == "$exemption" ]; then
exempt=true
break
fi
if [ "$f" == "$(dirname $exemption)" ]; then
# When the exempt directory gets created, its parent dir will be modified
# So, we ignore changes to the parent dir too (but not necessarily changes in sibling dirs)
exempt=true
break
fi
done
if [ "$exempt" == "false" ]; then
UNEXPECTED_GENERATED_FILES="$UNEXPECTED_GENERATED_FILES $f"
fi
done
if [ "$UNEXPECTED_GENERATED_FILES" != "" ]; then
echo >&2
echo "Unexpectedly found these files generated or modified by the build:
${UNEXPECTED_GENERATED_FILES}
Generated files should go in OUT_DIR instead because that is where developers expect to find them
(to make it easier to diagnose build problems: inspect or delete these files)" >&2
# copy these new files into DIST_DIR in case anyone wants to inspect them
COPY_TO=$DIST_DIR/new_files
for f in $UNEXPECTED_GENERATED_FILES; do
dest="$COPY_TO/$f"
mkdir -p "$(dirname $dest)"
cp "$SOURCE_DIR/$f" "$dest"
done
echo >&2
echo Copied these generated files into $COPY_TO >&2
exit 1
fi
}
echo checking compared to $COMPARE_TO_FILE
checkForGeneratedFilesInSourceRepo
|
androidx/androidx
|
busytown/impl/verify_no_caches_in_source_repo.sh
|
Shell
|
apache-2.0
| 2,966 |
#!/bin/sh
#
# Updates the platform update site in hale-build-support with the
# built update site in build/updatesite.
#
# Assumes the hale-build-repository is present next to hale-platform.
#./gradlew && rm -R ../hale-build-support/updatesites/platform/* && cp -R ./build/updatesite/* ../hale-build-support/updatesites/platform/
rm -R -v ../hale-build-support/updatesites/platform/* && cp -R -v ./build/updatesite/* ../hale-build-support/updatesites/platform/
|
halestudio/hale-platform
|
update-build-support.sh
|
Shell
|
apache-2.0
| 462 |
#!/bin/bash
# initialize script for Computer Programming Lab
# The initialize script creates the initial environment for the Computer Programming
# Lab by installing the lab's server-side dependencies and then invoking the build script.
# List all of the lab's server-side dependencies in
# dependencies.txt
# Usage of the Script
# To use the initialize script, run the command
# initialize scripts/dependencies.txt
# The initialize script takes dependencies.txt as an argument and installs the
# packages listed in the dependencies.txt file.
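# A minimal, illustrative dependencies.txt (the package names are lab-specific
# assumptions; one yum package per line):
#   gcc
#   gcc-c++
#   gdb
#   make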
# exporting the proxy server
#export http_proxy=http://proxy.iiit.ac.in:8080/
# read proxy settings from config file
source ./config.sh
if [[ -n $http_proxy ]]; then
echo $http_proxy
export http_proxy=$http_proxy
fi
if [[ -n $https_proxy ]]; then
export https_proxy=$https_proxy
fi
# updating the packages
yum update
# $1 is the shell variable for command-line argument. cd /var/www/build/
FILENAME=dependencies.txt
# reads the file given as an argument to the script line by line and
# installs the packages
cat $FILENAME | while read LINE
do
echo $LINE
yum install -y $LINE
done
# invoke the build script
./build.sh
|
khushpreet-kaur/test-ui-tool-kit-repo
|
scripts/centos-scripts/initialize.sh
|
Shell
|
mit
| 1,171 |
#! /bin/sh -e
# tup - A file-based build system
#
# Copyright (C) 2013-2015 Mike Shal <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# Make sure gitignore files work properly in variants.
. ./tup.sh
check_no_windows variant
tmkdir build
touch build/tup.config
cat > Tupfile << HERE
.gitignore
: |> touch %o |> foo.txt
HERE
tmkdir bar
cat > bar/Tupfile << HERE
.gitignore
: |> touch %o |> bar.txt
HERE
update
gitignore_good bar.txt build/bar/.gitignore
gitignore_good foo.txt build/.gitignore
gitignore_good .tup .gitignore
gitignore_bad foo.txt .gitignore
eotup
|
p2rkw/tup
|
test/t8082-gitignore.sh
|
Shell
|
gpl-2.0
| 1,183 |
#!/bin/sh
#
# makedist - make an hp-ux distribution.
#
#
# Since the HP-UX software distribution stuff doesn't directly support
# symbolic links, we have the option of making an installation script that
# creates symbolic links, or including files that are symbolic links in the
# distribution. Since we want this distribution to be relocatable, the
# script method probably won't work and we have to make dummy link files...
#
echo "Making links needed for distribution..."
rm -rf links
mkdir links
for file in `cd ../../FL; ls *.H`; do
ln -sf $file links/`basename $file .H`.h
done
ln -sf FL links/Fl
ln -sf libfltk.sl.1 links/libfltk.sl
cd ../..
/usr/sbin/swpackage -v -s packages/hpux/fltk.info \
-d packages/hpux/fltk-1.0.5-hpux.depot -x write_remote_files=true \
-x target_type=tape fltk
echo "Compressing distribution..."
cd packages/hpux
rm -rf links
rm -f fltk-1.0.5-hpux.depot.gz
gzip -9 fltk-1.0.5-hpux.depot
|
ipwndev/DSLinux-Mirror
|
user/pixil/libs/flnx/packages/hpux/makedist.sh
|
Shell
|
gpl-2.0
| 933 |
#!/bin/bash
###########################################################################################
## Copyright 2003, 2015 IBM Corp ##
## ##
## Redistribution and use in source and binary forms, with or without modification, ##
## are permitted provided that the following conditions are met: ##
## 1.Redistributions of source code must retain the above copyright notice, ##
## this list of conditions and the following disclaimer. ##
## 2.Redistributions in binary form must reproduce the above copyright notice, this ##
## list of conditions and the following disclaimer in the documentation and/or ##
## other materials provided with the distribution. ##
## ##
## THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS AND ANY EXPRESS ##
## OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF ##
## MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ##
## THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, ##
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ##
## SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) ##
## HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, ##
## OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ##
## SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ##
############################################################################################
#cd `dirname $0`
#LTPBIN=${PWD%%/testcases/*}/testcases/bin
source $LTPBIN/tc_utils.source
## Author: Manoj Iyer
###########################################################################################
## source the utility functions
#
# tc_local_setup
#
function tc_local_setup()
{
tc_root_or_break || return
tc_exec_or_break grep cat rm || return
if [ $TC_OS_ARCH = ppcnf ]; then
ntpdate pool.ntp.org 1>$stdout 2>$stderr
tc_break_if_bad $? "ntpdate failed, please update system date before running the test"
# backup the older /var/lib/logrotate.status file
mv /var/lib/logrotate.status /var/lib/logrotate.status.org
fi
# create config file.
cat >$TCTMP/tst_logrotate.conf <<-EOF
#****** Begin Config file *******
# create new (empty) log files after rotating old ones
create
# compress the log files
compress
/var/log/tst_logfile$$ {
rotate 2
weekly
}
#****** End Config file *******
EOF
# create a log file in /var/log/
cat >/var/log/tst_logfile$$ <<-EOF
#****** Begin Log File ********
# This is a dummy log file.
Dummy log file used to test logrotate command.
Dummy log file used to test logrotate command.
Dummy log file used to test logrotate command.
Dummy log file used to test logrotate command.
Dummy log file used to test logrotate command.
Dummy log file used to test logrotate command.
Dummy log file used to test logrotate command.
Dummy log file used to test logrotate command.
Dummy log file used to test logrotate command.
Dummy log file used to test logrotate command.
#****** End Log File ********
EOF
return 0
}
#
# tc_local_cleanup
#
function tc_local_cleanup()
{
if [ $TC_OS_ARCH = ppcnf ]; then
mv /var/lib/logrotate.status.org /var/lib/logrotate.status
fi
rm -f /var/log/tst_logfile$$*
}
#
# test01 Installation check
#
function test01()
{
tc_register "Installation check"
tc_executes logrotate
tc_pass_or_fail $? "logrotate not installed"
}
#
# test02 See that logrotate creates backup of a logfile.
# Use force option to be sure.
# Use verbose option to get output that can be compared.
# See that current log file is emptied.
#
function test02()
{
tc_register "First rotation"
tc_info "starting first rotation"
# Force the rotation.
logrotate -fv $TCTMP/tst_logrotate.conf &>$stdout
tc_fail_if_bad $? "bad results from logrotate -fv" || return
# Look for some keywords in the output.
grep -q "reading config file $TCTMP/tst_logrotate.conf" $stdout
tc_fail_if_bad $? "missing reading" || return
grep -q "forced from command line (2 rotations)" $stdout
tc_fail_if_bad $? "missing forced" || return
grep -q "compressing log with" $stdout
tc_fail_if_bad $? "missing compressing" || return
# current log file should now be zero length
! [ -s /var/log/tst_logfile$$ ]
tc_fail_if_bad $? "/var/log/tst_logfile$$ should be zero length" || return
# Check if compressed log file is created.
tc_exists /var/log/tst_logfile$$.1.gz
tc_pass_or_fail $? "compressed backup file not creatred."
}
#
# test03 See that a second rotation creates a second log file with
# unique name.
#
function test03()
{
tc_register "Second rotation"
tc_info "starting second rotation"
# add a line to log file
echo "New data" >> /var/log/tst_logfile$$
# Force the rotation.
logrotate -fv $TCTMP/tst_logrotate.conf &>$stdout
tc_fail_if_bad $? "bad results from logrotate -fv" || return
# current log file should now be zero length
! [ -s /var/log/tst_logfile$$ ]
tc_fail_if_bad $? "/var/log/tst_logfile$$ should be zero length" || return
# Check if compressed log file is created.
tc_exists /var/log/tst_logfile$$.2.gz
tc_pass_or_fail $? "compressed backup file not creatred."
}
#
# test04 See that a third rotation leaves only two backup files.
#
function test04()
{
tc_register "Third rotation"
tc_info "starting third rotation"
# add a line to log file
echo "New data" >> /var/log/tst_logfile$$
# Force the rotation.
logrotate -fv $TCTMP/tst_logrotate.conf &>$stdout
tc_fail_if_bad $? "bad results from logrotate -fv" || return
# current log file should now be zero length
! [ -s /var/log/tst_logfile$$ ]
tc_fail_if_bad $? "/var/log/tst_logfile$$ should be zero length" || return
# Check if compressed log file is created.
tc_info "Should NOT find /var/log/tst_logfile$$.3.gz ..."
! tc_exists /var/log/tst_logfile$$.3.gz
tc_pass_or_fail $? "Too many backup files"
}
#
# main
#
TST_TOTAL=4
tc_get_os_arch
tc_setup
test01 &&
test02 &&
test03 &&
test04
|
PoornimaNayak/autotest-client-tests
|
linux-tools/logrotate/logrotate.sh
|
Shell
|
gpl-2.0
| 6,461 |
#! /bin/sh -e
# tup - A file-based build system
#
# Copyright (C) 2009-2015 Mike Shal <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# Make sure include_rules makes ghost Tuprules.tup files.
. ./tup.sh
tmkdir fs
tmkdir fs/sub
cat > fs/sub/Tupfile << HERE
include_rules
: foreach *.c |> gcc \$(CFLAGS) -c %f -o %o |> %B.o
: *.o |> gcc \$(LDFLAGS) %f -o %o |> prog
HERE
cat > Tuprules.tup << HERE
CFLAGS = -Wall
LDFLAGS = -lm
HERE
cat > fs/sub/Tuprules.tup << HERE
CFLAGS += -O0
HERE
tup touch fs/sub/Tupfile Tuprules.tup fs/sub/Tuprules.tup
tup touch fs/sub/helper.c
tup parse
tup_object_exist fs/sub 'gcc -Wall -O0 -c helper.c -o helper.o'
tup_sticky_exist fs/sub helper.o fs/sub 'gcc -lm helper.o -o prog'
tup_dep_exist . Tuprules.tup fs sub
tup_dep_exist fs Tuprules.tup fs sub
tup_dep_exist fs/sub Tuprules.tup fs sub
cat > fs/Tuprules.tup << HERE
CFLAGS += -DFS=1
LDFLAGS += -lfoo
HERE
tup touch fs/Tuprules.tup
tup parse
tup_object_exist fs/sub 'gcc -Wall -DFS=1 -O0 -c helper.c -o helper.o'
tup_sticky_exist fs/sub helper.o fs/sub 'gcc -lm -lfoo helper.o -o prog'
eotup
|
rmoorman/tup
|
test/t2060-ghost-tuprules.sh
|
Shell
|
gpl-2.0
| 1,700 |
#! /bin/sh
# Copyright (C) 2011-2013 Free Software Foundation, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# TAP support:
# - don't spuriously recognize lines that are "almost" TAP lines as
# real TAP lines
. test-init.sh
. tap-setup.sh
echo 1..5 > all.test
# The only recognized directives are "TODO" and "SKIP".
# So the following should count as passed tests.
cat >> all.test <<END
ok 1 # XFAIL
ok 2 # SKIPPED
ok 3 # TO DO
ok 4 # TODOALL
ok 5 # FIXME
END
# According to documentation of Test::Harness::TAP(3):
#
# Lines written to standard output matching /^(not )?ok\b/
# must be interpreted as test lines. All other lines must
# not be considered test output.
cat >> all.test <<END
ok
ok 1
${tab}ok
${tab}ok 1
not ok
not ok 1
${tab}not ok
${tab}not ok 1
notok
notok 1
not${tab}ok
not${tab}ok 1
not ok
not ok 1
no ok
no ok 1
# ok
# not ok
# ok 1
# not ok 1
#ok
#not ok
#ok 1
#not ok 1
END
set +x # Don't pollute logs too much.
for r in 'ok' 'not ok'; do
for s1 in \
a b c d e f g h i j k l m n o p q r s t u v w x y z \
A B C D E F G H I J K L M N O P Q R S T U V W X Y Z \
0 1 2 3 4 5 6 7 8 9 _ ab 0a 23 a1B2c _o _x_y_
do
for s2 in '' @ % + - = / . : \; \* \? \& \! \# \$ \< \> \\; do
printf '%s\n' "$r$s1$s2"
done
done
done >> all.test
set -x # Reset shell xtraces.
# The prove(1) utility doesn't bail out on these, so our driver
# shouldn't either.
# See comments in 'tap-bailout-leading-space.sh' for an explanation
# of why we don't have a whitespace-prepended "Bail out!" line here.
cat >> all.test <<'END'
bailout
bailout!
bail out
bail out!
Bailout
Bailout!
Bail out
Bail out
#Bail out!
# Bail out!
END
# Debugging info and minor sanity check.
cat all.test \
&& test $(grep -c '^ok1$' all.test) -eq 1 \
&& test $(grep -c '^not ok1$' all.test) -eq 1 \
|| framework_failure_ "creating all.test"
run_make -O check
count_test_results total=5 pass=5 fail=0 xpass=0 xfail=0 skip=0 error=0
:
|
DDTChen/CookieVLC
|
vlc/extras/tools/automake/t/tap-no-spurious.sh
|
Shell
|
gpl-2.0
| 2,550 |
#! /bin/sh
## Quit immediately an error
set -e
## Build the R package from a github branch.
## We assume that all the build tools, and the
## igraph dependencies are already installed,
## but the R packages we depend on are not.
## If not specified, we build the master branch
branch=${1-master}
## If not specified, we use the system R version
R=${2-R}
## If not specified, no R version is used to determine output location
Rversion=${3-}
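## Illustrative invocations (the branch name, R path and version below are assumptions):
##   ./check-r.sh                               # check 'master' with the system R
##   ./check-r.sh develop /usr/local/bin/R 3.2  # check 'develop' with a specific R;
##                                              # the third argument only selects the upload directory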
## We freshly clone the repo from github and build igraph from scratch.
builddir=`mktemp -d`
trap "rm -rf $builddir" EXIT
cd $builddir
git clone -b $branch https://github.com/igraph/igraph.git
cd igraph
./bootstrap.sh
./configure
./bootstrap.sh
make parsersources
cd interfaces/R
make
## A temporary directory for R packages
libdir=`mktemp -d`
trap "rm -rf $libdir" EXIT
## Install dependent packages
${R} -e "
options(repos=structure(c(CRAN='http://cran.rstudio.com/'))); \
desc <- read.dcf('igraph/DESCRIPTION'); \
depkeys <- c('Depends', 'Imports', 'Suggests', 'LinkingTo'); \
cn <- intersect(colnames(desc), depkeys); \
pkg <- gsub(' ', '', unlist(strsplit(desc[,cn], ','))); \
install.packages(pkg, lib='$libdir', dependencies=NA); \
"
${R} -e "
.libPaths('$libdir'); \
source('http://bioconductor.org/biocLite.R'); \
biocLite('graph', suppressUpdates=TRUE, suppressAutoUpdate=TRUE); \
"
package=`cat igraph/DESCRIPTION | grep ^Package: | cut -f2 -d" "`
version=`cat igraph/DESCRIPTION | grep ^Version | cut -f2 -d" "`
commit=`git rev-parse --short HEAD`
## Check R package
R_LIBS=${libdir} ${R} CMD check --as-cran ${package}_${version}.tar.gz || true
## Upload the output
eval `ssh-agent -s`
trap "kill $SSH_AGENT_PID" EXIT
ssh-add
ssh -p 2222 [email protected] mkdir -p www/nightly/check/r/${Rversion}/${branch}/${commit}
scp -P 2222 ${package}.Rcheck/00check.log ${package}.Rcheck/00install.out \
[email protected]:www/nightly/check/r/$Rversion/${branch}/${commit}/
## Clean up
rm -rf $builddir
rm -rf $libdir
kill $SSH_AGENT_PID
|
igraph/xdata-igraph
|
tools/virtual/vagrant/scripts/check-r.sh
|
Shell
|
gpl-2.0
| 2,140 |
#!/bin/sh
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
java -Xms1G -Xmx1G -Djdk.nio.maxCachedBufferSize=0 -Djava.net.preferIPv4Stack=true -Duser.timezone=UTC -XX:-MaxFDLimit -XX:+UseG1GC -XX:MaxGCPauseMillis=200 -XX:+ResizeTLAB -XX:-ResizePLAB -XX:MetaspaceSize=128m -XX:MinMetaspaceFreeRatio=50 -XX:MaxMetaspaceFreeRatio=80 -XX:+ParallelRefProcEnabled -XX:StackShadowPages=20 -XX:+UseCompressedOops -XX:+DisableExplicitGC -XX:StringTableSize=1000003 -XX:InitiatingHeapOccupancyPercent=40 -jar target/benchmarks.jar $@ -prof gc -prof stack:lines=5;time=1;top=3
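# Illustrative invocations (the benchmark regex is an assumption; any extra arguments
# are forwarded to the JMH runner inside target/benchmarks.jar):
#   ./microbenchmarks/run.sh                                 # run every benchmark
#   ./microbenchmarks/run.sh ".*DigestBenchmark.*" -f 1 -i 5 # one fork, five iterations of matching benchmarks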
|
ivankelly/bookkeeper
|
microbenchmarks/run.sh
|
Shell
|
apache-2.0
| 1,325 |
#!/bin/bash
##########################################################################
#Aqueduct - Compliance Remediation Content
#Copyright (C) 2011,2012
# Vincent C. Passaro ([email protected])
# Shannon Mitchell ([email protected])
#
#This program is free software; you can redistribute it and/or
#modify it under the terms of the GNU General Public License
#as published by the Free Software Foundation; either version 2
#of the License, or (at your option) any later version.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 51 Franklin Street, Fifth Floor,
#Boston, MA 02110-1301, USA.
##########################################################################
##################### Fusion Technology LLC #############################
# By Shannon Mitchell #
# Fusion Technology LLC #
# Shannon[.]Mitchell[@]fusiontechnology-llc[.]com #
# www.fusiontechnology-llc.com #
##################### Fusion Technology LLC #############################
#
# _____________________________________________________________________
# | Version | Change Information | Author | Date |
# |__________|_______________________|____________________|____________|
# | 1.0 | Initial Script | Shannon Mitchell | 15-jul-2012|
# | | Creation | | |
# |__________|_______________________|____________________|____________|
#
#######################DISA INFORMATION##################################
# Group ID (Vulid): V-786
# Group Title: GEN001180
# Rule ID: SV-37194r1_rule
# Severity: medium
# Rule Version (STIG-ID): GEN001180
# Rule Title: All network services daemon files must have mode 0755 or
# less permissive.
#
# Vulnerability Discussion: Restricting permission on daemons will
# protect them from unauthorized modification and possible system
# compromise.
#
# Responsibility: System Administrator
# IAControls: ECLP-1
#
# Check Content:
#
# Check the mode of network services daemons.
# find /usr/sbin -type f -perm +022 -exec stat -c %a:%n {} \;
# This will return the octal permissions and name of all files that are
# group or world writable.
# If any network services daemon listed is world or group writable (either
# or both of the 2 lowest order digits contain a 2, 3 or 6), this is a
# finding.
# Note: Network daemons not residing in these directories (such as httpd or
# sshd) must also be checked for the correct permissions.
#
# Fix Text:
#
# Change the mode of the network services daemon.
# chmod go-w <path>
#######################DISA INFORMATION##################################
# Global Variables
PDI=GEN001180
# Start-Lockdown
# Note: Even though the STIG has the 0 in the special bit field, I'm leaving
# those commented out as they can break the system.
for CURFILE in `find /usr/sbin/ ! -type l`
do
if [ -e "$CURFILE" ]
then
# Pull the actual permissions
FILEPERMS=`stat -L --format='%04a' $CURFILE`
# Break the actual file octal permissions up per entity
#FILESPECIAL=${FILEPERMS:0:1}
FILEOWNER=${FILEPERMS:1:1}
FILEGROUP=${FILEPERMS:2:1}
FILEOTHER=${FILEPERMS:3:1}
# Run check by 'and'ing the unwanted mask(7022)
#if [ $(($FILESPECIAL&7)) != "0" ] || [ $(($FILEOWNER&0)) != "0" ] || [ $(($FILEGROUP&2)) != "0" ] || [ $(($FILEOTHER&2)) != "0" ]
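    # Worked example (illustrative): a file with mode 0775 has FILEGROUP=7, and 7&2=2,
    # so the group-write test fires and the file gets "chmod g-w,o-w" below; a 0755 file
    # is left alone (5&2=0 for both group and other). The owner test uses mask 0, so it
    # can never fire and the owner write bit is preserved.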
if [ $(($FILEOWNER&0)) != "0" ] || [ $(($FILEGROUP&2)) != "0" ] || [ $(($FILEOTHER&2)) != "0" ]
then
#chmod u-s,g-ws,o-wt $CURFILE
chmod g-w,o-w $CURFILE
fi
fi
done
# Note: Some SUID/SGID bits will be removed, which might break some things. Here
# is the default list from RHEL 5.7:
#-rws--x--x 1 root root 32192 Mar 11 2009 /usr/sbin/userhelper
#-rwxr-sr-x 1 root smmsp 775064 Aug 11 13:24 /usr/sbin/sendmail.sendmail
#-rwxr-sr-x 1 root lock 12080 Jan 9 2007 /usr/sbin/lockdev
#-rwsr-xr-x 1 root root 8672 Sep 19 2009 /usr/sbin/userisdnctl
#-r-s--x--- 1 root apache 14264 Oct 20 17:05 /usr/sbin/suexec
#-rwsr-xr-x 1 root root 8224 Jan 6 2007 /usr/sbin/ccreds_validate
#-rwsr-xr-x 1 root root 8848 Aug 19 11:31 /usr/sbin/usernetctl
|
quark-pat/CLIP
|
packages/aqueduct/aqueduct/compliance/Bash/STIG/rhel-5/prod/GEN001180.sh
|
Shell
|
apache-2.0
| 4,690 |
#!/bin/sh -e
DIRECTORY=$(dirname "${0}")
SCRIPT_DIRECTORY=$(
cd "${DIRECTORY}" || exit 1
pwd
)
# shellcheck source=/dev/null
. "${SCRIPT_DIRECTORY}/../configuration/project.sh"
if [ "${1}" = --help ]; then
echo "Usage: ${0} [--ci-mode]"
exit 0
fi
if [ "${1}" = --ci-mode ]; then
#script/shell/test.sh --ci-mode
script/python/test.sh --ci-mode
else
#script/shell/test.sh
script/python/test.sh
fi
|
FunTimeCoding/python-utility
|
script/test.sh
|
Shell
|
mit
| 431 |
#! /bin/sh
# Copyright (C) 2010-2015 Free Software Foundation, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Check that $(LFLAGS) takes precedence over both $(AM_LFLAGS) and
# $(foo_LFLAGS).
# Please keep this in sync with the sister tests:
# - lflags-cxx.sh
# - yflags.sh
# - yflags-cxx.sh
required=cc
. test-init.sh
cat >fake-lex <<'END'
#!/bin/sh
echo '/*' "$*" '*/' >lex.yy.c
echo 'extern int dummy;' >> lex.yy.c
END
chmod a+x fake-lex
cat >> configure.ac <<'END'
AC_SUBST([CC], [false])
# Simulate presence of Lex using our fake-lex script.
AC_SUBST([LEX], ['$(abs_top_srcdir)'/fake-lex])
AC_SUBST([LEX_OUTPUT_ROOT], [lex.yy])
AC_SUBST([LEXLIB], [''])
AC_OUTPUT
END
cat > Makefile.am <<'END'
AUTOMAKE_OPTIONS = no-dependencies
bin_PROGRAMS = foo bar
foo_SOURCES = main.c foo.l
bar_SOURCES = main.c bar.l
AM_LFLAGS = __am_flags__
bar_LFLAGS = __bar_flags__
END
$ACLOCAL
$AUTOMAKE -a
grep '\$(LFLAGS).*\$(bar_LFLAGS)' Makefile.in && exit 1
grep '\$(LFLAGS).*\$(AM_LFLAGS)' Makefile.in && exit 1
: > foo.l
: > bar.l
$AUTOCONF
./configure
run_make LFLAGS=__user_flags__ foo.c bar-bar.c
cat foo.c
cat bar-bar.c
grep '__am_flags__.*__user_flags__' foo.c
grep '__bar_flags__.*__user_flags__' bar-bar.c
:
|
evaautomation/automake
|
t/lflags.sh
|
Shell
|
gpl-2.0
| 1,799 |
#!/usr/bin/env bash
#
# Copyright (C) 2020 Kaspar Schleiser <[email protected]>
# 2020 Inria
# 2020 Freie Universität Berlin
# 2015 Philipp Rosenkranz <[email protected]>
#
# This file is subject to the terms and conditions of the GNU Lesser
# General Public License v2.1. See the file LICENSE in the top level
# directory for more details.
#
. "$(dirname "${0}")"/github_annotate.sh
declare -A DEPS
DEPS["./dist/tools/licenses/check.sh"]="head pcregrep"
DEPS["./dist/tools/doccheck/check.sh"]="doxygen tput"
DEPS["./dist/tools/cppcheck/check.sh"]="cppcheck"
DEPS["./dist/tools/vera++/check.sh"]="vera++"
DEPS["./dist/tools/coccinelle/check.sh"]="spatch"
DEPS["./dist/tools/flake8/check.sh"]="python3 flake8"
DEPS["./dist/tools/codespell/check.sh"]="codespell"
DEPS["./dist/tools/uncrustify/uncrustify.sh"]="uncrustify"
DEPS["./dist/tools/shellcheck/shellcheck.sh"]="shellcheck"
if ! command -v git &>/dev/null; then
echo -n "Required command 'git' for all static tests not found in PATH "
print_warning
set_result 1
exit 1
fi
function print_warning {
local YELLOW="\033[0;33m"
local NO_COLOUR="\033[0m"
echo -e "${YELLOW}•${NO_COLOUR}"
}
function print_result {
local RED="\033[0;31m"
local GREEN="\033[0;32m"
local NO_COLOUR="\033[0m"
if (( "$1" == 0 )); then
echo -e "${GREEN}✓$NO_COLOUR"
else
echo -e "${RED}x$NO_COLOUR"
fi
}
set_result() {
NEW_RESULT=$1
if (( NEW_RESULT != 0))
then
RESULT=$NEW_RESULT
fi
}
function run {
for dep in ${DEPS["$1"]}; do
if ! command -v ${dep} &>/dev/null; then
echo -n "Required command '${dep}' for '$*' not found in PATH "
print_warning
set_result 1
return 1
fi
done
if [ -n "${GITHUB_RUN_ID}" ]; then
echo -n "::group::$1 "
else
echo -n "Running \"$*\" "
fi
OUT=$("$@" 2>&1)
NEW_RESULT=$?
print_result $NEW_RESULT
set_result $NEW_RESULT
# Indent command output so that its easily discernible from the rest
if [ -n "$OUT" ]; then
echo "Command output:"
echo ""
# Using printf to avoid problems if the command output begins with a -
(printf "%s\n" "$OUT" | while IFS= read -r line; do printf "\t%s\n" "$line"; done)
echo ""
fi
if [ -n "${GITHUB_RUN_ID}" ]; then
github_annotate_report_last_run
echo "::endgroup::"
fi
}
RESULT=0
if [ -n "${CI_BASE_COMMIT}" ]; then
# on Murdock, there's no base branch in the checkout folder.
# Thus, tag it here.
echo "-- tagging ${CI_BASE_BRANCH} HEAD commit (${CI_BASE_COMMIT})"
git tag "${CI_BASE_BRANCH}" "${CI_BASE_COMMIT}"
fi
if [ -z "${GITHUB_RUN_ID}" ]; then
# only default to master when not running in a GitHub action
# (so GitHub can check release branches too)
CI_BASE_BRANCH=${CI_BASE_BRANCH:-master}
fi
export BASE_BRANCH="${CI_BASE_BRANCH}"
run ./dist/tools/whitespacecheck/check.sh "${BASE_BRANCH}"
DIFFFILTER="MR" ERROR_EXIT_CODE=0 run ./dist/tools/licenses/check.sh
DIFFFILTER="AC" run ./dist/tools/licenses/check.sh
run ./dist/tools/doccheck/check.sh
run ./dist/tools/externc/check.sh
# broken configuration produces many false positives
# TODO: fix config and re-enable
# run ./dist/tools/cppcheck/check.sh
run ./dist/tools/vera++/check.sh
run ./dist/tools/coccinelle/check.sh
run ./dist/tools/flake8/check.sh
run ./dist/tools/headerguards/check.sh
run ./dist/tools/buildsystem_sanity_check/check.sh
run ./dist/tools/feature_resolution/check.sh
run ./dist/tools/boards_supported/check.sh
run ./dist/tools/codespell/check.sh
if [ -z "${GITHUB_RUN_ID}" ]; then
run ./dist/tools/uncrustify/uncrustify.sh --check
else
run ./dist/tools/uncrustify/uncrustify.sh
fi
ERROR_EXIT_CODE=0 run ./dist/tools/shellcheck/check.sh
exit $RESULT
|
cgundogan/RIOT
|
dist/tools/ci/static_tests.sh
|
Shell
|
lgpl-2.1
| 3,920 |