| code (string, 2-1.05M chars) | repo_name (string, 5-110 chars) | path (string, 3-922 chars) | language (1 class) | license (15 classes) | size (int64, 2-1.05M) |
|---|---|---|---|---|---|
#!/bin/bash
#This script sets up a microservices environment based on the definitions in settings.yaml
#Author: Naveen Joy
#Usage:
#./setup.sh # execute all playbooks resulting in launching instances, deploying docker and kubernetes
#./setup.sh --tags "instances" # execute launch instances playbook section
#./setup.sh --tags "docker" # execute docker playbook section
#./setup.sh --tags "kube" # execute kubernetes playbook section
#./setup.sh --tags "iptables" # execute iptables playbook section
#./setup.sh --tags "ntp" # execute ntp playbook section
set -o xtrace
TOP_DIR=$(cd $(dirname "$0") && pwd)
if [[ ! -r ${TOP_DIR}/settings.yaml ]]; then
echo "missing $TOP_DIR/settings.yaml - cannot proceed"
exit 1
fi
if [[ ! -r ${TOP_DIR}/hosts ]]; then
echo "missing $TOP_DIR/hosts - cannot proceed"
exit 1
fi
packages=(
'ansible>=2.0'
'python-neutronclient'
'python-novaclient'
'netaddr'
)
for package in "${packages[@]}"; do
  pkg_name="${package%%>=*}"
  pkg_version="${package#*>=}"
  pip freeze | grep -q "^${pkg_name}" || { echo "${pkg_name} not found. Run: 'sudo pip install ${package}'" >&2 ; exit 1; }
  if [[ "${pkg_version}" != "${package}" ]]; then
    pip freeze | grep "^${pkg_name}" | grep -q "${pkg_version}" || { echo "${pkg_name} has incorrect version. Run: 'sudo pip install ${package}'" >&2 ; exit 1; }
  fi
done
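# For illustration only (the version numbers below are hypothetical, not taken from
# this repo), the check above is satisfied by `pip freeze` output along the lines of:
#   ansible==2.0.1
#   netaddr==0.7.18
#   python-neutronclient==4.2.0
#   python-novaclient==3.3.1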
echo "Launching Instances"
if ansible-playbook -i hosts launch-instances.yml $@; then
#Wait for the instances to boot up
echo "Waiting for instances to boot"
sleep 6
echo "Deploying Docker"
if ansible-playbook -i ${TOP_DIR}/scripts/inventory.py deploy-docker.yml $@; then
echo "Deploying Kubernetes"
ansible-playbook -i ${TOP_DIR}/scripts/inventory.py deploy-kubernetes.yml $@
if { [[ -n "$@" ]] && echo "$@" | grep -q "kube"; } || [[ ! -n "$@" ]]; then
echo "Validating Cluster"
${TOP_DIR}/scripts/validate-cluster.sh
fi
fi
fi
| naveenjoy/microservices | setup.sh | Shell | apache-2.0 | 1,967 |
#!/bin/bash
#
# Copyright IBM Corp All Rights Reserved
#
# SPDX-License-Identifier: Apache-2.0
#
# This script will orchestrate a sample end-to-end execution of the Hyperledger
# Fabric network.
#
# The end-to-end verification provisions a sample Fabric network consisting of
# two organizations, each maintaining two peers, and a “solo” ordering service.
#
# This verification makes use of two fundamental tools, which are necessary to
# create a functioning transactional network with digital signature validation
# and access control:
#
# * cryptogen - generates the x509 certificates used to identify and
# authenticate the various components in the network.
# * configtxgen - generates the requisite configuration artifacts for orderer
# bootstrap and channel creation.
#
# Each tool consumes a configuration yaml file, within which we specify the topology
# of our network (cryptogen) and the location of our certificates for various
# configuration operations (configtxgen). Once the tools have been successfully run,
# we are able to launch our network. More detail on the tools and the structure of
# the network will be provided later in this document. For now, let's get going...
# Prepend $PWD/../bin to PATH to ensure we are picking up the correct binaries.
# This may be commented out to fall back to the installed versions of the tools, if desired.
export PATH=${PWD}/../bin:${PWD}:$PATH
export FABRIC_CFG_PATH=${PWD}
# Print the usage message
function printHelp () {
echo "Usage: "
echo " byfn.sh -m up|down|restart|generate [-c <channel name>] [-t <timeout>]"
echo " byfn.sh -h|--help (print this message)"
echo " -m <mode> - one of 'up', 'down', 'restart' or 'generate'"
echo " - 'up' - bring up the network with docker-compose up"
echo " - 'down' - clear the network with docker-compose down"
echo " - 'restart' - restart the network"
echo " - 'generate' - generate required certificates and genesis block"
echo " -c <channel name> - channel name to use (defaults to \"mychannel\")"
echo " -t <timeout> - CLI timeout duration in microseconds (defaults to 10000)"
echo
echo "Typically, one would first generate the required certificates and "
echo "genesis block, then bring up the network. e.g.:"
echo
echo " byfn.sh -m generate -c <channelname>"
echo " byfn.sh -m up -c <channelname>"
echo " byfn.sh -m down -c <channelname>"
echo
echo "Taking all defaults:"
echo " byfn.sh -m generate"
echo " byfn.sh -m up"
echo " byfn.sh -m down"
}
# Ask user for confirmation to proceed
function askProceed () {
read -p "Continue (y/n)? " ans
case "$ans" in
y|Y )
echo "proceeding ..."
;;
n|N )
echo "exiting..."
exit 1
;;
* )
echo "invalid response"
askProceed
;;
esac
}
# Obtain CONTAINER_IDS and remove them
# TODO Might want to make this optional - could clear other containers
function clearContainers () {
CONTAINER_IDS=$(docker ps -aq)
if [ -z "$CONTAINER_IDS" -o "$CONTAINER_IDS" == " " ]; then
echo "---- No containers available for deletion ----"
else
docker rm -f $CONTAINER_IDS
fi
}
# Delete any images that were generated as a part of this setup
# specifically the following images are often left behind:
# TODO list generated image naming patterns
function removeUnwantedImages() {
DOCKER_IMAGE_IDS=$(docker images | grep "dev\|none\|test-vp\|peer[0-9]-" | awk '{print $3}')
if [ -z "$DOCKER_IMAGE_IDS" -o "$DOCKER_IMAGE_IDS" == " " ]; then
echo "---- No images available for deletion ----"
else
docker rmi -f $DOCKER_IMAGE_IDS
fi
}
# Generate the needed certificates, the genesis block and start the network.
function networkUp () {
# generate artifacts if they don't exist
if [ ! -d "crypto-config" ]; then
generateCerts
replacePrivateKey
generateChannelArtifacts
fi
CHANNEL_NAME=$CHANNEL_NAME TIMEOUT=$CLI_TIMEOUT docker-compose -f $COMPOSE_FILE up -d 2>&1
if [ $? -ne 0 ]; then
echo "ERROR !!!! Unable to start network"
docker logs -f cli
exit 1
fi
docker logs -f cli
}
# Tear down running network
function networkDown () {
docker-compose -f $COMPOSE_FILE down
# Don't remove containers, images, etc if restarting
if [ "$MODE" != "restart" ]; then
#Cleanup the chaincode containers
clearContainers
#Cleanup images
removeUnwantedImages
# remove orderer block and other channel configuration transactions and certs
rm -rf channel-artifacts/*.block channel-artifacts/*.tx crypto-config
# remove the docker-compose yaml file that was customized to the example
rm -f docker-compose-e2e.yaml
fi
}
# Using docker-compose-e2e-template.yaml, replace constants with private key file names
# generated by the cryptogen tool and output a docker-compose.yaml specific to this
# configuration
function replacePrivateKey () {
# sed on MacOSX does not support -i flag with a null extension. We will use
# 't' for our backup's extension and delete it at the end of the function
ARCH=`uname -s | grep Darwin`
if [ "$ARCH" == "Darwin" ]; then
OPTS="-it"
else
OPTS="-i"
fi
# Copy the template to the file that will be modified to add the private key
cp docker-compose-e2e-template.yaml docker-compose-e2e.yaml
# The next steps will replace the template's contents with the
# actual values of the private key file names for the two CAs.
CURRENT_DIR=$PWD
cd crypto-config/peerOrganizations/org1.example.com/ca/
PRIV_KEY=$(ls *_sk)
cd $CURRENT_DIR
sed $OPTS "s/CA1_PRIVATE_KEY/${PRIV_KEY}/g" docker-compose-e2e.yaml
cd crypto-config/peerOrganizations/org2.example.com/ca/
PRIV_KEY=$(ls *_sk)
cd $CURRENT_DIR
sed $OPTS "s/CA2_PRIVATE_KEY/${PRIV_KEY}/g" docker-compose-e2e.yaml
# If MacOSX, remove the temporary backup of the docker-compose file
if [ "$ARCH" == "Darwin" ]; then
rm docker-compose-e2e.yamlt
fi
}
# We will use the cryptogen tool to generate the cryptographic material (x509 certs)
# for our various network entities. The certificates are based on a standard PKI
# implementation where validation is achieved by reaching a common trust anchor.
#
# Cryptogen consumes a file - ``crypto-config.yaml`` - that contains the network
# topology and allows us to generate a library of certificates for both the
# Organizations and the components that belong to those Organizations. Each
# Organization is provisioned a unique root certificate (``ca-cert``), that binds
# specific components (peers and orderers) to that Org. Transactions and communications
# within Fabric are signed by an entity's private key (``keystore``), and then verified
# by means of a public key (``signcerts``). You will notice a "count" variable within
# this file. We use this to specify the number of peers per Organization; in our
# case it's two peers per Org. The rest of this template is extremely
# self-explanatory.
#
# After we run the tool, the certs will be parked in a folder titled ``crypto-config``.
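#
# For orientation only - the real file ships with the sample network and is not
# reproduced here - a crypto-config.yaml entry has roughly this shape (field names
# follow the standard cryptogen template):
#
#   PeerOrgs:
#     - Name: Org1
#       Domain: org1.example.com
#       Template:
#         Count: 2      # the "count" variable mentioned above: peers per Org
#       Users:
#         Count: 1
#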
# Generates Org certs using cryptogen tool
function generateCerts (){
which cryptogen
if [ "$?" -ne 0 ]; then
echo "cryptogen tool not found. exiting"
exit 1
fi
echo
echo "##########################################################"
echo "##### Generate certificates using cryptogen tool #########"
echo "##########################################################"
cryptogen generate --config=./crypto-config.yaml
if [ "$?" -ne 0 ]; then
echo "Failed to generate certificates..."
exit 1
fi
echo
}
# The ``configtxgen`` tool is used to create four artifacts: orderer **bootstrap
# block**, fabric **channel configuration transaction**, and two **anchor
# peer transactions** - one for each Peer Org.
#
# The orderer block is the genesis block for the ordering service, and the
# channel transaction file is broadcast to the orderer at channel creation
# time. The anchor peer transactions, as the name might suggest, specify each
# Org's anchor peer on this channel.
#
# Configtxgen consumes a file - ``configtx.yaml`` - that contains the definitions
# for the sample network. There are three members - one Orderer Org (``OrdererOrg``)
# and two Peer Orgs (``Org1`` & ``Org2``) each managing and maintaining two peer nodes.
# This file also specifies a consortium - ``SampleConsortium`` - consisting of our
# two Peer Orgs. Pay specific attention to the "Profiles" section at the top of
# this file. You will notice that we have two unique headers. One for the orderer genesis
# block - ``TwoOrgsOrdererGenesis`` - and one for our channel - ``TwoOrgsChannel``.
# These headers are important, as we will pass them in as arguments when we create
# our artifacts. This file also contains two additional specifications that are worth
# noting. Firstly, we specify the anchor peers for each Peer Org
# (``peer0.org1.example.com`` & ``peer0.org2.example.com``). Secondly, we point to
# the location of the MSP directory for each member, in turn allowing us to store the
# root certificates for each Org in the orderer genesis block. This is a critical
# concept. Now any network entity communicating with the ordering service can have
# its digital signature verified.
#
# This function will generate the crypto material and our four configuration
# artifacts, and subsequently output these files into the ``channel-artifacts``
# folder.
#
# If you receive the following warning, it can be safely ignored:
#
# [bccsp] GetDefault -> WARN 001 Before using BCCSP, please call InitFactories(). Falling back to bootBCCSP.
#
# You can ignore the logs regarding intermediate certs, we are not using them in
# this crypto implementation.
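#
# As a rough sketch (abridged and illustrative; the real configtx.yaml in this
# sample carries more detail), the "Profiles" section referred to above looks like:
#
#   Profiles:
#     TwoOrgsOrdererGenesis:
#       Orderer:
#         Organizations:
#           - *OrdererOrg
#       Consortiums:
#         SampleConsortium:
#           Organizations:
#             - *Org1
#             - *Org2
#     TwoOrgsChannel:
#       Consortium: SampleConsortium
#       Application:
#         Organizations:
#           - *Org1
#           - *Org2
#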
# Generate orderer genesis block, channel configuration transaction and
# anchor peer update transactions
function generateChannelArtifacts() {
which configtxgen
if [ "$?" -ne 0 ]; then
echo "configtxgen tool not found. exiting"
exit 1
fi
echo "##########################################################"
echo "######### Generating Orderer Genesis block ##############"
echo "##########################################################"
# Note: For some unknown reason (at least for now) the block file can't be
# named orderer.genesis.block or the orderer will fail to launch!
configtxgen -profile TwoOrgsOrdererGenesis -outputBlock ./channel-artifacts/genesis.block
if [ "$?" -ne 0 ]; then
echo "Failed to generate orderer genesis block..."
exit 1
fi
echo
echo "#################################################################"
echo "### Generating channel configuration transaction 'channel.tx' ###"
echo "#################################################################"
configtxgen -profile TwoOrgsChannel -outputCreateChannelTx ./channel-artifacts/channel.tx -channelID $CHANNEL_NAME
if [ "$?" -ne 0 ]; then
echo "Failed to generate channel configuration transaction..."
exit 1
fi
echo
echo "#################################################################"
echo "####### Generating anchor peer update for Org1MSP ##########"
echo "#################################################################"
configtxgen -profile TwoOrgsChannel -outputAnchorPeersUpdate ./channel-artifacts/Org1MSPanchors.tx -channelID $CHANNEL_NAME -asOrg Org1MSP
if [ "$?" -ne 0 ]; then
echo "Failed to generate anchor peer update for Org1MSP..."
exit 1
fi
echo
echo "#################################################################"
echo "####### Generating anchor peer update for Org2MSP ##########"
echo "#################################################################"
configtxgen -profile TwoOrgsChannel -outputAnchorPeersUpdate \
./channel-artifacts/Org2MSPanchors.tx -channelID $CHANNEL_NAME -asOrg Org2MSP
if [ "$?" -ne 0 ]; then
echo "Failed to generate anchor peer update for Org2MSP..."
exit 1
fi
echo
}
# Obtain the OS and Architecture string that will be used to select the correct
# native binaries for your platform
OS_ARCH=$(echo "$(uname -s|tr '[:upper:]' '[:lower:]'|sed 's/mingw64_nt.*/windows/')-$(uname -m | sed 's/x86_64/amd64/g')" | awk '{print tolower($0)}')
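# For example: on 64-bit Linux the expression above evaluates to "linux-amd64",
# and on 64-bit macOS to "darwin-amd64".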
# timeout duration - the duration the CLI should wait for a response from
# another container before giving up
CLI_TIMEOUT=10000
# channel name defaults to "mychannel"
CHANNEL_NAME="mychannel"
# use this as the default docker-compose yaml definition
COMPOSE_FILE=docker-compose-cli.yaml
# Parse commandline args
while getopts "h?m:c:t:" opt; do
case "$opt" in
h|\?)
printHelp
exit 0
;;
m) MODE=$OPTARG
;;
c) CHANNEL_NAME=$OPTARG
;;
t) CLI_TIMEOUT=$OPTARG
;;
esac
done
# Determine whether starting, stopping, restarting or generating, so we can announce it
if [ "$MODE" == "up" ]; then
EXPMODE="Starting"
elif [ "$MODE" == "down" ]; then
EXPMODE="Stopping"
elif [ "$MODE" == "restart" ]; then
EXPMODE="Restarting"
elif [ "$MODE" == "generate" ]; then
EXPMODE="Generating certs and genesis block for"
else
printHelp
exit 1
fi
# Announce what was requested
echo "${EXPMODE} with channel '${CHANNEL_NAME}' and CLI timeout of '${CLI_TIMEOUT}'"
# ask for confirmation to proceed
askProceed
#Create the network using docker compose
if [ "${MODE}" == "up" ]; then
networkUp
elif [ "${MODE}" == "down" ]; then ## Clear the network
networkDown
elif [ "${MODE}" == "generate" ]; then ## Generate Artifacts
generateCerts
replacePrivateKey
generateChannelArtifacts
elif [ "${MODE}" == "restart" ]; then ## Restart the network
networkDown
networkUp
else
printHelp
exit 1
fi
| eliza60309/fab | first-network/byfn.sh | Shell | apache-2.0 | 13,623 |
#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
export AIRFLOW_CI_SILENT=${AIRFLOW_CI_SILENT:="true"}
export PYTHON_VERSION=${PYTHON_VERSION:-3.6}
# shellcheck source=scripts/ci/_script_init.sh
. "$( dirname "${BASH_SOURCE[0]}" )/_script_init.sh"
function run_pylint_tests() {
FILES=("$@")
if [[ "${#FILES[@]}" == "0" ]]; then
docker run "${AIRFLOW_CONTAINER_EXTRA_DOCKER_FLAGS[@]}" \
--entrypoint "/usr/local/bin/dumb-init" \
--env PYTHONDONTWRITEBYTECODE \
--env AIRFLOW_CI_VERBOSE="${VERBOSE}" \
--env AIRFLOW_CI_SILENT \
--env HOST_USER_ID="$(id -ur)" \
--env HOST_GROUP_ID="$(id -gr)" \
--rm \
"${AIRFLOW_CI_IMAGE}" \
"--" "/opt/airflow/scripts/ci/in_container/run_pylint_tests.sh" \
| tee -a "${OUTPUT_LOG}"
else
docker run "${AIRFLOW_CONTAINER_EXTRA_DOCKER_FLAGS[@]}" \
--entrypoint "/usr/local/bin/dumb-init" \
--env PYTHONDONTWRITEBYTECODE \
--env AIRFLOW_CI_VERBOSE="${VERBOSE}" \
--env AIRFLOW_CI_SILENT \
--env HOST_USER_ID="$(id -ur)" \
--env HOST_GROUP_ID="$(id -gr)" \
--rm \
"${AIRFLOW_CI_IMAGE}" \
"--" "/opt/airflow/scripts/ci/in_container/run_pylint_tests.sh" "${FILES[@]}" \
| tee -a "${OUTPUT_LOG}"
fi
}
rebuild_ci_image_if_needed
if [[ "${#@}" != "0" ]]; then
filter_out_files_from_pylint_todo_list "$@"
if [[ "${#FILTERED_FILES[@]}" == "0" ]]; then
echo "Filtered out all files. Skipping pylint."
else
run_pylint_tests "${FILTERED_FILES[@]}"
fi
else
run_pylint_tests
fi
| lyft/incubator-airflow | scripts/ci/ci_pylint_tests.sh | Shell | apache-2.0 | 2,467 |
#!/bin/zsh -
#===============================================================================
#
# FILE: nvidia-osd.sh
#
# USAGE: ./nvidia-osd.sh
#
# DESCRIPTION: Shows in OSD some nvidia stats
#
# OPTIONS: ---
# REQUIREMENTS: ---
# BUGS: ---
# NOTES: ---
# AUTHOR: Cláudio "Patola" Sampaio (Patola), [email protected]
# ORGANIZATION: MakerLinux
# CREATED: 05/10/2019 14:05:51 -03
# REVISION: ---
#===============================================================================
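# Example of the OSD line assembled in the loop below (values are illustrative only):
#   FAN 35 TEMP 52C PST P2 POW 57.91W MEM 1083MiB GPU 17%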
set -o nounset # Treat unset variables as an error
trap "pkill osdsh; exit 0" TRAP INT ABRT KILL QUIT HUP TERM
/usr/bin/osdsh -p 0 -a 2 -c yellow -o 1 -d 5
while :
do
/usr/bin/osdctl -s "$(/usr/bin/nvidia-smi --query-gpu=fan.speed,temperature.gpu,pstate,power.draw,memory.used,memory.total,utilization.gpu --format=csv,noheader,nounits | read fan temp pstate power mem memtotal gpu ; echo "FAN ${fan%%,} TEMP ${temp%%,}C PST ${pstate%%,} POW ${power%%,}W MEM ${mem%%,}MiB GPU ${gpu%%,}%")"
sleep 5
done
pkill osdsh # just in case
| Patola/patolascripts | nvidia-osd.sh | Shell | apache-2.0 | 1,113 |
#!/bin/bash
source ./_setenv.sh
rm -vf *log
rm -vf *png
rm -vf osioperf.py*
rm -rvf $JOB_BASE_NAME-$BUILD_NUMBER-*
rm -rvf common.git
| ldimaggi/fabric8-test | perf_tests/osioperf/scripts/workshop-demo/_clean-locally.sh | Shell | apache-2.0 | 136 |
#!/bin/bash
mocha --harmony-proxies --timeout 100000 bin/test/Test.js #--reporter progress
| Samsung/mimic | scripts/runtests.sh | Shell | apache-2.0 | 92 |
rm -rf *o a.out core\.* *.so __pycache__ .*exe *.ttrie
| hexforge/pulp_db | src/pulp_db/kds/clean.sh | Shell | apache-2.0 | 56 |
#!/bin/bash -eu
#
# Copyright 2017 Open GEE Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Pushes databases built by run_tutorial.sh script
set -x
set -e
ASSET_ROOT="/usr/local/google/gevol_test/assets"
echo "Using asset root: $ASSET_ROOT"
for n in 1 2 3 4 5; do
  /opt/google/bin/geserveradmin --adddb "$ASSET_ROOT/Tutorial/Databases/SFinset_${n}.kdatabase/gedb.kda/ver001/gedb"
  /opt/google/bin/geserveradmin --pushdb "$ASSET_ROOT/Tutorial/Databases/SFinset_${n}.kdatabase/gedb.kda/ver001/gedb"
done
| tst-ahernandez/earthenterprise | earth_enterprise/src/fusion/tools/gee_test/publish_historical.sh | Shell | apache-2.0 | 1,888 |
#!/bin/bash
# Copyright 2012 Johns Hopkins University (Author: Daniel Povey). Apache 2.0.
# This script does MPE or MMI or state-level minimum bayes risk (sMBR) training
# of neural nets.
# Begin configuration section.
cmd=run.pl
num_epochs=4 # Number of epochs of training
learning_rate=0.00002
effective_lrate= # If supplied, overrides the learning rate, which gets set to effective_lrate * num_jobs_nnet.
acoustic_scale=0.1 # acoustic scale for MMI/MPFE/SMBR training.
criterion=smbr
boost=0.0 # option relevant for MMI
drop_frames=false # option relevant for MMI
one_silence_class=true # Option relevant for MPE/SMBR
num_jobs_nnet=4 # Number of neural net jobs to run in parallel. Note: this
# will interact with the learning rates (if you decrease
# this, you'll have to decrease the learning rate, and vice
# versa).
samples_per_iter=400000 # measured in frames, not in "examples"
modify_learning_rates=true
last_layer_factor=1.0 # relates to modify-learning-rates
first_layer_factor=1.0 # relates to modify-learning-rates
shuffle_buffer_size=5000 # This "buffer_size" variable controls randomization of the samples
# on each iter. You could set it to 0 or to a large value for complete
# randomization, but this would both consume memory and cause spikes in
# disk I/O. Smaller is easier on disk and memory but less random. It's
# not a huge deal though, as samples are anyway randomized right at the start.
stage=-8
io_opts="-tc 5" # for jobs with a lot of I/O, limits the number running at one time. These don't
num_threads=16 # this is the default but you may want to change it, e.g. to 1 if
# using GPUs.
parallel_opts="-pe smp 16 -l ram_free=1G,mem_free=1G" # by default we use 4 threads; this lets the queue know.
# note: parallel_opts doesn't automatically get adjusted if you adjust num-threads.
transform_dir= # If this is a SAT system, directory for transforms
cleanup=true
transform_dir=
degs_dir=
retroactive=false
online_ivector_dir=
# End configuration section.
echo "$0 $@" # Print the command line for logging
if [ -f path.sh ]; then . ./path.sh; fi
. parse_options.sh || exit 1;
if [ $# != 6 ]; then
echo "Usage: $0 [opts] <data> <lang> <ali-dir> <denlat-dir> <src-model-file> <exp-dir>"
echo " e.g.: $0 data/train data/lang exp/tri3_ali exp/tri4_nnet_denlats exp/tri4/final.mdl exp/tri4_mpe"
echo ""
echo "Main options (for others, see top of script file)"
echo " --config <config-file> # config file containing options"
echo " --cmd (utils/run.pl|utils/queue.pl <queue opts>) # how to run jobs."
echo " --num-epochs <#epochs|4> # Number of epochs of training"
echo " --learning-rate <learning-rate|0.0002> # Learning rate to use"
echo " --effective-lrate <effective-learning-rate> # If supplied, learning rate will be set to"
echo " # this value times num-jobs-nnet."
echo " --num-jobs-nnet <num-jobs|8> # Number of parallel jobs to use for main neural net"
echo " # training (will affect results as well as speed; try 8, 16)"
echo " # Note: if you increase this, you may want to also increase"
echo " # the learning rate."
echo " --num-threads <num-threads|16> # Number of parallel threads per job (will affect results"
echo " # as well as speed; may interact with batch size; if you increase"
echo " # this, you may want to decrease the batch size."
echo " --parallel-opts <opts|\"-pe smp 16 -l ram_free=1G,mem_free=1G\"> # extra options to pass to e.g. queue.pl for processes that"
echo " # use multiple threads... note, you might have to reduce mem_free,ram_free"
echo " # versus your defaults, because it gets multiplied by the -pe smp argument."
echo " --io-opts <opts|\"-tc 10\"> # Options given to e.g. queue.pl for jobs that do a lot of I/O."
echo " --samples-per-iter <#samples|400000> # Number of samples of data to process per iteration, per"
echo " # process."
echo " --stage <stage|-8> # Used to run a partially-completed training process from somewhere in"
echo " # the middle."
echo " --criterion <criterion|smbr> # Training criterion: may be smbr, mmi or mpfe"
echo " --boost <boost|0.0> # Boosting factor for MMI (e.g., 0.1)"
echo " --modify-learning-rates <true,false|false> # If true, modify learning rates to try to equalize relative"
echo " # changes across layers."
echo " --degs-dir <dir|""> # Directory for discriminative examples, e.g. exp/foo/degs"
echo " --drop-frames <true,false|false> # Option that affects MMI training: if true, we exclude gradients from frames"
echo " # where the numerator transition-id is not in the denominator lattice."
echo " --one-silence-class <true,false|false> # Option that affects MPE/SMBR training (will tend to reduce insertions)"
echo " --online-ivector-dir <dir|""> # Directory for online-estimated iVectors, used in the"
echo " # online-neural-net setup."
exit 1;
fi
data=$1
lang=$2
alidir=$3
denlatdir=$4
src_model=$5
dir=$6
extra_files=
[ ! -z $online_ivector_dir ] && \
extra_files="$online_ivector_dir/ivector_period $online_ivector_dir/ivector_online.scp"
# Check some files.
for f in $data/feats.scp $lang/L.fst $alidir/ali.1.gz $alidir/num_jobs $alidir/tree \
$denlatdir/lat.1.gz $denlatdir/num_jobs $src_model $extra_files; do
[ ! -f $f ] && echo "$0: no such file $f" && exit 1;
done
nj=$(cat $alidir/num_jobs) || exit 1; # caution: $nj is the number of
# splits of the denlats and alignments, but
# num_jobs_nnet is the number of nnet training
# jobs we run in parallel.
if ! [ $nj == $(cat $denlatdir/num_jobs) ]; then
echo "Number of jobs mismatch: $nj versus $(cat $denlatdir/num_jobs)"
exit 1;
fi
mkdir -p $dir/log || exit 1;
[ -z "$degs_dir" ] && mkdir -p $dir/degs
sdata=$data/split$nj
utils/split_data.sh $data $nj
# function to remove egs that might be soft links.
remove () { for x in $*; do [ -L $x ] && rm $(readlink -f $x); rm $x; done }
splice_opts=`cat $alidir/splice_opts 2>/dev/null`
silphonelist=`cat $lang/phones/silence.csl` || exit 1;
cmvn_opts=`cat $alidir/cmvn_opts 2>/dev/null`
cp $alidir/splice_opts $dir 2>/dev/null
cp $alidir/cmvn_opts $dir 2>/dev/null
cp $alidir/tree $dir
if [ ! -z "$online_ivector_dir" ]; then
ivector_period=$(cat $online_ivector_dir/ivector_period)
ivector_dim=$(feat-to-dim scp:$online_ivector_dir/ivector_online.scp -) || exit 1;
# the 'const_dim_opt' allows it to write only one iVector per example,
# rather than one per time-index... it has to average over
const_dim_opt="--const-feat-dim=$ivector_dim"
fi
## Set up features.
## Don't support deltas, only LDA or raw (mainly because deltas are less frequently used).
if [ -z $feat_type ]; then
if [ -f $alidir/final.mat ] && [ ! -f $transform_dir/raw_trans.1 ]; then feat_type=lda; else feat_type=raw; fi
fi
echo "$0: feature type is $feat_type"
case $feat_type in
raw) feats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- |"
;;
lda)
splice_opts=`cat $alidir/splice_opts 2>/dev/null`
cp $alidir/final.mat $dir
feats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | splice-feats $splice_opts ark:- ark:- | transform-feats $dir/final.mat ark:- ark:- |"
;;
*) echo "$0: invalid feature type $feat_type" && exit 1;
esac
if [ -z "$transform_dir" ]; then
if [ -f $transform_dir/trans.1 ] || [ -f $transform_dir/raw_trans.1 ]; then
transform_dir=$alidir
fi
fi
if [ ! -z "$transform_dir" ]; then
echo "$0: using transforms from $transform_dir"
[ ! -s $transform_dir/num_jobs ] && \
echo "$0: expected $transform_dir/num_jobs to contain the number of jobs." && exit 1;
nj_orig=$(cat $transform_dir/num_jobs)
if [ $feat_type == "raw" ]; then trans=raw_trans;
else trans=trans; fi
if [ $feat_type == "lda" ] && ! cmp $transform_dir/final.mat $alidir/final.mat; then
echo "$0: LDA transforms differ between $alidir and $transform_dir"
exit 1;
fi
if [ ! -f $transform_dir/$trans.1 ]; then
echo "$0: expected $transform_dir/$trans.1 to exist (--transform-dir option)"
exit 1;
fi
if [ $nj -ne $nj_orig ]; then
# Copy the transforms into an archive with an index.
for n in $(seq $nj_orig); do cat $transform_dir/$trans.$n; done | \
copy-feats ark:- ark,scp:$dir/$trans.ark,$dir/$trans.scp || exit 1;
feats="$feats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk scp:$dir/$trans.scp ark:- ark:- |"
else
# number of jobs matches with alignment dir.
feats="$feats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk ark:$transform_dir/$trans.JOB ark:- ark:- |"
fi
fi
if [ ! -z $online_ivector_dir ]; then
# add iVectors to the features.
feats="$feats paste-feats --length-tolerance=$ivector_period ark:- 'ark,s,cs:utils/filter_scp.pl $sdata/JOB/utt2spk $online_ivector_dir/ivector_online.scp | subsample-feats --n=-$ivector_period scp:- ark:- |' ark:- |"
fi
if [ -z "$degs_dir" ]; then
if [ $stage -le -8 ]; then
echo "$0: working out number of frames of training data"
num_frames=$(steps/nnet2/get_num_frames.sh $data)
echo $num_frames > $dir/num_frames
# Working out number of iterations per epoch.
iters_per_epoch=`perl -e "print int($num_frames/($samples_per_iter * $num_jobs_nnet) + 0.5);"` || exit 1;
[ $iters_per_epoch -eq 0 ] && iters_per_epoch=1
echo $iters_per_epoch > $dir/degs/iters_per_epoch || exit 1;
else
num_frames=$(cat $dir/num_frames) || exit 1;
iters_per_epoch=$(cat $dir/degs/iters_per_epoch) || exit 1;
fi
samples_per_iter_real=$[$num_frames/($num_jobs_nnet*$iters_per_epoch)]
echo "$0: Every epoch, splitting the data up into $iters_per_epoch iterations,"
echo "$0: giving samples-per-iteration of $samples_per_iter_real (you requested $samples_per_iter)."
else
iters_per_epoch=$(cat $degs_dir/iters_per_epoch) || exit 1;
[ -z "$iters_per_epoch" ] && exit 1;
echo "$0: Every epoch, splitting the data up into $iters_per_epoch iterations"
fi
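# Worked example for the arithmetic above (numbers are hypothetical, not from this
# recipe): with num_frames=8,000,000, samples_per_iter=400,000 and num_jobs_nnet=4,
# iters_per_epoch = int(8000000/(400000*4) + 0.5) = 5, giving a real
# samples-per-iteration of 8000000/(4*5) = 400,000.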
# we create these data links regardless of the stage, as there are situations where we
# would want to recreate a data link that had previously been deleted.
if [ -z "$degs_dir" ] && [ -d $dir/degs/storage ]; then
echo "$0: creating data links for distributed storage of degs"
# See utils/create_split_dir.pl for how this 'storage' directory
# is created.
for x in $(seq $num_jobs_nnet); do
for y in $(seq $nj); do
utils/create_data_link.pl $dir/degs/degs_orig.$x.$y.ark
done
for z in $(seq 0 $[$iters_per_epoch-1]); do
utils/create_data_link.pl $dir/degs/degs_tmp.$x.$z.ark
utils/create_data_link.pl $dir/degs/degs.$x.$z.ark
done
done
fi
if [ $stage -le -7 ]; then
echo "$0: Copying initial model and modifying preconditioning setup"
# Note, the baseline model probably had preconditioning, and we'll keep it;
# but we want online preconditioning with a larger number of samples of
# history, since in this setup the frames are only randomized at the segment
# level so they are highly correlated. It might make sense to tune this a
# little, later on, although I doubt it matters once the --num-samples-history
# is large enough.
if [ ! -z "$effective_lrate" ]; then
learning_rate=$(perl -e "print ($num_jobs_nnet*$effective_lrate);")
echo "$0: setting learning rate to $learning_rate = --num-jobs-nnet * --effective-lrate."
fi
$cmd $dir/log/convert.log \
nnet-am-copy --learning-rate=$learning_rate "$src_model" - \| \
nnet-am-switch-preconditioning --num-samples-history=50000 - $dir/0.mdl || exit 1;
fi
if [ $stage -le -6 ] && [ -z "$degs_dir" ]; then
echo "$0: getting initial training examples by splitting lattices"
egs_list=
for n in `seq 1 $num_jobs_nnet`; do
egs_list="$egs_list ark:$dir/degs/degs_orig.$n.JOB.ark"
done
$cmd $io_opts JOB=1:$nj $dir/log/get_egs.JOB.log \
nnet-get-egs-discriminative --criterion=$criterion --drop-frames=$drop_frames \
$dir/0.mdl "$feats" \
"ark,s,cs:gunzip -c $alidir/ali.JOB.gz |" \
"ark,s,cs:gunzip -c $denlatdir/lat.JOB.gz|" ark:- \| \
nnet-copy-egs-discriminative $const_dim_opt ark:- $egs_list || exit 1;
fi
if [ $stage -le -5 ] && [ -z "$degs_dir" ]; then
echo "$0: rearranging examples into parts for different parallel jobs"
# combine all the "egs_orig.JOB.*.scp" (over the $nj splits of the data) and
# then split into multiple parts egs.JOB.*.scp for different parts of the
# data, 0 .. $iters_per_epoch-1.
if [ $iters_per_epoch -eq 1 ]; then
echo "Since iters-per-epoch == 1, just concatenating the data."
for n in `seq 1 $num_jobs_nnet`; do
cat $dir/degs/degs_orig.$n.*.ark > $dir/degs/degs_tmp.$n.0.ark || exit 1;
remove $dir/degs/degs_orig.$n.*.ark # don't "|| exit 1", due to NFS bugs...
done
else # We'll have to split it up using nnet-copy-egs.
egs_list=
for n in `seq 0 $[$iters_per_epoch-1]`; do
egs_list="$egs_list ark:$dir/degs/degs_tmp.JOB.$n.ark"
done
$cmd $io_opts JOB=1:$num_jobs_nnet $dir/log/split_egs.JOB.log \
nnet-copy-egs-discriminative --srand=JOB \
"ark:cat $dir/degs/degs_orig.JOB.*.ark|" $egs_list || exit 1;
remove $dir/degs/degs_orig.*.*.ark
fi
fi
if [ $stage -le -4 ] && [ -z "$degs_dir" ]; then
# Next, shuffle the order of the examples in each of those files.
# Each one should not be too large, so we can do this in memory.
# Then combine the examples together to form suitable-size minibatches
# (for discriminative examples, it's one example per minibatch, so we
# have to combine the lattices).
echo "Shuffling the order of training examples"
echo "(in order to avoid stressing the disk, these won't all run at once)."
# note, the "|| true" below is a workaround for NFS bugs
# we encountered running this script with Debian-7, NFS-v4.
# Also, we should note that we used to do nnet-combine-egs-discriminative
# at this stage, but if iVectors are used this would expand the size of
# the examples on disk (because they could no longer be stored in the spk_info
# variable of the discriminative example, no longer being constant), so
# now we do the nnet-combine-egs-discriminative operation on the fly during
# training.
for n in `seq 0 $[$iters_per_epoch-1]`; do
$cmd $io_opts JOB=1:$num_jobs_nnet $dir/log/shuffle.$n.JOB.log \
nnet-shuffle-egs-discriminative "--srand=\$[JOB+($num_jobs_nnet*$n)]" \
ark:$dir/degs/degs_tmp.JOB.$n.ark ark:$dir/degs/degs.JOB.$n.ark || exit 1;
remove $dir/degs/degs_tmp.*.$n.ark
done
fi
if [ -z "$degs_dir" ]; then
degs_dir=$dir/degs
fi
num_iters=$[$num_epochs * $iters_per_epoch];
echo "$0: Will train for $num_epochs epochs = $num_iters iterations"
if [ $num_threads -eq 1 ]; then
train_suffix="-simple" # this enables us to use GPU code if
# we have just one thread.
else
train_suffix="-parallel --num-threads=$num_threads"
fi
x=0
while [ $x -lt $num_iters ]; do
if [ $stage -le $x ]; then
echo "Training neural net (pass $x)"
$cmd $parallel_opts JOB=1:$num_jobs_nnet $dir/log/train.$x.JOB.log \
nnet-train-discriminative$train_suffix --silence-phones=$silphonelist \
--criterion=$criterion --drop-frames=$drop_frames \
--one-silence-class=$one_silence_class --boost=$boost \
--acoustic-scale=$acoustic_scale $dir/$x.mdl \
"ark:nnet-combine-egs-discriminative ark:$degs_dir/degs.JOB.$[$x%$iters_per_epoch].ark ark:- |" \
$dir/$[$x+1].JOB.mdl \
|| exit 1;
nnets_list=$(for n in $(seq $num_jobs_nnet); do echo $dir/$[$x+1].$n.mdl; done)
$cmd $dir/log/average.$x.log \
nnet-am-average $nnets_list $dir/$[$x+1].mdl || exit 1;
if $modify_learning_rates; then
$cmd $dir/log/modify_learning_rates.$x.log \
nnet-modify-learning-rates --retroactive=$retroactive \
--last-layer-factor=$last_layer_factor \
--first-layer-factor=$first_layer_factor \
$dir/$x.mdl $dir/$[$x+1].mdl $dir/$[$x+1].mdl || exit 1;
fi
rm $nnets_list
fi
x=$[$x+1]
done
rm $dir/final.mdl 2>/dev/null
ln -s $x.mdl $dir/final.mdl
echo Done
if $cleanup; then
echo Cleaning up data
echo Removing training examples
if [ -d $dir/degs ] && [ ! -L $dir/degs ]; then # only remove if directory is not a soft link.
remove $dir/degs/degs.*
fi
echo Removing most of the models
for x in `seq 0 $num_iters`; do
if [ $[$x%$iters_per_epoch] -ne 0 ]; then
# delete all but the epoch-final models.
rm $dir/$x.mdl 2>/dev/null
fi
done
fi
for n in $(seq 0 $num_epochs); do
x=$[$n*$iters_per_epoch]
rm $dir/epoch$n.mdl 2>/dev/null
ln -s $x.mdl $dir/epoch$n.mdl
done
| thorsonlinguistics/german-neutralization | steps/nnet2/train_discriminative.sh | Shell | apache-2.0 | 17,976 |
#!/bin/bash
# Copyright (C) 2017 SUSE LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e
## --->
# Project-specific options and functions. In *theory* you shouldn't need to
# touch anything else in this script in order to use this elsewhere.
: "${LIBSECCOMP_VERSION:=2.5.2}"
project="runc"
root="$(readlink -f "$(dirname "${BASH_SOURCE[0]}")/..")"
# shellcheck source=./script/lib.sh
source "$root/script/lib.sh"
# This function takes an output path as an argument, where the built
# (preferably static) binary should be placed.
# Parameters:
# $1 -- destination directory to place build artefacts to.
# $2 -- native architecture (a .suffix for a native binary file name).
# $@ -- additional architectures to cross-build for.
function build_project() {
local builddir
builddir="$(dirname "$1")"
shift
local native_arch="$1"
shift
local arches=("$@")
# Assume that if /opt/libseccomp exists, then we are run
# via Dockerfile, and seccomp is already built.
local seccompdir=/opt/libseccomp temp_dir
if [ ! -d "$seccompdir" ]; then
temp_dir="$(mktemp -d)"
seccompdir="$temp_dir"
# Download and build libseccomp.
"$root/script/seccomp.sh" "$LIBSECCOMP_VERSION" "$seccompdir" "${arches[@]}"
fi
# For reproducible builds, add these to EXTRA_LDFLAGS:
# -w to disable DWARF generation;
# -s to disable symbol table;
# -buildid= to remove variable build id.
local ldflags="-w -s -buildid="
# Add -a to go build flags to make sure it links against
# the provided libseccomp, not the system one (otherwise
# it can reuse cached pkg-config results).
local make_args=(COMMIT_NO= EXTRA_FLAGS="-a" EXTRA_LDFLAGS="${ldflags}" static)
# Build natively.
make -C "$root" \
PKG_CONFIG_PATH="$seccompdir/lib/pkgconfig" \
"${make_args[@]}"
strip "$root/$project"
# Sanity check: make sure libseccomp version is as expected.
local ver
ver=$("$root/$project" --version | awk '$1 == "libseccomp:" {print $2}')
if [ "$ver" != "$LIBSECCOMP_VERSION" ]; then
echo >&2 "libseccomp version mismatch: want $LIBSECCOMP_VERSION, got $ver"
exit 1
fi
mv "$root/$project" "$builddir/$project.$native_arch"
# Cross-build for other architectures.
local arch
for arch in "${arches[@]}"; do
set_cross_vars "$arch"
make -C "$root" \
PKG_CONFIG_PATH="$seccompdir/$arch/lib/pkgconfig" \
"${make_args[@]}"
"$STRIP" "$root/$project"
mv "$root/$project" "$builddir/$project.$arch"
done
# Copy libseccomp source tarball.
cp "$seccompdir"/src/* "$builddir"
# Clean up.
if [ -n "$tempdir" ]; then
rm -rf "$tempdir"
fi
}
# End of the easy-to-configure portion.
## <---
# Print usage information.
function usage() {
echo "usage: release.sh [-S <gpg-key-id>] [-c <commit-ish>] [-r <release-dir>] [-v <version>] [-a <cross-arch>]" >&2
exit 1
}
# Log something to stderr.
function log() {
echo "[*] $*" >&2
}
# Log something to stderr and then exit with 0.
function bail() {
log "$@"
exit 0
}
# Conduct a sanity-check to make sure that GPG provided with the given
# arguments can sign something. Inability to sign things is not a fatal error.
function gpg_cansign() {
gpg "$@" --clear-sign </dev/null >/dev/null
}
# When creating releases we need to build static binaries, an archive of the
# current commit, and generate detached signatures for both.
keyid=""
commit="HEAD"
version=""
releasedir=""
hashcmd=""
declare -a add_arches
while getopts "S:c:r:v:h:a:" opt; do
case "$opt" in
S)
keyid="$OPTARG"
;;
c)
commit="$OPTARG"
;;
r)
releasedir="$OPTARG"
;;
v)
version="$OPTARG"
;;
h)
hashcmd="$OPTARG"
;;
a)
add_arches+=("$OPTARG")
;;
:)
echo "Missing argument: -$OPTARG" >&2
usage
;;
\?)
echo "Invalid option: -$OPTARG" >&2
usage
;;
esac
done
version="${version:-$(<"$root/VERSION")}"
releasedir="${releasedir:-release/$version}"
hashcmd="${hashcmd:-sha256sum}"
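# Example invocation (all values below are hypothetical):
#   ./script/release.sh -S 0x0123456789ABCDEF -c v1.1.0 -r release/1.1.0 -v 1.1.0 -a arm64
# Anything omitted falls back to the defaults computed above.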
native_arch="$(go env GOARCH || echo "amd64")"
# Suffixes of files to checksum/sign.
suffixes=("$native_arch" "${add_arches[@]}" tar.xz)
log "creating $project release in '$releasedir'"
log " version: $version"
log " commit: $commit"
log " key: ${keyid:-DEFAULT}"
log " hash: $hashcmd"
# Make explicit what we're doing.
set -x
# Make the release directory.
rm -rf "$releasedir" && mkdir -p "$releasedir"
# Build project.
build_project "$releasedir/$project" "$native_arch" "${add_arches[@]}"
# Generate new archive.
git archive --format=tar --prefix="$project-$version/" "$commit" | xz >"$releasedir/$project.tar.xz"
# Generate sha256 checksums for binaries and libseccomp tarball.
(
cd "$releasedir"
# Add $project. prefix to all suffixes.
"$hashcmd" "${suffixes[@]/#/$project.}" >"$project.$hashcmd"
)
# Set up the gpgflags.
gpgflags=()
[[ "$keyid" ]] && gpgflags=(--default-key "$keyid")
gpg_cansign "${gpgflags[@]}" || bail "Could not find suitable GPG key, skipping signing step."
# Sign everything.
for sfx in "${suffixes[@]}"; do
gpg "${gpgflags[@]}" --detach-sign --armor "$releasedir/$project.$sfx"
done
gpg "${gpgflags[@]}" --clear-sign --armor \
--output "$releasedir/$project.$hashcmd"{.tmp,} &&
mv "$releasedir/$project.$hashcmd"{.tmp,}
| rhatdan/runc | script/release.sh | Shell | apache-2.0 | 5,643 |
#!/bin/sh
selenium() {
export DISPLAY=:0
java -Dwebdriver.chrome.driver=/usr/lib/chromium-browser/chromedriver -jar selenium-html-runner-3.5.3.jar -htmlSuite "*googlechrome" "http://localhost:8080/vivo/" "suites/$1.html" "/work/reports/$1/"
}
selenese() {
export DISPLAY=:0
java -jar selenese-runner.jar --width 1920 --height 1080 --chromedriver /usr/lib/chromium-browser/chromedriver --baseurl "http://localhost:8080/vivo/" --html-result "/work/reports/$1/" --screenshot-on-fail "/work/reports/$1/" "suites/$1.html"
# java -jar selenese-runner.jar --chromedriver /usr/lib/chromium-browser/chromedriver --baseurl "http://localhost:8080/vivo/" --html-result "/work/reports/$1/" --screenshot-all "/work/reports/$1/" "suites/$1.html"
}
cd ~/provision
./clear_reports.sh
./reset_vivo_db.sh
./tomcat.sh
selenese InitialSetup
./reset_vivo_db.sh
selenese AddNonPersonThings
./reset_vivo_db.sh
selenese AddPublications
./reset_vivo_db.sh
selenese AddRoles
./reset_vivo_db.sh
selenese AdminSecurity
./reset_vivo_db.sh
selenese CheckMapOfScience
./reset_vivo_db.sh
selenese CheckVisualizations
./reset_vivo_db.sh
selenese CreateAccounts
./reset_vivo_db.sh
selenese CreateClasses
./reset_vivo_db.sh
selenese CreatePeople
./reset_vivo_db.sh
selenese CreateProperties
./reset_vivo_db.sh
selenese CustomAdvisesForm
./reset_vivo_db.sh
selenese CustomAwardsForm
./reset_vivo_db.sh
selenese CustomEducationForm
./reset_vivo_db.sh
selenese CustomPositionsForm
./reset_vivo_db.sh
selenese LanguageSupport
./reset_vivo_db.sh
selenese OntologyManagement
./reset_vivo_db.sh
selenese ProcessRDFData
./reset_vivo_db.sh
selenese ProxyEditing
./reset_vivo_db.sh
selenese SearchBoost
./reset_vivo_db.sh
selenese SearchTextDiscovery
./reset_vivo_db.sh
selenese SelfEditing
#./reset_vivo_db.sh
#selenese ShortViews
#./reset_vivo_db.sh
#selenese SparqlQueryApi
#./reset_vivo_db.sh
#selenese SparqlUpdateApi
#./reset_vivo_db.sh
#selenese LinkedOpenData
#./reset_vivo_db.sh
#selenese SearchExclusion
| grahamtriggs/acceptance-tests | provision/selenium.sh | Shell | apache-2.0 | 1,986 |
/home/team1/seokwoo/storm/bin/storm "$@"
| dke-knu/i2am | sh-dir/stormCmd.sh | Shell | apache-2.0 | 39 |
#!/bin/bash
# Set pipefail to get status code of xcodebuild if it fails
set -v -o pipefail
# Test
xcodebuild -enableCodeCoverage YES -project SwiftGraph.xcodeproj -scheme SwiftGraph test | xcpretty
# xcodebuild -enableCodeCoverage NO -project SwiftGraph.xcodeproj -scheme SwiftGraphPerformanceTests test | xcpretty
| davecom/SwiftGraph | BuildScripts/osx_script.sh | Shell | apache-2.0 | 315 |
#!/bin/bash
set -ex
rm -rf example.git checkout terraform.tfstate terraform.tfstate.backup
mkdir example.git
cd example.git
git init
touch .exists
git add .exists
git commit -m"Initial commit"
git checkout -b move_HEAD
cd ..
terraform apply
terraform apply
cd checkout
git fetch
# We did do a commit
git log origin/master | grep 'Created by terraform gitfile_commit'
if [ ! -L terraform ]; then
exit 1
fi
if [ "$(readlink terraform)" != "/etc/passwd" ]; then
exit 1
fi
| Yelp/terraform-provider-gitfile | test/test_symlink/test.sh | Shell | apache-2.0 | 477 |
#!/bin/sh
exec /usr/bin/spawn-fcgi -n -P /run/fcgiwrap.pid -F '1' -s '/run/fcgiwrap.socket' -u 'nginx' -U 'nginx' -g 'nginx' -G 'nginx' -- /usr/bin/fcgiwrap -f
| amazeeio/lagoon | local-dev/git/service/fcgiwrap.sh | Shell | apache-2.0 | 160 |
# Prerequs
# - development happens on a branch != master, changes are commited
# - ssh keys set up for communication with nodes
# - devstack runs under the stack user
# - script resides in the git repo
# list of hostnames/ips that should be updated
# nodes=( hostname1 hostname2 192.168.0.1 )
nodes=( )
# list of devstack services to restart
# services=( q-svc q-agt n-cpu )
services=( )
# get the current local branch to push to the remote nodes
branch=$(git rev-parse --abbrev-ref HEAD)
for node in "${nodes[@]}"; do
echo "*** Processing node $node"
ssh stack@$node 'cd /opt/stack/neutron; git checkout master'
git push $node $branch --force
ssh stack@$node 'cd /opt/stack/neutron; git checkout '"$branch"
for service in "${services[@]}"; do
echo "** restarting service $service"
ssh stack@$node "screen -X -p $service stuff '^C'"
ssh stack@$node "screen -X -p $service stuff '!!'"
#ssh stack@$node "screen -X -p $service stuff '^[[A''"
ssh stack@$node "screen -X -p $service stuff '\r'"
done
done
| scheuran/openstack-tooling | development/update_devstack_nodes.sh | Shell | apache-2.0 | 1,074 |
#!/bin/bash
# Copyright 2017 Google Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
set -x
. $(dirname $0)/../common.sh
rm -rf $CORPUS fuzz-*.log
mkdir $CORPUS
[ -e $EXECUTABLE_NAME_BASE ] && ./$EXECUTABLE_NAME_BASE -artifact_prefix=$CORPUS/ -jobs=$JOBS -workers=$JOBS $LIBFUZZER_FLAGS $CORPUS $SCRIPT_DIR/seeds
grep 'ERROR: AddressSanitizer: heap-buffer-overflow' fuzz-0.log || exit 1
| google/fuzzer-test-suite | lcms-2017-03-21/test-libfuzzer.sh | Shell | apache-2.0 | 432 |
#!/bin/sh
################################################################################
##
## Licensed to the Apache Software Foundation (ASF) under one or more
## contributor license agreements. See the NOTICE file distributed with
## this work for additional information regarding copyright ownership.
## The ASF licenses this file to You under the Apache License, Version 2.0
## (the "License"); you may not use this file except in compliance with
## the License. You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
################################################################################
set -e
# Merges the release branch to the master branch.
# Uses the version id from gradle.properties to identify the branch.
# Prompts before taking actions.
#
# Run from the root of the release management git clone.
. `dirname $0`/common.sh
setUsage "`basename $0`"
handleHelp "$@"
noExtraArgs "$@"
checkEdgentSourceRootGitDie
checkUsingMgmtCloneWarn || confirm "Proceed using this clone?" || exit
VER=`getEdgentVer gradle`
RELEASE_BRANCH=`getReleaseBranch $VER`
(set -x; git checkout -q master)
(set -x; git status)
confirm "Proceed to refresh the local master branch prior to merging?" || exit
(set -x; git pull origin master)
echo
echo "If you proceed to merge and there are conflicts you will need to"
echo "fix the conflicts and then commit the merge and push:"
echo " git status # see the conflicts"
echo " ... fix the conflicts
echo " git commit -m \"merged ${RELEASE_BRANCH}\""
echo " git push origin master"
echo "If you choose not to proceed you may run this script again later."
confirm "Proceed to (no-commit) merge branch ${RELEASE_BRANCH} to master?" || exit
(set -x; git merge --no-commit --no-ff ${RELEASE_BRANCH})
echo
echo "If you choose not to proceed you will need to manually complete the"
echo "merge and push:"
echo " git commit -m \"merged ${RELEASE_BRANCH}\""
echo " git push origin master"
confirm "Proceed to commit the merge and push?" || exit
(set -x; git commit -m "merged ${RELEASE_BRANCH}")
(set -x; git push origin master)
| vdogaru/incubator-quarks | buildTools/merge_release.sh | Shell | apache-2.0 | 2,476 |
#!/bin/bash
# Copyright 2017-2020 The Verible Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Tests verible-verilog-diff --mode=format
declare -r MY_INPUT_FILE1="${TEST_TMPDIR}/myinput1.txt"
declare -r MY_INPUT_FILE2="${TEST_TMPDIR}/myinput2.txt"
# Get tool from argument
[[ "$#" == 1 ]] || {
echo "Expecting 1 positional argument, verible-verilog-diff path."
exit 1
}
difftool="$(rlocation ${TEST_WORKSPACE}/${1})"
# contains lexical error token "1mm"
cat >${MY_INPUT_FILE1} <<EOF
module 1mm ;endmodule
EOF
cat >${MY_INPUT_FILE2} <<EOF
module
m
;
endmodule
EOF
# Run verible-verilog-diff.
"${difftool}" --mode=format "${MY_INPUT_FILE1}" "${MY_INPUT_FILE2}"
[[ $? -eq 1 ]] || exit 1
# switch files
"${difftool}" --mode=format "${MY_INPUT_FILE2}" "${MY_INPUT_FILE1}"
[[ $? -eq 1 ]] || exit 2
echo "PASS"
| chipsalliance/verible | verilog/tools/diff/diff_format_lex_error_test.sh | Shell | apache-2.0 | 1,336 |
#!/bin/bash
#
# by Haraguroicha
tmpNumber=`date +%Y%m%d%H%M%S`
function uptimed() {
t=$(uptime | sed -e 's/,//g' | awk '{print $2" "$3" "$4", "$5}')
tup=$(echo $t | grep user)
ttup=$(echo $t | grep mins)
if [ "$tup" = "" ]
then
if [ "$ttup" = "" ]
then
tt=$t
else
tt=$(echo $t | sed -e 's/,//g' | awk '{print $1" "$2" "$3}')
fi
else
tt=$(echo $t | awk '{print $1" "$2}')
fi
echo -e "Uptime:\t\t$tt"
}
function getWIFIStatus() {
echo Wifi Status:
/System/Library/PrivateFrameworks/Apple80211.framework/Versions/Current/Resources/airport -I | sed -e 's/ //g' | awk '{s=$1;printf("%20s %s\n", s, $2)}' | sed -e 's// /g' | sed -e 's/: 0:/: 00:/g' | sed -e 's/:0:/:00:/g' | sed -e 's/:0:/:00:/g' | sed -e 's/:0/:00/g' | sed -e 's/000/00/g'
}
function deviceInfo() {
echo Devices address:
devs=$(ifconfig -lu)
for dev in $devs
do
devsh=$(echo -e '' | awk '{print "ifconfig \$dev | grep -v tunnel | grep -v ::1 | grep -v 127.0.0.1 | grep inet | grep -v fe80: | sed -e \x27s/inet6/IPv6/g\x27 | sed -e \x27s/inet/IPv4/g\x27 | sed -e \x27s/temporary/temporary temporary/g\x27 | awk \x27{print \"echo ${dev} \"$1\":\x9\"$2\" \"$7}\x27\\n"}')
echo dev=$dev > /tmp/tmp${tmpNumber}.if.sh
echo -e $devsh >> /tmp/tmp${tmpNumber}.if.sh
echo dev=$dev >> /tmp/tmp${tmpNumber}.if.result.sh
chmod +x /tmp/tmp${tmpNumber}.if.sh
chmod +x /tmp/tmp${tmpNumber}.if.result.sh
/tmp/tmp${tmpNumber}.if.sh $dev >> /tmp/tmp${tmpNumber}.if.result.sh
/tmp/tmp${tmpNumber}.if.result.sh | awk '{s=$3;printf("%5s %s %s\n", $1, $2, s)}'
rm /tmp/tmp${tmpNumber}.if.sh
rm /tmp/tmp${tmpNumber}.if.result.sh
done
tundev=`ifconfig -l | sed -e 's/[ ]/\\\\n/g'`
tundev=(`echo -e $tundev | grep -E '^tun0|^gif0'`)
for((i=0; i<${#tundev[@]}; i++)); do
_tunipv6=$(ifconfig ${tundev[$i]} | grep inet6 | grep '>' | awk '{print $2}')
if [ "$_tunipv6" != "" ]
then
if [ "$tunipv6" = "" ]
then
tunipv6=$_tunipv6
ipv6dev=$(echo ${tundev[$i]} | sed -e 's/0//g')
fi
fi
done
if [ "$tunipv6" = "" ]; then
tunipv6="not available"
fi
echo -e "IPv6 $ipv6dev address: ${tunipv6}"
gw6c=`pgrep gw6c`
if [ "${gw6c}" = "" ]; then
gw6c="gw6c is not running"
fi
echo -e " gw6c PID: ${gw6c}"
}
function getIP() {
addr=$( echo -e `curl --connect-timeout 1 --compressed gzip --url http://$1/ip/ 2>/dev/null|sed -e 's/[,]/\\\\t/g'|sed -e 's/callback//g'|sed -e 's/[(){}]//g'`|awk '{print $1}'|sed -e 's/"//g'|sed -e 's/ip://g')
addrIP4=`echo "$addr"|grep "\\."`
addrIP6=`echo "$addr"|grep ":"`
addr="${addrIP4}${addrIP6}"
if [ "${addrIP}" = "" ]; then
echo -e "${addr}"
else
echo ""
fi
}
function getIPv4v6() {
dsColor="\033[0m"
ds=""
dsIP=""
v4IP=""
v6IP=""
v6=""
ipv4=$(getIP "ipv4.test-ipv6.com")
ipv6=$(getIP "ipv6.test-ipv6.com")
ipds=$(getIP "ds.test-ipv6.com")
if [ "$ipv4" != "" ]; then
v4IP="\n IPv4: ${ipv4}"
fi
if [ "$ipv6" != "$ipds" ] && [ "$ipv6" != "" ]; then
ds="\n\t\033[31mYou have IPv6, but your system is avoiding to use.\033[0m"
dsColor="\033[31m"
fi
if [ "$ipv4" = "" ] && [ "$ipv6" != "" ] && [ "$ipds" = "$ipv6" ]; then
ds="\n\t\033[33mYou have IPv6 only.\033[0m"
dsColor="\033[33m"
fi
if [ "$ipv6" != "" ]; then
v6IP="\n IPv6: ${ipv6}"
dsIP="\n DSIP: ${dsColor}${ipds}\033[0m"
fi
cip="${v4IP}${v6IP}${dsIP}${ds}"
if [ "$cip" = "" ]; then
cip="\n\t\033[33mCurrently, you don't have any connection.\033[0m"
fi
echo -e "Current External IP:${cip}"
}
function dnsInfo() {
dnsip=$(echo `cat /etc/resolv.conf|grep nameserver|awk '{print $2}'`)
if [ "$dnsip" != "" ]; then
dns=`echo $dnsip|sed -e 's/ /,\\\\n /g'`
echo -e "DNS Server: ${dns}"
for dip in $dnsip; do
dnsName=`nslookup $dip -timeout=1 | grep 'name = ' | awk '{print $4}'`
if [ "$dnsName" != "" ]; then
dig -t A $dnsName +noall +answer|grep A|grep -v ';'|grep -v '^$'|awk '{print "\t"$1"["$5"]"}'|sed -e 's/\.\[/ [/' > /tmp/dns${tmpNumber}.tmp
dig -t AAAA $dnsName +noall +answer|grep AAAA|grep -v ';'|grep -v '^$'|awk '{print "\t"$1"["$5"]"}'|sed -e 's/\.\[/ [/' >> /tmp/dns${tmpNumber}.tmp
sort /tmp/dns${tmpNumber}.tmp
rm /tmp/dns${tmpNumber}.tmp
else
echo -e "\033[31mWarning: Your dns server of '${dip}' doesn't have reverse record\033[0m"
fi
done
else
echo -e "DNS Server: not available"
fi
}
uptimed;
getWIFIStatus;
deviceInfo;
getIPv4v6;
dnsInfo;
| CPRTeam/shellTools | if.sh | Shell | apache-2.0 | 4,462 |
#!/usr/bin/env bash
set -euo pipefail
SOURCE_DIR=$1
NEW_CHECKSUM=$(./falco --list -N | sha256sum | awk '{print $1}')
CUR_CHECKSUM=$(grep FALCO_FIELDS_CHECKSUM "${SOURCE_DIR}/userspace/engine/falco_engine_version.h" | awk '{print $3}' | sed -e 's/"//g')
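# The grep/awk/sed above assumes the checksum is recorded in falco_engine_version.h
# on a line of roughly this shape (illustrative, reconstructed from the parsing
# rather than copied from the real header):
#   #define FALCO_FIELDS_CHECKSUM "2f9a0c...d41d8cd9"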
if [ "$NEW_CHECKSUM" != "$CUR_CHECKSUM" ]; then
echo "Set of fields supported has changed (new checksum $NEW_CHECKSUM != old checksum $CUR_CHECKSUM)."
echo "Update checksum and/or version in falco_engine_version.h."
exit 1
fi
exit 0
| draios/falco | userspace/falco/verify_engine_fields.sh | Shell | apache-2.0 | 499 |
#!/bin/bash
cd /opt/kylo/setup
wget https://archive.apache.org/dist/activemq/5.15.0/apache-activemq-5.15.0-bin.tar.gz -P ./activemq
# Modify to DEB file if necessary
# wget https://download.elasticsearch.org/elasticsearch/release/org/elasticsearch/distribution/deb/elasticsearch/2.3.0/elasticsearch-2.3.0.deb -P ./elasticsearch/
wget https://download.elasticsearch.org/elasticsearch/release/org/elasticsearch/distribution/rpm/elasticsearch/2.3.0/elasticsearch-2.3.0.rpm -P ./elasticsearch/
wget --no-check-certificate --no-cookies --header "Cookie: oraclelicense=accept-securebackup-cookie" http://download.oracle.com/otn-pub/java/jdk/8u131-b11/d54c1d3a095b4ff2b6607d096fa80163/jdk-8u131-linux-x64.tar.gz -P ./java
wget --no-check-certificate --no-cookies --header "Cookie: oraclelicense=accept-securebackup-cookie" http://download.oracle.com/otn-pub/java/jce/8/jce_policy-8.zip -P ./java
wget https://archive.apache.org/dist/nifi/1.0.0/nifi-1.0.0-bin.tar.gz -P ./nifi
cp /opt/kylo/kylo-services/lib/mariadb-java-client-*.jar ./nifi
tar -cvf kylo-install.tar *
|
peter-gergely-horvath/kylo
|
install/setup/generate-offline-install.sh
|
Shell
|
apache-2.0
| 1,069 |
if [ "$1" = '--stats' ]; then
shift
STATS=1
fi
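# Inferred usage (not stated in the original script):
#   ./launch.sh [--stats] <test-dir> <stage>
# where <stage> is 1 (clean state + log server), 2 (mininet topology),
# or 3 (SDX control plane: refmon, xctrl, arproxy, route server, participants, exabgp).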
RUN_DIR=~/iSDX
RIBS_DIR=$RUN_DIR/xrs/ribs
TEST_DIR=$1
LOG_FILE=SDXLog.log
set -x
case $2 in
(1)
if [ ! -d $RIBS_DIR ]
then
mkdir $RIBS_DIR
fi
cd $RUN_DIR
sh pctrl/clean.sh
rm -f $LOG_FILE
python logServer.py $LOG_FILE
;;
(2)
# the following gets around issues with vagrant direct mount
cd ~
sudo python $RUN_DIR/examples/$TEST_DIR/mininet/simple_sdx.py
#cd $RUN_DIR/examples/$TEST_DIR/mininext
#sudo ./sdx_mininext.py
;;
(3)
cd $RUN_DIR/flanc
if [ -n "$STATS" ]; then
export GAUGE_CONFIG=$RUN_DIR/examples/$TEST_DIR/config/gauge.conf
STATS_APP=stats/gauge.py
fi
ryu-manager $STATS_APP ryu.app.ofctl_rest refmon.py --refmon-config $RUN_DIR/examples/$TEST_DIR/config/sdx_global.cfg &
sleep 1
cd $RUN_DIR/xctrl
python xctrl.py $RUN_DIR/examples/$TEST_DIR/config/sdx_global.cfg
cd $RUN_DIR/arproxy
sudo python arproxy.py $TEST_DIR &
sleep 1
cd $RUN_DIR/xrs
sudo python route_server.py $TEST_DIR &
sleep 1
cd $RUN_DIR/pctrl
sudo python participant_controller.py $TEST_DIR 1 &
sudo python participant_controller.py $TEST_DIR 2 &
sudo python participant_controller.py $TEST_DIR 3 &
sleep 1
cd $RUN_DIR
exabgp examples/$TEST_DIR/config/bgp.conf
;;
esac
|
h2020-endeavour/iSDX
|
launch.sh
|
Shell
|
apache-2.0
| 1,511 |
#!/usr/bin/env bash
if [ "$TRAVIS_BRANCH" == 'develop' ] || [ "$TRAVIS_BRANCH" == 'master' ]; then
if [ "$TRAVIS_PULL_REQUEST" == 'false' ]; then
mvn deploy -DskipTests=true -P sign --settings .travis/settings.xml
fi
fi
|
intuit/Autumn
|
.travis/deploy.sh
|
Shell
|
apache-2.0
| 229 |
#!/usr/bin/env bash
protoc --proto_path=src/main/proto -I=../proto-actor/src/main/proto --java_out=src/main/java/ src/main/proto/actor/proto/examples/remotebenchmark/*.proto
|
AsynkronIT/protoactor-kotlin
|
examples/build-protos.sh
|
Shell
|
apache-2.0
| 175 |
git config credential.helper 'cache --timeout=9000'
git add .
git commit -m "auto push"
git push
|
stevensia/stevensia.github.io
|
push.sh
|
Shell
|
apache-2.0
| 97 |
source ./lib1.sh
source ./lib2.sh
INFO "Loaded both"
|
panta82/bash-tools
|
sandbox/conditional_fns/main.sh
|
Shell
|
apache-2.0
| 54 |
python ./setup.py bdist_egg
|
nherbaut/jdev2015T6A01
|
worker/build_egg.sh
|
Shell
|
apache-2.0
| 28 |
#!/bin/bash
# Copyright 2014 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO(jbeda): Provide a way to override project
# gcloud multiplexing for shared GCE/GKE tests.
GCLOUD=gcloud
ZONE=${KUBE_GCE_ZONE:-us-central1-b}
MASTER_SIZE=${MASTER_SIZE:-n1-standard-1}
MINION_SIZE=${MINION_SIZE:-n1-standard-1}
NUM_MINIONS=${NUM_MINIONS:-2}
MASTER_DISK_TYPE=pd-ssd
MASTER_DISK_SIZE=${MASTER_DISK_SIZE:-20GB}
MINION_DISK_TYPE=pd-standard
MINION_DISK_SIZE=${MINION_DISK_SIZE:-100GB}
KUBE_APISERVER_REQUEST_TIMEOUT=300
OS_DISTRIBUTION=${KUBE_OS_DISTRIBUTION:-debian}
MASTER_IMAGE=${KUBE_GCE_MASTER_IMAGE:-container-vm-v20150611}
MASTER_IMAGE_PROJECT=${KUBE_GCE_MASTER_PROJECT:-google-containers}
MINION_IMAGE=${KUBE_GCE_MINION_IMAGE:-container-vm-v20150611}
MINION_IMAGE_PROJECT=${KUBE_GCE_MINION_PROJECT:-google-containers}
CONTAINER_RUNTIME=${KUBE_CONTAINER_RUNTIME:-docker}
RKT_VERSION=${KUBE_RKT_VERSION:-0.5.5}
NETWORK=${KUBE_GCE_NETWORK:-e2e}
INSTANCE_PREFIX="${KUBE_GCE_INSTANCE_PREFIX:-e2e-test-${USER}}"
MASTER_NAME="${INSTANCE_PREFIX}-master"
MASTER_TAG="${INSTANCE_PREFIX}-master"
MINION_TAG="${INSTANCE_PREFIX}-minion"
CLUSTER_IP_RANGE="${CLUSTER_IP_RANGE:-10.245.0.0/16}"
MASTER_IP_RANGE="${MASTER_IP_RANGE:-10.246.0.0/24}"
MINION_SCOPES=("storage-ro" "compute-rw" "https://www.googleapis.com/auth/logging.write" "https://www.googleapis.com/auth/monitoring")
# Increase the sleep interval value if concerned about API rate limits. 3, in seconds, is the default.
POLL_SLEEP_INTERVAL=3
SERVICE_CLUSTER_IP_RANGE="10.0.0.0/16" # formerly PORTAL_NET
# When set to true, Docker Cache is enabled by default as part of the cluster bring up.
ENABLE_DOCKER_REGISTRY_CACHE=true
# Optional: Install node monitoring.
ENABLE_NODE_MONITORING="${KUBE_ENABLE_NODE_MONITORING:-true}"
# Optional: Cluster monitoring to setup as part of the cluster bring up:
# none - No cluster monitoring setup
# influxdb - Heapster, InfluxDB, and Grafana
# google - Heapster, Google Cloud Monitoring, and Google Cloud Logging
ENABLE_CLUSTER_MONITORING="${KUBE_ENABLE_CLUSTER_MONITORING:-influxdb}"
# Optional: Enable node logging.
ENABLE_NODE_LOGGING="${KUBE_ENABLE_NODE_LOGGING:-true}"
LOGGING_DESTINATION="${KUBE_LOGGING_DESTINATION:-elasticsearch}" # options: elasticsearch, gcp
# Optional: When set to true, Elasticsearch and Kibana will be setup as part of the cluster bring up.
ENABLE_CLUSTER_LOGGING="${KUBE_ENABLE_CLUSTER_LOGGING:-true}"
ELASTICSEARCH_LOGGING_REPLICAS=1
# Don't require https for registries in our local RFC1918 network
EXTRA_DOCKER_OPTS="--insecure-registry 10.0.0.0/8"
# Optional: Install cluster DNS.
ENABLE_CLUSTER_DNS=true
DNS_SERVER_IP="10.0.0.10"
DNS_DOMAIN="cluster.local"
DNS_REPLICAS=1
ADMISSION_CONTROL=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota
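# Most settings above honor environment overrides through the ${VAR:-default}
# pattern, so a hypothetical invocation such as:
#   KUBE_GCE_ZONE=europe-west1-b NUM_MINIONS=4 KUBE_GCE_NETWORK=e2e-alt <cluster bring-up script>
# changes zone, node count, and network without editing this file. Plain
# assignments (e.g. POLL_SLEEP_INTERVAL) must still be edited here.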
|
michaeltchapman/kubernetes
|
cluster/gce/config-test.sh
|
Shell
|
apache-2.0
| 3,376 |
DIR="$(cd "$(dirname "$0")" && pwd)"
source $DIR/setenv.sh
SCRIPT_NAME=`basename "$0"`
if [ -z "$1" ]
then
$DIR/jra-util/jra-util-print-service-usage-info.sh $SCRIPT_NAME
exit 1;
fi
CLUSTER_NODE_INFO_STRING=$(infra-swarm-info-manager-node.sh info-only)
SWARM_MANAGER_NODE_IP="$(printf "$CLUSTER_NODE_INFO_STRING" | grep PublicIpAddress | cut -d ' ' -f2)"
SWARM_SERVICE_INSPECT_JSON=$(infra-swarm-info-service-inspect.sh $1)
declare -A FIELD_PORT_MAPPINGS
FIELD_DOMAIN="$(echo $SWARM_SERVICE_INSPECT_JSON | jq '.[0].Spec.Labels["jra.domain-name"]' | sed -e 's/^"//' -e 's/"$//')"
FIELD_PUBLIC_IP="$SWARM_MANAGER_NODE_IP"
FIELD_PRIVATE_IP="$(printf "$CLUSTER_NODE_INFO_STRING" | grep PrivateIpAddress | cut -d ' ' -f2)"
FIELD_PRIVATE_DNS_NAME="$(printf "$CLUSTER_NODE_INFO_STRING" | grep PrivateDnsName | cut -d ' ' -f2)"
FIELD_PUBLIC_DNS_NAME="$(printf "$CLUSTER_NODE_INFO_STRING" | grep PublicDnsName | cut -d ' ' -f2)"
FIELD_DOCKER_NAME="$(echo $SWARM_SERVICE_INSPECT_JSON | jq '.[0].Spec.Name' | sed -e 's/^"//' -e 's/"$//')"
FIELD_PUBLIC_IP_URL=""
FIELD_PRIVATE_IP_URL=""
FIELD_PRIVATE_DNS_NAME_URL=""
FIELD_PUBLIC_DNS_NAME_URL=""
FIELD_INTERNAL_DOCKER_NETWORK_URL=""
CONST_DEFAULT_DOMAIN_PORT="80"
FIELD_SCHEME="http"
FIELD_DOMAIN_URL="$FIELD_SCHEME://$FIELD_DOMAIN"
NUM_OF_PORTS=$(echo $SWARM_SERVICE_INSPECT_JSON | jq '.[0].Endpoint.Ports | length')
REPORT_STRING=" domain"
REPORT_STRING="$REPORT_STRING,$FIELD_DOMAIN_URL"
REPORT_STRING="$REPORT_STRING,$FIELD_DOMAIN"
REPORT_STRING="$REPORT_STRING,$CONST_DEFAULT_DOMAIN_PORT"
REPORT_STRING="$REPORT_STRING\n"
for ((i = 0 ; i < $NUM_OF_PORTS ; i++ )); do
PORT_JSON=$(echo $SWARM_SERVICE_INSPECT_JSON | jq ".[0].Endpoint.Ports[$i] | {TargetPort: .TargetPort, PublishedPort: .PublishedPort}")
FIELD_TARGET_PORT="$(echo $PORT_JSON | jq '.TargetPort')"
FIELD_PUBLISH_PORT="$(echo $PORT_JSON | jq '.PublishedPort')"
FIELD_PORT_MAPPINGS[$FIELD_TARGET_PORT]=$FIELD_PUBLISH_PORT
done
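# Hypothetical shape of one .Endpoint.Ports entry consumed by the jq calls above:
#   { "Protocol": "tcp", "TargetPort": 8080, "PublishedPort": 30001, "PublishMode": "ingress" }
# which would record FIELD_PORT_MAPPINGS[8080]=30001 for the URL tables below.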
ROW_NUM=0
for i in "${!FIELD_PORT_MAPPINGS[@]}"; do
FIELD_TARGET_PORT=$i
FIELD_PUBLISH_PORT="${FIELD_PORT_MAPPINGS[$i]}"
FIELD_URL_HOST_NAME=$FIELD_PUBLIC_IP
FIELD_URL_PORT=$FIELD_PUBLISH_PORT
FIELD_URL="$FIELD_SCHEME://$FIELD_URL_HOST_NAME:$FIELD_URL_PORT"
FIELD_PUBLIC_IP_URL=$FIELD_URL
if [ $ROW_NUM = "0" ]; then
FIELD_URL_TYPE="public-ip"
else
FIELD_URL_TYPE=" -"
fi
REPORT_STRING="$REPORT_STRING $FIELD_URL_TYPE"
REPORT_STRING="$REPORT_STRING,$FIELD_URL"
REPORT_STRING="$REPORT_STRING,$FIELD_URL_HOST_NAME"
REPORT_STRING="$REPORT_STRING,$FIELD_URL_PORT"
REPORT_STRING="$REPORT_STRING\n"
((ROW_NUM++))
done
REPORT_STRING="$REPORT_STRING\n"
ROW_NUM=0
for i in "${!FIELD_PORT_MAPPINGS[@]}"; do
FIELD_TARGET_PORT=$i
FIELD_PUBLISH_PORT="${FIELD_PORT_MAPPINGS[$i]}"
FIELD_URL_HOST_NAME=$FIELD_PUBLIC_DNS_NAME
FIELD_URL_PORT=$FIELD_PUBLISH_PORT
FIELD_URL="$FIELD_SCHEME://$FIELD_URL_HOST_NAME:$FIELD_URL_PORT"
FIELD_PUBLIC_DNS_NAME_URL=$FIELD_URL
if [ $ROW_NUM = "0" ]; then
FIELD_URL_TYPE="public-dns-name"
else
FIELD_URL_TYPE=" -"
fi
REPORT_STRING="$REPORT_STRING $FIELD_URL_TYPE"
REPORT_STRING="$REPORT_STRING,$FIELD_URL"
REPORT_STRING="$REPORT_STRING,$FIELD_URL_HOST_NAME"
REPORT_STRING="$REPORT_STRING,$FIELD_URL_PORT"
REPORT_STRING="$REPORT_STRING\n"
((ROW_NUM++))
done
ROW_NUM=0
for i in "${!FIELD_PORT_MAPPINGS[@]}"; do
FIELD_TARGET_PORT=$i
FIELD_PUBLISH_PORT="${FIELD_PORT_MAPPINGS[$i]}"
FIELD_URL_HOST_NAME=$FIELD_PRIVATE_IP
FIELD_URL_PORT=$FIELD_PUBLISH_PORT
FIELD_URL="$FIELD_SCHEME://$FIELD_URL_HOST_NAME:$FIELD_URL_PORT"
FIELD_PRIVATE_IP_URL=$FIELD_URL
if [ $ROW_NUM = "0" ]; then
FIELD_URL_TYPE="private-ip"
else
FIELD_URL_TYPE=" -"
fi
REPORT_STRING="$REPORT_STRING $FIELD_URL_TYPE"
REPORT_STRING="$REPORT_STRING,$FIELD_URL"
REPORT_STRING="$REPORT_STRING,$FIELD_URL_HOST_NAME"
REPORT_STRING="$REPORT_STRING,$FIELD_URL_PORT"
REPORT_STRING="$REPORT_STRING\n"
((ROW_NUM++))
done
REPORT_STRING="$REPORT_STRING\n"
ROW_NUM=0
for i in "${!FIELD_PORT_MAPPINGS[@]}"; do
FIELD_TARGET_PORT=$i
FIELD_PUBLISH_PORT="${FIELD_PORT_MAPPINGS[$i]}"
FIELD_URL_HOST_NAME=$FIELD_PRIVATE_DNS_NAME
FIELD_URL_PORT=$FIELD_PUBLISH_PORT
FIELD_URL="$FIELD_SCHEME://$FIELD_URL_HOST_NAME:$FIELD_URL_PORT"
FIELD_PRIVATE_DNS_NAME_URL=$FIELD_URL
if [ $ROW_NUM = "0" ]; then
FIELD_URL_TYPE="private-dns-name"
else
FIELD_URL_TYPE=" -"
fi
REPORT_STRING="$REPORT_STRING $FIELD_URL_TYPE"
REPORT_STRING="$REPORT_STRING,$FIELD_URL"
REPORT_STRING="$REPORT_STRING,$FIELD_URL_HOST_NAME"
REPORT_STRING="$REPORT_STRING,$FIELD_URL_PORT"
REPORT_STRING="$REPORT_STRING\n"
((ROW_NUM++))
done
ROW_NUM=0
for i in "${!FIELD_PORT_MAPPINGS[@]}"; do
FIELD_TARGET_PORT=$i
FIELD_PUBLISH_PORT="${FIELD_PORT_MAPPINGS[$i]}"
FIELD_URL_HOST_NAME=$FIELD_DOCKER_NAME
FIELD_URL_PORT=$FIELD_TARGET_PORT
FIELD_URL="$FIELD_SCHEME://$FIELD_URL_HOST_NAME:$FIELD_URL_PORT"
FIELD_INTERNAL_DOCKER_NETWORK_URL=$FIELD_URL
if [ $ROW_NUM = "0" ]; then
FIELD_URL_TYPE="internal-container-network"
else
FIELD_URL_TYPE=" -"
fi
REPORT_STRING="$REPORT_STRING $FIELD_URL_TYPE"
REPORT_STRING="$REPORT_STRING,$FIELD_URL"
REPORT_STRING="$REPORT_STRING,$FIELD_URL_HOST_NAME"
REPORT_STRING="$REPORT_STRING,$FIELD_URL_PORT"
REPORT_STRING="$REPORT_STRING\n"
((ROW_NUM++))
done
#printf '%b\n' "SWARM_SERVICE_INSPECT_JSON = \n$SWARM_SERVICE_INSPECT_JSON"
#printf '%b\n' "\n\n<-------CLUSTER_NODE_INFO_STRING: \n\n\"$CLUSTER_NODE_INFO_STRING\"\n\n"
#printf '%b\n' "\n\nFIELD_DOMAIN=$FIELD_DOMAIN" \
#"FIELD_DOMAIN_URL=$FIELD_DOMAIN_URL" \
#"FIELD_PUBLIC_IP=$FIELD_PUBLIC_IP" \
#"FIELD_PUBLIC_IP_URL=$FIELD_PUBLIC_IP_URL" \
#"FIELD_PRIVATE_IP=$FIELD_PRIVATE_IP" \
#"FIELD_PRIVATE_IP_URL=$FIELD_PRIVATE_IP_URL" \
#"FIELD_PUBLIC_DNS_NAME=$FIELD_PUBLIC_DNS_NAME" \
#"FIELD_PUBLIC_DNS_NAME_URL=$FIELD_PUBLIC_DNS_NAME_URL" \
#"FIELD_PRIVATE_DNS_NAME=$FIELD_PRIVATE_DNS_NAME" \
#"FIELD_PRIVATE_DNS_NAME_URL=$FIELD_PRIVATE_DNS_NAME_URL" \
#"FIELD_DOCKER_NAME=$FIELD_DOCKER_NAME" \
#"FIELD_INTERNAL_DOCKER_NETWORK_URL=$FIELD_INTERNAL_DOCKER_NETWORK_URL\n"
#echo +++++++++ 2 = $2
REPORT_STRING=" URL_TYPE,URL,HOST,PORT\n"$REPORT_STRING
if ! [ -z "$2" ]
then
if [ $2 = "domain" ]; then
echo $FIELD_DOMAIN_URL;
exit 0;
elif [ $2 = "public-dns" ]; then
echo $FIELD_PUBLIC_DNS_NAME_URL;
exit 0;
elif [ $2 = "public-ip" ]; then
echo $FIELD_PUBLIC_IP_URL;
exit 0;
elif [ $2 = "private-dns" ]; then
echo $FIELD_PRIVATE_DNS_NAME_URL;
exit 0;
elif [ $2 = "private-ip" ]; then
echo $FIELD_PRIVATE_IP_URL;
exit 0;
elif [ $2 = "internal-container" ]; then
echo $FIELD_INTERNAL_DOCKER_NETWORK_URL;
exit 0;
else
echo $FIELD_DOMAIN_URL;
exit 0;
fi
else
printf '%b\n' ""
printf '%b\n' "$REPORT_STRING" | column -t -s ','
fi
|
joericearchitect/shared-infra
|
scripts/infra-swarm-info-service-urls.sh
|
Shell
|
apache-2.0
| 6,803 |
#!/bin/bash
# Script to download deployment unit from a Maven artifact repository.
releaseRepo=http://mvnrepo.cantara.no/content/repositories/releases
snapshotRepo=http://mvnrepo.cantara.no/content/repositories/snapshots
groupId=net/whydah/identity
artifactId=UserIdentityBackend
V=SNAPSHOT
if [[ $V == *SNAPSHOT* ]]; then
echo "Note: if the artifact version contains SNAPSHOT, the latest snapshot build is downloaded, regardless of the version number before SNAPSHOT."
path="$snapshotRepo/$groupId/$artifactId"
version=`curl -s "$path/maven-metadata.xml" | grep "<version>" | sed "s/.*<version>\([^<]*\)<\/version>.*/\1/" | tail -n 1`
echo "Version $version"
build=`curl -s "$path/$version/maven-metadata.xml" | grep '<value>' | head -1 | sed "s/.*<value>\([^<]*\)<\/value>.*/\1/"`
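# Hypothetical maven-metadata.xml fragments that the two curls above rely on:
#   repository level:  <version>2.1.2-SNAPSHOT</version>
#   version level:     <value>2.1.2-20190101.120000-42</value>
# so $build resolves to a timestamped snapshot jar name.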
JARFILE="$artifactId-$build.jar"
url="$path/$version/$JARFILE"
else #A specific Release version
path="releaseRepo/$groupId/$artifactId"
url=$path/$V/$artifactId-$V.jar
JARFILE=$artifactId-$V.jar
fi
# Download artifact
echo Downloading $url
wget -O $JARFILE -q -N $url
# Create symlink or replace existing sym link
if [ -h $artifactId.jar ]; then
unlink $artifactId.jar
fi
ln -s $JARFILE $artifactId.jar
# Delete old jar files
jar=$artifactId*.jar
nrOfJarFilesToDelete=`ls $jar -A1t | tail -n +6 | wc -l`
if [[ $nrOfJarFilesToDelete -gt 0 ]]; then
echo "Deleting $nrOfJarFilesToDelete old jar files, keeping the 4 newest + the symlink."
ls $jar -A1t | tail -n +6 | xargs rm -rf
fi
|
Cantara/Whydah
|
config/Docker/uib/uib-all-in-one/toRoot/update-service.sh
|
Shell
|
apache-2.0
| 1,494 |
#!/bin/bash
echo "dummy implementation of git tool was called with $*"
if [ "$1" == "push" ]; then
echo "Push command detected; dump of parameters passed:"
echo "1: $1"
echo "2: $2"
echo "3: $3"
if [ "$2" != '--receive-pack="git receive-pack receive_pack dummy"' ]; then
echo "Receive pack parameter was not part of the parameters to git"
exit 1
else
# we need to remove the parameter from the call, as local receive-packing won't work as expected
echo "running git command with $1 $3 instead"
git "$1" "$3"
exit $?
fi
fi
git "${@}"
exit $?
|
eaglerainbow/gerritautopush
|
test/git_check_for_receivepack.sh
|
Shell
|
apache-2.0
| 591 |
#!/bin/sh
# ----------------------------------------------------------------------------
# Copyright 2001-2006 The Apache Software Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
# Copyright (c) 2001-2002 The Apache Software Foundation. All rights
# reserved.
BASEDIR=`dirname $0`/..
BASEDIR=`(cd "$BASEDIR"; pwd)`
# OS specific support. $var _must_ be set to either true or false.
cygwin=false;
darwin=false;
case "`uname`" in
CYGWIN*) cygwin=true ;;
Darwin*) darwin=true
if [ -z "$JAVA_VERSION" ] ; then
JAVA_VERSION="CurrentJDK"
else
echo "Using Java version: $JAVA_VERSION"
fi
if [ -z "$JAVA_HOME" ] ; then
JAVA_HOME=/System/Library/Frameworks/JavaVM.framework/Versions/${JAVA_VERSION}/Home
fi
;;
esac
if [ -z "$JAVA_HOME" ] ; then
if [ -r /etc/gentoo-release ] ; then
JAVA_HOME=`java-config --jre-home`
fi
fi
# For Cygwin, ensure paths are in UNIX format before anything is touched
if $cygwin ; then
[ -n "$JAVA_HOME" ] && JAVA_HOME=`cygpath --unix "$JAVA_HOME"`
[ -n "$CLASSPATH" ] && CLASSPATH=`cygpath --path --unix "$CLASSPATH"`
fi
# If a specific java binary isn't specified search for the standard 'java' binary
if [ -z "$JAVACMD" ] ; then
if [ -n "$JAVA_HOME" ] ; then
if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
# IBM's JDK on AIX uses strange locations for the executables
JAVACMD="$JAVA_HOME/jre/sh/java"
else
JAVACMD="$JAVA_HOME/bin/java"
fi
else
JAVACMD=`which java`
fi
fi
if [ ! -x "$JAVACMD" ] ; then
echo "Error: JAVA_HOME is not defined correctly."
echo " We cannot execute $JAVACMD"
exit 1
fi
if [ -z "$REPO" ]
then
REPO="$BASEDIR"/repo
fi
CLASSPATH=$CLASSPATH_PREFIX:"$BASEDIR"/conf:"$REPO"/log4j/log4j/1.2.15/log4j-1.2.15.jar:"$REPO"/org/apache/zookeeper/zookeeper/3.4.9/zookeeper-3.4.9.jar:"$REPO"/org/slf4j/slf4j-api/1.6.1/slf4j-api-1.6.1.jar:"$REPO"/org/slf4j/slf4j-log4j12/1.6.1/slf4j-log4j12-1.6.1.jar:"$REPO"/jline/jline/0.9.94/jline-0.9.94.jar:"$REPO"/io/netty/netty/3.10.5.Final/netty-3.10.5.Final.jar:"$REPO"/org/codehaus/jackson/jackson-core-asl/1.8.5/jackson-core-asl-1.8.5.jar:"$REPO"/org/codehaus/jackson/jackson-mapper-asl/1.8.5/jackson-mapper-asl-1.8.5.jar:"$REPO"/commons-io/commons-io/1.4/commons-io-1.4.jar:"$REPO"/commons-cli/commons-cli/1.2/commons-cli-1.2.jar:"$REPO"/com/101tec/zkclient/0.5/zkclient-0.5.jar:"$REPO"/org/apache/commons/commons-math/2.1/commons-math-2.1.jar:"$REPO"/commons-codec/commons-codec/1.6/commons-codec-1.6.jar:"$REPO"/com/google/guava/guava/15.0/guava-15.0.jar:"$REPO"/org/yaml/snakeyaml/1.12/snakeyaml-1.12.jar:"$REPO"/org/apache/helix/helix-core/0.6.7/helix-core-0.6.7.jar
EXTRA_JVM_ARGUMENTS="-Xms512m -Xmx512m"
# For Cygwin, switch paths to Windows format before running java
if $cygwin; then
[ -n "$CLASSPATH" ] && CLASSPATH=`cygpath --path --windows "$CLASSPATH"`
[ -n "$JAVA_HOME" ] && JAVA_HOME=`cygpath --path --windows "$JAVA_HOME"`
[ -n "$HOME" ] && HOME=`cygpath --path --windows "$HOME"`
[ -n "$BASEDIR" ] && BASEDIR=`cygpath --path --windows "$BASEDIR"`
[ -n "$REPO" ] && REPO=`cygpath --path --windows "$REPO"`
fi
exec "$JAVACMD" $JAVA_OPTS \
$EXTRA_JVM_ARGUMENTS \
-classpath "$CLASSPATH" \
-Dapp.name="run-helix-controller" \
-Dapp.pid="$$" \
-Dapp.repo="$REPO" \
-Dbasedir="$BASEDIR" \
org.apache.helix.controller.HelixControllerMain \
"$@"
|
funkygao/gafka
|
cmd/gk/command/template/helix-core-0.6.7/bin/run-helix-controller.sh
|
Shell
|
apache-2.0
| 4,044 |
#!/bin/sh
./gradlew clean
infer run -- ./gradlew build
|
flide/8Vim
|
cicd_scripts/inferAnalysis.sh
|
Shell
|
apache-2.0
| 55 |
#!/usr/bin/env bash
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
if [[ $# != 2 ]]; then
echo "Usage: $(basename "$0") <source-directory> <binary-directory>"
exit 1
fi
readonly SOURCE_DIR="$1"
readonly BINARY_DIR="$2"
# This script is supposed to run inside a Docker container, see
# ci/kokoro/build.sh for the expected setup. The /v directory is a volume
# pointing to a (clean-ish) checkout of google-cloud-cpp:
if [[ -z "${PROJECT_ROOT+x}" ]]; then
readonly PROJECT_ROOT="/v"
fi
source "${PROJECT_ROOT}/ci/colors.sh"
# Run the "bazel build"/"bazel test" cycle inside a Docker image.
# This script is designed to work in the context created by the
# ci/Dockerfile.* build scripts.
echo
echo "${COLOR_YELLOW}Starting docker build $(date) with ${NCPU} cores${COLOR_RESET}"
echo
echo "================================================================"
readonly BAZEL_BIN="/usr/local/bin/bazel"
echo "Using Bazel in ${BAZEL_BIN}"
"${BAZEL_BIN}" version
echo "================================================================"
bazel_args=("--test_output=errors" "--verbose_failures=true" "--keep_going")
if [[ -n "${RUNS_PER_TEST}" ]]; then
bazel_args+=("--runs_per_test=${RUNS_PER_TEST}")
fi
if [[ -n "${BAZEL_CONFIG}" ]]; then
bazel_args+=("--config" "${BAZEL_CONFIG}")
fi
# TODO(#26): Add "bazel test" here.
echo "================================================================"
echo "Compiling all the code, including integration tests $(date)"
echo "================================================================"
"${BAZEL_BIN}" build "${bazel_args[@]}" ...
echo "================================================================"
echo "Compiling and running unit tests $(date)"
echo "================================================================"
"${BAZEL_BIN}" test "${bazel_args[@]}" ...
echo "================================================================"
echo "Build finished at $(date)"
echo "================================================================"
|
googleapis/google-cloud-cpp-bigquery
|
ci/kokoro/docker/build-in-docker-bazel.sh
|
Shell
|
apache-2.0
| 2,561 |
#!/bin/bash
if [ "$(id -u)" != "0" ]; then
echo "This script must be run as root" 1>&2
exit 1
fi
M_IP=10.10.1.107
C_IP=10.10.10.107
D_IP=10.10.20.107
#RABBIT_PASS=secrete
PASSWORD=PASS
#ADMIN_TOKEN=ADMIN
#[email protected]
#1.Install the packages:
sudo apt-get install -y openstack-dashboard
#2.Edit the /etc/openstack-dashboard/local_settings.py file and complete the following actions:
sed -i 's/OPENSTACK_HOST = "127.0.0.1"/OPENSTACK_HOST = "'$C_IP'"/g' /etc/openstack-dashboard/local_settings.py
sed -i "s/ALLOWED_HOSTS = '\*'/ALLOWED_HOSTS = \['\*', \]/g" /etc/openstack-dashboard/local_settings.py
sed -i "s/# memcached set CACHES to something like/# memcached set CACHES to something like\n\
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'/g" /etc/openstack-dashboard/local_settings.py
sed -i "s/'LOCATION': '127.0.0.1:11211'/'LOCATION': '$C_IP:11211'/g" /etc/openstack-dashboard/local_settings.py
sed -i "s/http:\/\/%s:5000\/v2.0/http:\/\/%s:5000\/v3/g" /etc/openstack-dashboard/local_settings.py
sed -i 's/#OPENSTACK_API_VERSIONS = {/OPENSTACK_API_VERSIONS = {/g' /etc/openstack-dashboard/local_settings.py
sed -i 's/# "data-processing": 1.1,/"identity": 3,/g' /etc/openstack-dashboard/local_settings.py
sed -i 's/# "identity": 3,/"image": 2,/g' /etc/openstack-dashboard/local_settings.py
sed -i 's/# "volume": 2,/"volume": 2,/g' /etc/openstack-dashboard/local_settings.py
sed -i 's/# "compute": 2,/}/g' /etc/openstack-dashboard/local_settings.py
sed -i "s/#OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = 'default'/OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = 'default'/g" /etc/openstack-dashboard/local_settings.py
sed -i 's/OPENSTACK_KEYSTONE_DEFAULT_ROLE = "_member_"/OPENSTACK_KEYSTONE_DEFAULT_ROLE = "user"/g' /etc/openstack-dashboard/local_settings.py
sed -i "s/'enable_distributed_router': False,/'enable_distributed_router': True,/g" /etc/openstack-dashboard/local_settings.py
# multidomain support
sed -i "s/#OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = False/OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True/g" /etc/openstack-dashboard/local_settings.py
#•Reload the web server configuration:
service apache2 reload
|
SmartX-Box/OpenStack-Installation
|
dashboard.sh
|
Shell
|
apache-2.0
| 2,159 |
#!/bin/bash
#PYTHONPATH=. pyinstaller --onefile pyportify/views.py
#PYTHONPATH=. pyinstaller --onefile pyportify/copy_all.py
PYTHONPATH=. pyinstaller pyportify.spec
#mv dist/copy_all dist/pyportify-copyall
#cp -R pyportify/static dist/
|
rckclmbr/pyportify
|
make_exe.sh
|
Shell
|
apache-2.0
| 238 |
#!/bin/bash
sudo -u postgres psql $3 -c "SELECT * FROM $1 WHERE $2 ;"
|
project-asap/IReS-Platform
|
resources/unusedAsapLibraryComponents/Maxim/operators/Wind_Filter_Test_Postgres/Postgres_Filter.sh
|
Shell
|
apache-2.0
| 69 |
#!/bin/bash
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
BIGTOP_DEFAULTS_DIR=${BIGTOP_DEFAULTS_DIR-/etc/default}
[ -n "${BIGTOP_DEFAULTS_DIR}" -a -r ${BIGTOP_DEFAULTS_DIR}/hbase ] && . ${BIGTOP_DEFAULTS_DIR}/hbase
# Autodetect JAVA_HOME if not defined
if [ -e /usr/libexec/bigtop-detect-javahome ]; then
. /usr/libexec/bigtop-detect-javahome
elif [ -e /usr/lib/bigtop-utils/bigtop-detect-javahome ]; then
. /usr/lib/bigtop-utils/bigtop-detect-javahome
fi
export HBASE_HOME=${HBASE_HOME:-/usr/hdp/current/hbase-client}
export METRON_VERSION=0.2.0BETA
export METRON_HOME=/usr/metron/$METRON_VERSION
export DM_JAR=metron-maas-service-$METRON_VERSION-uber.jar
CP=$METRON_HOME/lib/$DM_JAR
HADOOP_CLASSPATH=$(echo $CP )
for jar in $(echo $HADOOP_CLASSPATH | sed 's/:/ /g');do
if [ -f $jar ];then
LIBJARS="$jar,$LIBJARS"
fi
done
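# LIBJARS now holds a trailing-comma separated list of the jars found above,
# e.g. (hypothetical): /usr/metron/0.2.0BETA/lib/metron-maas-service-0.2.0BETA-uber.jar,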
export HADOOP_CLASSPATH
yarn jar $METRON_HOME/lib/$DM_JAR org.apache.metron.maas.submit.ModelSubmission "$@"
|
charlesporter/incubator-metron
|
metron-analytics/metron-maas-service/src/main/scripts/maas_deploy.sh
|
Shell
|
apache-2.0
| 1,694 |
#!/bin/bash
# Copyright (c) 2016. Zuercher Hochschule fuer Angewandte Wissenschaften
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Author: Piyush Harsh, Martin Skoviera
sudo add-apt-repository ppa:webupd8team/java
sudo apt-get update
### Installing Java ###
sudo apt-get -y install python-software-properties
echo debconf shared/accepted-oracle-license-v1-1 select true | sudo debconf-set-selections
echo debconf shared/accepted-oracle-license-v1-1 seen true | sudo debconf-set-selections
sudo apt-get -y install oracle-java8-installer
export JAVA_HOME="/usr/lib/jvm/java-8-oracle"
cat << EOF | sudo tee -a /etc/environment
JAVA_HOME="/usr/lib/jvm/java-8-oracle"
EOF
sudo apt-get -y install maven
sudo apt-get -y install curl libcurl3 libcurl3-dev
source /etc/environment
sudo -k
|
icclab/cyclops-dashboard
|
install/install_mvn_java.sh
|
Shell
|
apache-2.0
| 1,309 |
#!/usr/bin/env bash
if [ ! -d "$PROTOBUF" ]; then
echo "Please set the PROTOBUF environment variable to your clone of google/protobuf."
exit 1
fi
if [ ! -d "$GOOGLEAPIS" ]; then
echo "Please set the GOOGLEAPIS environment variable to your clone of googleapis/googleapis."
exit 1
fi
PROTOC="protoc --dart_out=grpc:lib/src/generated -I$PROTOBUF/src -I$GOOGLEAPIS"
$PROTOC $GOOGLEAPIS/google/logging/v2/logging.proto
$PROTOC $GOOGLEAPIS/google/logging/v2/log_entry.proto
$PROTOC $GOOGLEAPIS/google/logging/type/log_severity.proto
$PROTOC $GOOGLEAPIS/google/logging/type/http_request.proto
$PROTOC $GOOGLEAPIS/google/api/monitored_resource.proto
$PROTOC $GOOGLEAPIS/google/api/label.proto
$PROTOC $GOOGLEAPIS/google/api/launch_stage.proto
$PROTOC $GOOGLEAPIS/google/rpc/status.proto
$PROTOC $PROTOBUF/src/google/protobuf/any.proto
$PROTOC $PROTOBUF/src/google/protobuf/duration.proto
$PROTOC $PROTOBUF/src/google/protobuf/empty.proto
$PROTOC $PROTOBUF/src/google/protobuf/struct.proto
$PROTOC $PROTOBUF/src/google/protobuf/timestamp.proto
dart format lib/src/generated
|
grpc/grpc-dart
|
example/googleapis/tool/regenerate.sh
|
Shell
|
apache-2.0
| 1,083 |
#! /bin/bash
sudo iptables -A OUTPUT -p tcp --tcp-flags RST RST -j DROP
|
last-login/TCPFuzz
|
setup_iptables.sh
|
Shell
|
bsd-2-clause
| 73 |
#!/bin/bash
## ec2-init.sh
##
## Simple bash script to use during boot for an AWS instance to dynamically set the hostname
## from a value provided as userdata.
##
## Copyright (C) 2015 KISS IT Consulting <http://www.kissitconsulting.com/>
##
## Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions
## are met:
##
## 1. Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## 2. Redistributions in binary form must reproduce the above
## copyright notice, this list of conditions and the following
## disclaimer in the documentation and/or other materials
## provided with the distribution.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ANY
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
## PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
## OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
## NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
## SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
##
## Usage:
## 1. Download to your system somewhere, say /usr/local/bin/ec2-init.sh
## 2. Make it executable: chmod 755 /usr/local/bin/ec2-init.sh
## 3. Add it to /etc/rc.local so it gets run at boot, like so:
## ## Run our custom ec2-init script
## /usr/local/bin/ec2-init.sh
## 4. Set your desired hostname in the EC2 instance user-data field
## 5. Reboot. Include this setup in custom images, pass in your hostname when building instances from the API, things will have friendly names
## Set domain to suit your needs
DOMAIN=kissitconsulting.com
CHECK="^<.*"
## Set our hostname on boot (if we have one)
HOSTNAME=`/usr/bin/curl -s http://169.254.169.254/latest/user-data`
if [[ ! $HOSTNAME =~ $CHECK ]]; then
echo "Setting hostname to $HOSTNAME.$DOMAIN"
hostname $HOSTNAME.$DOMAIN
fi
exit 0
|
kissit/kiss-ops
|
cloud/ec2-init.sh
|
Shell
|
bsd-2-clause
| 2,361 |
#!/bin/sh
# karen was here o u o
{
karentools release "net.angelxwind.preferenceorganizer2"
exit
}
|
angelXwind/PreferenceOrganizer2
|
release-karentools.sh
|
Shell
|
bsd-2-clause
| 102 |
if ! [ -e ~/.nvm/nvm.sh ]; then
curl https://raw.githubusercontent.com/creationix/nvm/v0.17.2/install.sh | bash
fi
source ~/.nvm/nvm.sh
nvm list 0.10 || nvm install -s 0.10
nvm exec 0.10 which grunt || nvm exec 0.10 npm install -g grunt-cli jasmine
nvm list 0.11 || nvm install -s 0.11
nvm exec 0.11 which grunt || nvm exec 0.11 npm install -g grunt-cli jasmine
nvm list 0.12 || nvm install -s 0.12
nvm exec 0.12 which grunt || nvm exec 0.12 npm install -g grunt-cli jasmine
nvm alias default 0.10
|
mross-pivotal/node-gemfire
|
bin/vagrant_node.sh
|
Shell
|
bsd-2-clause
| 504 |
# Install dajaxice and dajax, for easier Ajax coding.
easy_install django-dajaxice
easy_install django-dajax
|
DevangS/CoralNet
|
changes_for_commits/bash/bash_3749eddf33ad6bc92e523cc9eda943823aff46b5.sh
|
Shell
|
bsd-2-clause
| 110 |
#!/bin/bash
pushd $COGU_HOME >> /dev/null
/jupyter_scripts/j2cli.sh
/jupyter_scripts/setup_git_filters.sh
popd
|
CognitiveScale/docker-demo-images
|
fromJupyterDemo/run.sh
|
Shell
|
bsd-3-clause
| 112 |
#!/bin/bash
set -e
go get github.com/dvyukov/go-fuzz/go-fuzz
go get github.com/dvyukov/go-fuzz/go-fuzz-build
go-fuzz-build github.com/stephens2424/php/parser
go-fuzz -bin php-fuzz.zip -workdir testdata/fuzzdir -master ":1234"
|
stephens2424/php
|
fuzz.sh
|
Shell
|
bsd-3-clause
| 229 |
#!/bin/bash
#
# Install the Google Cloud SDK
#
curl -o /tmp/cloud-sdk.tar.gz https://dl.google.com/dl/cloudsdk/channels/rapid/downloads/google-cloud-sdk-367.0.0-linux-x86_64.tar.gz
mkdir /tmp/cloud-sdk
tar -xf /tmp/cloud-sdk.tar.gz --directory /tmp/cloud-sdk
cd /tmp/cloud-sdk/google-cloud-sdk
./install.sh -q --additional-components cloud_sql_proxy core gsutil app-engine-python app-engine-python-extras bq
source /tmp/cloud-sdk/google-cloud-sdk/path.bash.inc
gcloud components list
|
all-of-us/raw-data-repository
|
ci/install_google_sdk.sh
|
Shell
|
bsd-3-clause
| 485 |
#!/bin/bash
# TODO: Test config for OSX
# TODO: Determine appropriate architecture (i.g. "args+=(arch=nocona)")
DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
if [ -z "$3" ]; then
ARCH=x86-64
else
ARCH=$3
fi
echo Architecture is set to $ARCH
if [ "$1" = "clean" ] || [ "$1" = "build" ]; then
declare -a args=()
SYSTEM_NAME=`uname -s`
args+=(sse=0)
args+=(arch=$ARCH)
if [ "$SYSTEM_NAME" = "Darwin" ]; then
# OSX specific config:
args+=(use_openmesh=1 openmesh_libpath=#/../OpenMesh-2.0/build/Build/lib/OpenMesh openmesh_publiclibs='OpenMeshCore,OpenMeshTools' openmesh_include=#/../OpenMesh-2.0/src)
echo "" # Can't have an empty if block
elif echo "$SYSTEM_NAME" | grep -q "MINGW64_NT"; then
# Windows specific config:
args+=(libs_extra=psapi)
args+=(use_openmesh=1 Werror=0 openmesh_libpath=#/../OpenMesh-2.0/build/Build/lib openmesh_publiclibs='OpenMeshCore,OpenMeshTools' openmesh_include=#/../OpenMesh-2.0/src)
elif echo "$SYSTEM_NAME" | grep -q "MSYS"; then
echo "ERROR: the MSYS shell is not supported. Please use the MinGW-w64 Win64 Shell instead."
exit 1
else
echo "ERROR: Unrecognized or unsupported platform: $SYSTEM_NAME!"
exit 1
fi
args+=(use_python=0 use_libjpeg=0 use_libpng=0 use_openexr=0 use_boost=0)
args+=(shared=1 install=0)
args+=(use_gmp=1 gmp_libpath=#/../mpir/.libs/ gmp_include=#/../mpir/)
if [ "$2" = "debug" ]; then
echo "Building geode for debug only, using $ARCH architecture"
types=debug
elif [ "$2" = "release" ]; then
echo "Building geode for release only, using $ARCH architecture"
types=release
else
echo "Build type was not specified, building geode for both debug and release, using $ARCH architecture"
types="debug release"
fi
for t in $types; do
scons_args="--config=force -j7 prefix=#build/\$arch/\$type type=$t ${args[@]}"
if [ "$1" = "clean" ]; then
echo "Cleaning $t with: $scons_args"
(cd $DIR && scons -c $scons_args) || exit 1
else
echo "Building $t with: $scons_args"
(cd $DIR && scons $scons_args) || exit 1
fi
echo ""
done
else
echo 'Missing or unrecognized metaBuild argument: '\'$1\'
exit 1
fi
|
omco/geode
|
metaBuild.sh
|
Shell
|
bsd-3-clause
| 2,210 |
#! /bin/bash
set -o pipefail
checkLastCmdStatus() {
LAST_STATUS=$?
if [ $LAST_STATUS -ne 0 ] ; then
sudo iptables-restore < $DIR/iptable
echo "exiting abnormally with status $LAST_STATUS - aborting/failing test"
exit $LAST_STATUS
fi
}
checkLastCmdStatusExpectingFailure() {
LAST_STATUS=$?
if [ $LAST_STATUS -eq 0 ] ; then
sudo iptables-restore < $DIR/iptable
echo "expecting wdt failure, but transfer was successful, failing test"
exit 1
fi
}
startNewTransfer() {
$WDTBIN_SERVER -directory $DIR/dst${TEST_COUNT} -start_port=$STARTING_PORT \
-transfer_id=$RECEIVER_ID -protocol_version=$RECEIVER_PROTOCOL_VERSION \
>> $DIR/server${TEST_COUNT}.log 2>&1 &
pidofreceiver=$!
$WDTBIN_CLIENT -directory $SRC_DIR -destination $HOSTNAME \
-start_port=$STARTING_PORT -block_size_mbytes=$BLOCK_SIZE_MBYTES \
-transfer_id=$SENDER_ID -protocol_version=$SENDER_PROTOCOL_VERSION \
|& tee -a $DIR/client${TEST_COUNT}.log &
pidofsender=$!
}
waitForTransferEnd() {
wait $pidofreceiver
checkLastCmdStatus
wait $pidofsender
checkLastCmdStatus
}
waitForTransferEndWithoutCheckingStatus() {
wait $pidofreceiver
wait $pidofsender
}
waitForTransferEndExpectingFailure() {
wait $pidofreceiver
checkLastCmdStatusExpectingFailure
wait $pidofsender
checkLastCmdStatusExpectingFailure
}
killCurrentTransfer() {
kill -9 $pidofsender
kill -9 $pidofreceiver
wait $pidofsender
wait $pidofreceiver
}
usage="
The possible options to this script are
-s sender protocol version
-r receiver protocol version
"
#protocol versions, used to check version verification
#version 0 represents default version
SENDER_PROTOCOL_VERSION=0
RECEIVER_PROTOCOL_VERSION=0
if [ "$1" == "-h" ]; then
echo "$usage"
exit 0
fi
while getopts ":s:r:" opt; do
case $opt in
s) SENDER_PROTOCOL_VERSION="$OPTARG"
;;
r) RECEIVER_PROTOCOL_VERSION="$OPTARG"
;;
h) echo "$usage"
exit
;;
\?) echo "Invalid option -$OPTARG" >&2
;;
esac
done
echo "sender protocol version $SENDER_PROTOCOL_VERSION, receiver protocol \
version $RECEIVER_PROTOCOL_VERSION"
threads=4
STARTING_PORT=22500
ERROR_COUNT=10
#sender and receiver id, used to check transfer-id verification
SENDER_ID="123456"
RECEIVER_ID="123456"
WDTBIN_OPTS="-ipv4 -num_ports=$threads -full_reporting \
-avg_mbytes_per_sec=40 -max_mbytes_per_sec=50 -run_as_daemon=false \
-full_reporting -read_timeout_millis=500 -write_timeout_millis=500 \
-enable_download_resumption -keep_transfer_log=false -treat_fewer_port_as_error"
WDTBIN="_bin/wdt/wdt $WDTBIN_OPTS"
WDTBIN_CLIENT="$WDTBIN -recovery_id=abcdef"
WDTBIN_SERVER=$WDTBIN
BASEDIR=/tmp/wdtTest_$USER
mkdir -p $BASEDIR
DIR=`mktemp -d $BASEDIR/XXXXXX`
echo "Testing in $DIR"
#pkill -x wdt
mkdir -p $DIR/src
cd $DIR/src
for ((i = 0; i < 4; i++))
do
dd if=/dev/urandom of=sample${i} bs=16M count=1
done
# we will generate 1G of random data. 16 files each of size 64mb.
for ((i = 0; i < 16; i++))
do
touch file${i}
for ((j = 0; j < 4; j++))
do
sample=$((RANDOM % 4))
cat sample${sample} >> file${i}
done
done
cd -
SRC_DIR=$DIR/src
BLOCK_SIZE_MBYTES=10
TEST_COUNT=0
# Tests for which there is no need to verify source and destination md5s
TESTS_SKIP_VERIFICATION=()
echo "Testing that connection failure results in failed transfer"
# first create a deep directory structure
# this is done so that directory thread gets aborted before discovering any
# file
CURDIR=`pwd`
cd $DIR
for ((i = 0; i < 100; i++))
do
mkdir d
cd d
done
touch file
cd $CURDIR
# start the sender without starting receiver and set connect retries to 1
_bin/wdt/wdt -directory $DIR/d -destination $HOSTNAME -max_retries 1 \
-start_port $STARTING_PORT -num_ports $threads
checkLastCmdStatusExpectingFailure
TESTS_SKIP_VERIFICATION+=($TEST_COUNT)
TEST_COUNT=$((TEST_COUNT + 1))
echo "Download resumption test(1)"
startNewTransfer
sleep 5
killCurrentTransfer
# rm a file to create an invalid log entry
rm -f $DIR/dst${TEST_COUNT}/file0
startNewTransfer
waitForTransferEnd
TEST_COUNT=$((TEST_COUNT + 1))
echo "Download resumption test(2)"
startNewTransfer
sleep 5
killCurrentTransfer
# change the file size in the receiver side
fallocate -l 70M $DIR/dst${TEST_COUNT}/file0
startNewTransfer
sleep 5
killCurrentTransfer
startNewTransfer
waitForTransferEnd
TEST_COUNT=$((TEST_COUNT + 1))
echo "Download resumption with network error test(3)"
startNewTransfer
sleep 10
killCurrentTransfer
startNewTransfer
for ((i = 1; i <= ERROR_COUNT; i++))
do
sleep 0.3 # sleep for 300ms
port=$((STARTING_PORT + RANDOM % threads))
echo "blocking $port"
sudo iptables-save > $DIR/iptable
if [ $(($i % 2)) -eq 0 ]; then
sudo iptables -A INPUT -p tcp --sport $port -j DROP
else
sudo iptables -A INPUT -p tcp --dport $port -j DROP
fi
sleep 0.7 # sleep for 700ms, read/write timeout is 500ms, so must sleep more
# than that
echo "unblocking $port"
sudo iptables-restore < $DIR/iptable
done
waitForTransferEnd
TEST_COUNT=$((TEST_COUNT + 1))
echo "Download resumption with network error test(4)"
startNewTransfer
for ((i = 1; i <= ERROR_COUNT; i++))
do
sleep 0.3 # sleep for 300ms
port=$((STARTING_PORT + RANDOM % threads))
echo "blocking $port"
sudo iptables-save > $DIR/iptable
if [ $(($i % 2)) -eq 0 ]; then
sudo iptables -A INPUT -p tcp --sport $port -j DROP
else
sudo iptables -A INPUT -p tcp --dport $port -j DROP
fi
sleep 0.7 # sleep for 700ms, read/write timeout is 500ms, so must sleep more
# than that
echo "unblocking $port"
sudo iptables-restore < $DIR/iptable
done
killCurrentTransfer
# change the block size for next transfer
BLOCK_SIZE_MBYTES=8
startNewTransfer
waitForTransferEnd
TEST_COUNT=$((TEST_COUNT + 1))
echo "Download resumption test for append-only file(5)"
# truncate file0
cp $DIR/src/file0 $DIR/file0.bak
truncate -s 10M $DIR/src/file0
startNewTransfer
sleep 5
killCurrentTransfer
# restore file0
mv $DIR/file0.bak $DIR/src/file0
startNewTransfer
sleep 5
killCurrentTransfer
startNewTransfer
waitForTransferEnd
TEST_COUNT=$((TEST_COUNT + 1))
# abort set-up
ABORT_AFTER_SECONDS=5
ABORT_CHECK_INTERVAL_MILLIS=100
ABORT_AFTER_MILLIS=$((ABORT_AFTER_SECONDS * 1000))
EXPECTED_TRANSFER_DURATION_MILLIS=$((ABORT_AFTER_MILLIS + \
ABORT_CHECK_INTERVAL_MILLIS))
# add 500ms overhead. We need this because we can not control timeouts for disk
# writes
EXPECTED_TRANSFER_DURATION_MILLIS=$((EXPECTED_TRANSFER_DURATION_MILLIS + 500))
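# Worked example with the values above: 5 * 1000 + 100 + 500 = 5600 ms is the
# accepted upper bound, while a duration under 5000 ms would mean the abort
# fired earlier than requested.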
echo "Abort timing test(1) - Sender side abort"
WDTBIN_CLIENT_OLD=$WDTBIN_CLIENT
WDTBIN_CLIENT+=" -abort_check_interval_millis=$ABORT_CHECK_INTERVAL_MILLIS \
-abort_after_seconds=$ABORT_AFTER_SECONDS"
START_TIME_MILLIS=`date +%s%3N`
startNewTransfer
wait $pidofsender
END_TIME_MILLIS=`date +%s%3N`
wait $pidofreceiver
DURATION=$((END_TIME_MILLIS - START_TIME_MILLIS))
echo "Abort timing test, transfer duration ${DURATION} ms, expected duration \
${EXPECTED_TRANSFER_DURATION_MILLIS} ms."
if (( $DURATION > $EXPECTED_TRANSFER_DURATION_MILLIS \
|| $DURATION < $ABORT_AFTER_MILLIS )); then
echo "Abort timing test failed, exiting"
exit 1
fi
WDTBIN_CLIENT=$WDTBIN_CLIENT_OLD
TESTS_SKIP_VERIFICATION+=($TEST_COUNT)
TEST_COUNT=$((TEST_COUNT + 1))
echo "Abort timing test(2) - Receiver side abort"
WDTBIN_SERVER_OLD=$WDTBIN_SERVER
WDTBIN_SERVER+=" -abort_check_interval_millis=$ABORT_CHECK_INTERVAL_MILLIS \
-abort_after_seconds=$ABORT_AFTER_SECONDS"
START_TIME_MILLIS=`date +%s%3N`
# Block a port at the beginning
sudo iptables-save > $DIR/iptable
sudo iptables -A INPUT -p tcp --dport $STARTING_PORT -j DROP
startNewTransfer
wait $pidofreceiver
END_TIME_MILLIS=`date +%s%3N`
wait $pidofsender
DURATION=$((END_TIME_MILLIS - START_TIME_MILLIS))
echo "Abort timing test, transfer duration ${DURATION} ms, expected duration \
${EXPECTED_TRANSFER_DURATION_MILLIS} ms."
sudo iptables-restore < $DIR/iptable
if (( $DURATION > $EXPECTED_TRANSFER_DURATION_MILLIS \
|| $DURATION < $ABORT_AFTER_MILLIS )); then
echo "Abort timing test failed, exiting"
exit 1
fi
WDTBIN_SERVER=$WDTBIN_SERVER_OLD
TESTS_SKIP_VERIFICATION+=($TEST_COUNT)
TEST_COUNT=$((TEST_COUNT + 1))
echo "Transfer-id mismatch test"
SENDER_ID_OLD=$SENDER_ID
SENDER_ID="abcdef"
startNewTransfer
waitForTransferEndExpectingFailure
SENDER_ID=$SENDER_ID_OLD
TESTS_SKIP_VERIFICATION+=($TEST_COUNT)
TEST_COUNT=$((TEST_COUNT + 1))
# create another src directory full of files with the same name and size as the
# actual src directory
mkdir -p $DIR/src1
cd $DIR/src1
for ((i = 0; i < 4; i++))
do
fallocate -l 16M sample${i}
done
for ((i = 0; i < 16; i++))
do
fallocate -l 64M file${i}
done
cd -
# list of tests for which we should compare destination with $DIR/src1
USE_OTHER_SRC_DIR=()
echo "Test hostname mismatch"
# start first transfer
startNewTransfer
sleep 5
killCurrentTransfer
# change the src directory and make the transfer IPv6
SRC_DIR=$DIR/src1
WDTBIN_CLIENT+=" -ipv4=false -ipv6"
WDTBIN_SERVER+=" -ipv4=false -ipv6"
startNewTransfer
waitForTransferEnd
SRC_DIR=$DIR/src
USE_OTHER_SRC_DIR+=($TEST_COUNT)
TEST_COUNT=$((TEST_COUNT + 1))
STATUS=0
(cd $DIR/src ; ( find . -type f -print0 | xargs -0 md5sum | sort ) > ../src.md5s )
(cd $DIR/src1 ; ( find . -type f -print0 | xargs -0 md5sum | sort ) > ../src1.md5s )
for ((i = 0; i < TEST_COUNT; i++))
do
cat $DIR/server${i}.log
if [[ ${TESTS_SKIP_VERIFICATION[*]} =~ $i ]]; then
echo "Skipping verification for test $i"
continue
fi
(cd $DIR/dst${i} ; ( find . -type f -print0 | xargs -0 md5sum | sort ) > \
../dst${i}.md5s )
echo "Verifying correctness for test $((i + 1))"
echo "Should be no diff"
if [[ ${USE_OTHER_SRC_DIR[*]} =~ $i ]]; then
SRC_MD5=src1.md5s
else
SRC_MD5=src.md5s
fi
(cd $DIR; diff -u $SRC_MD5 dst${i}.md5s)
CUR_STATUS=$?
if [ $STATUS -eq 0 ] ; then
STATUS=$CUR_STATUS
fi
# treating PROTOCOL_ERROR as errors
cd $DIR; grep "PROTOCOL_ERROR" server${i}.log > /dev/null && STATUS=1
cd $DIR; grep "PROTOCOL_ERROR" client${i}.log > /dev/null && STATUS=1
done
if [ $STATUS -eq 0 ] ; then
echo "Good run, deleting logs in $DIR"
find $DIR -type d | xargs chmod 755 # cp -r makes lib/locale not writeable somehow
rm -rf $DIR
else
echo "Bad run ($STATUS) - keeping full logs and partial transfer in $DIR"
fi
exit $STATUS
|
1yvT0s/wdt
|
wdt_download_resumption_test.sh
|
Shell
|
bsd-3-clause
| 10,287 |
#!/bin/sh
temp=/tmp/GrowlMail-Installation-Temp
running="$temp/running"
mkdir -p "$temp"
if /usr/bin/pgrep Mail > /dev/null; then
touch $running
fi
echo $running
#####
# We politely asked the user to quit Mail in the installer intro. Now
# we'll request the same a bit more strongly.
####
osascript -e "quit app \"Mail\""
# Delete any old copies of the bundle
rm -rf ~/Library/Mail/Bundles/GrowlMail.mailbundle
rm -rf /Library/Mail/Bundles/GrowlMail.mailbundle
|
rudyrichter/GrowlMail
|
GrowlMail/product/components/scripts/preflight.sh
|
Shell
|
bsd-3-clause
| 464 |
#!/bin/bash
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Script assumed to be run in native_client/
if [ "x${OSTYPE}" = "xcygwin" ]; then
cd "$(cygpath "${PWD}")"
fi
if [[ ${PWD} != */native_client ]]; then
echo "ERROR: must be run in native_client!"
exit 1
fi
if [ $# -ne 1 ]; then
echo "USAGE: $0 win/mac/linux"
exit 2
fi
readonly SCRIPT_DIR="$(dirname "$0")"
readonly SCRIPT_DIR_ABS="$(cd "${SCRIPT_DIR}" ; pwd)"
readonly CORE_SDK=core_sdk
readonly CORE_SDK_WORK=core_sdk_work
export TOOLCHAINLOC=sdk
export TOOLCHAINNAME=nacl-sdk
set -x
set -e
set -u
PLATFORM=$1
cd tools
export INSIDE_TOOLCHAIN=1
echo @@@BUILD_STEP clobber_toolchain@@@
rm -rf ../scons-out \
sdk-out \
sdk \
${CORE_SDK} \
${CORE_SDK_WORK} \
../toolchain/${PLATFORM}_x86/nacl_*_newlib \
BUILD/*
echo @@@BUILD_STEP clean_sources@@@
./update_all_repos_to_latest.sh
if [[ "${BUILDBOT_SLAVE_TYPE:-Trybot}" == "Trybot" ]]; then
echo @@@BUILD_STEP setup source@@@
./buildbot_patch-toolchain-tries.sh
fi
echo @@@BUILD_STEP compile_toolchain@@@
mkdir -p ../toolchain/${PLATFORM}_x86/nacl_x86_newlib
make -j8 clean buildbot-build-with-newlib
echo @@@BUILD_STEP build_core_sdk@@@
(
# Use scons to generate the SDK headers and libraries.
cd ..
${NATIVE_PYTHON} scons.py MODE=nacl naclsdk_validate=0 \
nacl_newlib_dir=tools/sdk/nacl-sdk \
DESTINATION_ROOT="tools/${CORE_SDK_WORK}" \
includedir="tools/${CORE_SDK}/x86_64-nacl/include" \
install_headers
${NATIVE_PYTHON} scons.py MODE=nacl naclsdk_validate=0 \
platform=x86-32 \
nacl_newlib_dir=tools/sdk/nacl-sdk \
DESTINATION_ROOT="tools/${CORE_SDK_WORK}" \
libdir="tools/${CORE_SDK}/x86_64-nacl/lib32" \
install_lib
${NATIVE_PYTHON} scons.py MODE=nacl naclsdk_validate=0 \
platform=x86-64 \
nacl_newlib_dir=tools/sdk/nacl-sdk \
DESTINATION_ROOT="tools/${CORE_SDK_WORK}" \
libdir="tools/${CORE_SDK}/x86_64-nacl/lib" \
install_lib
)
echo @@@BUILD_STEP canonicalize timestamps@@@
./canonicalize_timestamps.sh sdk
./canonicalize_timestamps.sh "${CORE_SDK}"
echo @@@BUILD_STEP tar_toolchain@@@
# We don't just use tar's z flag because we want to pass the -n option
# to gzip so that it won't embed a timestamp in the compressed file.
tar cvf - sdk | gzip -n -9 > naclsdk.tgz
tar cvf - "${CORE_SDK}" | gzip -n -9 > core_sdk.tgz
if [[ "${BUILDBOT_SLAVE_TYPE:-Trybot}" != "Trybot" ]]; then
GSD_BUCKET=nativeclient-archive2
UPLOAD_REV=${BUILDBOT_GOT_REVISION}
else
GSD_BUCKET=nativeclient-trybot/packages
UPLOAD_REV=${BUILDBOT_BUILDERNAME}/${BUILDBOT_BUILDNUMBER}
fi
# Upload the toolchain before running the tests, in case the tests
# fail. We do not want a flaky test or a non-toolchain-related bug to
# cause us to lose the toolchain snapshot, especially since this takes
# so long to build on Windows. We can always re-test a toolchain
# snapshot on the trybots.
echo @@@BUILD_STEP archive_build@@@
(
gsutil=../buildbot/gsutil.sh
GS_BASE=gs://${GSD_BUCKET}/toolchain
for destrevision in ${UPLOAD_REV} latest ; do
${gsutil} cp -a public-read \
naclsdk.tgz \
${GS_BASE}/${destrevision}/naclsdk_${PLATFORM}_x86.tgz
${gsutil} cp -a public-read \
core_sdk.tgz \
${GS_BASE}/${destrevision}/core_sdk_${PLATFORM}_x86.tgz
done
)
echo @@@STEP_LINK@download@http://gsdview.appspot.com/${GSD_BUCKET}/toolchain/${UPLOAD_REV}/@@@
if [[ ${PLATFORM} == win ]]; then
GDB_TGZ=gdb_i686_w64_mingw32.tgz
elif [[ ${PLATFORM} == mac ]]; then
GDB_TGZ=gdb_x86_64_apple_darwin.tgz
elif [[ ${PLATFORM} == linux ]]; then
GDB_TGZ=gdb_i686_linux.tgz
else
echo "ERROR, bad platform."
exit 1
fi
echo @@@BUILD_STEP archive_extract_package@@@
${NATIVE_PYTHON} ../build/package_version/package_version.py \
archive --archive-package=${PLATFORM}_x86/nacl_x86_newlib --extract \
--extra-archive ${GDB_TGZ} \
naclsdk.tgz,sdk/nacl-sdk@https://storage.googleapis.com/${GSD_BUCKET}/toolchain/${UPLOAD_REV}/naclsdk_${PLATFORM}_x86.tgz \
core_sdk.tgz,${CORE_SDK}@https://storage.googleapis.com/${GSD_BUCKET}/toolchain/${UPLOAD_REV}/core_sdk_${PLATFORM}_x86.tgz
${NATIVE_PYTHON} ../build/package_version/package_version.py \
archive --archive-package=${PLATFORM}_x86/nacl_x86_newlib_raw --extract \
--extra-archive ${GDB_TGZ} \
naclsdk.tgz,sdk/nacl-sdk@https://storage.googleapis.com/${GSD_BUCKET}/toolchain/${UPLOAD_REV}/naclsdk_${PLATFORM}_x86.tgz
${NATIVE_PYTHON} ../build/package_version/package_version.py \
--cloud-bucket=${GSD_BUCKET} --annotate \
upload --skip-missing \
--upload-package=${PLATFORM}_x86/nacl_x86_newlib --revision=${UPLOAD_REV}
${NATIVE_PYTHON} ../build/package_version/package_version.py \
--cloud-bucket=${GSD_BUCKET} --annotate \
upload --skip-missing \
--upload-package=${PLATFORM}_x86/nacl_x86_newlib_raw --revision=${UPLOAD_REV}
# Before we start testing, put in dummy mock archives so gyp can still untar
# the entire package.
${NATIVE_PYTHON} ../build/package_version/package_version.py fillemptytars \
--fill-package nacl_x86_newlib
cd ..
if [[ ${PLATFORM} == win ]]; then
${NATIVE_PYTHON} buildbot/buildbot_standard.py \
--scons-args='no_gdb_tests=1 nacl_clang=0' \
opt 64 newlib
elif [[ ${PLATFORM} == mac ]]; then
${NATIVE_PYTHON} buildbot/buildbot_standard.py \
--scons-args='no_gdb_tests=1 nacl_clang=0' \
opt 32 newlib
elif [[ ${PLATFORM} == linux ]]; then
${NATIVE_PYTHON} buildbot/buildbot_standard.py \
--scons-args='no_gdb_tests=1 nacl_clang=0' \
opt 32 newlib
else
echo "ERROR, bad platform."
exit 1
fi
# sync_backports is obsolete and should probably be removed.
# if [[ "${DONT_BUILD_COMPATIBLE_TOOLCHAINS:-no}" != "yes" ]]; then
# echo @@@BUILD_STEP sync backports@@@
# rm -rf tools/BACKPORTS/ppapi*
# tools/BACKPORTS/build_backports.sh VERSIONS ${PLATFORM} newlib
# fi
|
yantrabuddhi/nativeclient
|
buildbot/buildbot_toolchain.sh
|
Shell
|
bsd-3-clause
| 5,961 |
#!/bin/sh
# Build unbound distribution tar from the SVN repository.
#
# Copyright (c) 2007, NLnet Labs. All rights reserved.
#
# This software is open source.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of the NLNET LABS nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Abort script on unexpected errors.
set -e
# Remember the current working directory.
cwd=`pwd`
# Utility functions.
usage () {
cat >&2 <<EOF
Usage: $0 [-h] [-s] [-d SVN_root] [-w ...args...]
Generate a distribution tar file for unbound.
-h This usage information.
-s Build a snapshot distribution file. The current date is
automatically appended to the current unbound version number.
-rc <nr> Build a release candidate, the given string will be added
to the version number
(which will then be unbound-<version>rc<number>)
-d SVN_root Retrieve the unbound source from the specified repository.
Detected from svn working copy if not specified.
-wssl openssl.xx.tar.gz Also build openssl from tarball for windows dist.
-wxp expat.xx.tar.gz Also build expat from tarball for windows dist.
-w64 64bit windows compile.
-w ... Build windows binary dist. last args passed to configure.
EOF
exit 1
}
info () {
echo "$0: info: $1"
}
error () {
echo "$0: error: $1" >&2
exit 1
}
question () {
printf "%s (y/n) " "$*"
read answer
case "$answer" in
[Yy]|[Yy][Ee][Ss])
return 0
;;
*)
return 1
;;
esac
}
# Only use cleanup and error_cleanup after generating the temporary
# working directory.
cleanup () {
info "Deleting temporary working directory."
cd $cwd && rm -rf $temp_dir
}
error_cleanup () {
echo "$0: error: $1" >&2
cleanup
exit 1
}
replace_text () {
(cp "$1" "$1".orig && \
sed -e "s/$2/$3/g" < "$1".orig > "$1" && \
rm "$1".orig) || error_cleanup "Replacement for $1 failed."
}
replace_all () {
info "Updating '$1' with the version number."
replace_text "$1" "@version@" "$version"
info "Updating '$1' with today's date."
replace_text "$1" "@date@" "`date +'%b %e, %Y'`"
}
replace_version () {
local v1=`echo $2 | sed -e 's/^.*\..*\.//'`
local v2=`echo $3 | sed -e 's/^.*\..*\.//'`
replace_text "$1" "VERSION_MICRO\],\[$v1" "VERSION_MICRO\],\[$v2"
}
check_svn_root () {
# Check if SVNROOT is specified.
if [ -z "$SVNROOT" ]; then
if svn info 2>&1 | grep "not a working copy" >/dev/null; then
if test -z "$SVNROOT"; then
error "SVNROOT must be specified (using -d)"
fi
else
eval `svn info | grep 'URL:' | sed -e 's/URL: /url=/' | head -1`
SVNROOT="$url"
fi
fi
}
create_temp_dir () {
# Creating temp directory
info "Creating temporary working directory"
temp_dir=`mktemp -d unbound-dist-XXXXXX`
info "Directory '$temp_dir' created."
cd $temp_dir
}
# pass filename as $1 arg.
# creates file.sha1 and file.sha256
storehash () {
case $OSTYPE in
linux*)
sha=`sha1sum $1 | awk '{ print $1 }'`
sha256=`sha256sum $1 | awk '{ print $1 }'`
;;
freebsd*)
sha=`sha1 $1 | awk '{ print $5 }'`
sha256=`sha256 $1 | awk '{ print $5 }'`
;;
*)
# in case $OSTYPE is gone.
case `uname` in
Linux*)
sha=`sha1sum $1 | awk '{ print $1 }'`
sha256=`sha256sum $1 | awk '{ print $1 }'`
;;
FreeBSD*)
sha=`sha1 $1 | awk '{ print $5 }'`
sha256=`sha256 $1 | awk '{ print $5 }'`
;;
*)
sha=`sha1sum $1 | awk '{ print $1 }'`
sha256=`sha256sum $1 | awk '{ print $1 }'`
;;
esac
;;
esac
echo $sha > $1.sha1
echo $sha256 > $1.sha256
echo "hash of $1.{sha1,sha256}"
echo "sha1 $sha"
echo "sha256 $sha256"
}
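# Example (hypothetical filename): "storehash unbound-1.9.0.tar.gz" writes the
# hex digests to unbound-1.9.0.tar.gz.sha1 and unbound-1.9.0.tar.gz.sha256 and
# echoes both to stdout.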
SNAPSHOT="no"
RC="no"
DOWIN="no"
W64="no"
WINSSL=""
WINEXPAT=""
# Parse the command line arguments.
while [ "$1" ]; do
case "$1" in
"-h")
usage
;;
"-d")
SVNROOT="$2"
shift
;;
"-s")
SNAPSHOT="yes"
;;
"-wssl")
WINSSL="$2"
shift
;;
"-wxp")
WINEXPAT="$2"
shift
;;
"-w64")
W64="yes"
shift
;;
"-w")
DOWIN="yes"
shift
break
;;
"-rc")
RC="$2"
shift
;;
*)
error "Unrecognized argument -- $1"
;;
esac
shift
done
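# Illustrative invocations of the option parsing above (placeholder paths,
# URLs and configure flags; not executed):
#   ./makedist.sh -rc 1
#       release candidate 1, SVNROOT detected from the svn working copy
#   ./makedist.sh -s -d svn://example.invalid/unbound/trunk
#       dated snapshot exported from the given repository
#   ./makedist.sh -wssl ../openssl.tar.gz -wxp ../expat.tar.gz -w64 -w --with-foo
#       64bit windows build; everything after -w is passed to configure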
if [ "$DOWIN" = "yes" ]; then
# detect crosscompile, from Fedora13 at this point.
if test "`uname`" = "Linux"; then
info "Crosscompile windows dist"
cross="yes"
if test "$W64" = "yes"; then
warch="x86_64" # i686 for 32bit, or x86_64 for 64bit
mw64="mingw64" # mingw32 or mingw64
else
warch="i686"
mw64="mingw32"
fi
configure="${mw64}-configure" # mingw32-configure, mingw64-configure
strip="${warch}-w64-mingw32-strip"
makensis="makensis" # from mingw32-nsis package
# flags for crosscompiled dependency libraries
cross_flag=""
check_svn_root
create_temp_dir
# crosscompile openssl for windows.
if test -n "$WINSSL"; then
info "Cross compile $WINSSL"
info "winssl tar unpack"
(cd ..; gzip -cd $WINSSL) | tar xf - || error_cleanup "tar unpack of $WINSSL failed"
sslinstall="`pwd`/sslinstall"
cd openssl-* || error_cleanup "no openssl-X dir in tarball"
# configure for crosscompile, without CAPI because it fails
# cross-compilation and it is not used anyway
# before 1.0.1i need --cross-compile-prefix=i686-w64-mingw32-
if test "$mw64" = "mingw64"; then
sslflags="no-asm -DOPENSSL_NO_CAPIENG mingw64"
else
sslflags="no-asm -DOPENSSL_NO_CAPIENG mingw"
fi
info "winssl: Configure $sslflags"
CC=${warch}-w64-mingw32-gcc AR=${warch}-w64-mingw32-ar RANLIB=${warch}-w64-mingw32-ranlib ./Configure --prefix="$sslinstall" $sslflags || error_cleanup "OpenSSL Configure failed"
info "winssl: make"
make || error_cleanup "OpenSSL crosscompile failed"
# only install sw not docs, which take a long time.
info "winssl: make install_sw"
make install_sw || error_cleanup "OpenSSL install failed"
cross_flag="$cross_flag --with-ssl=$sslinstall"
cd ..
fi
if test -n "$WINEXPAT"; then
info "Cross compile $WINEXPAT"
info "wxp: tar unpack"
(cd ..; gzip -cd $WINEXPAT) | tar xf - || error_cleanup "tar unpack of $WINEXPAT failed"
wxpinstall="`pwd`/wxpinstall"
cd expat-* || error_cleanup "no expat-X dir in tarball"
info "wxp: configure"
$configure --prefix="$wxpinstall" --exec-prefix="$wxpinstall" --bindir="$wxpinstall/bin" --includedir="$wxpinstall/include" --mandir="$wxpinstall/man" --libdir="$wxpinstall/lib" || error_cleanup "libexpat configure failed"
#info "wxp: make"
#make || error_cleanup "libexpat crosscompile failed"
info "wxp: make installlib"
make installlib || error_cleanup "libexpat install failed"
cross_flag="$cross_flag --with-libexpat=$wxpinstall"
cd ..
fi
info "SVNROOT is $SVNROOT"
info "Exporting source from SVN."
svn export "$SVNROOT" unbound || error_cleanup "SVN command failed"
cd unbound || error_cleanup "Unbound not exported correctly from SVN"
# on a re-configure the cache may no longer be valid...
if test -f mingw32-config.cache; then rm mingw32-config.cache; fi
else
cross="no" # mingw and msys
cross_flag=""
configure="./configure"
strip="strip"
makensis="c:/Program Files/NSIS/makensis.exe" # http://nsis.sf.net
fi
# The version gets compiled into the source; edit configure.ac to change it.
version=`./configure --version | head -1 | awk '{ print $3 }'` \
|| error_cleanup "Cannot determine version number."
if [ "$RC" != "no" -o "$SNAPSHOT" != "no" ]; then
if [ "$RC" != "no" ]; then
version2=`echo $version | sed -e 's/rc.*$//' -e 's/_20.*$//'`
version2=`echo $version2 | sed -e 's/rc.*//'`"rc$RC"
fi
if [ "$SNAPSHOT" != "no" ]; then
version2=`echo $version | sed -e 's/rc.*$//' -e 's/_20.*$//'`
version2="${version2}_`date +%Y%m%d`"
fi
replace_version "configure.ac" "$version" "$version2"
version="$version2"
info "Rebuilding configure script (autoconf) snapshot."
autoconf || error_cleanup "Autoconf failed."
autoheader || error_cleanup "Autoheader failed."
rm -r autom4te* || echo "ignored"
fi
# procedure for making unbound installer on mingw.
info "Creating windows dist unbound $version"
info "Calling configure"
echo "$configure"' --enable-debug --enable-static-exe --disable-flto '"$* $cross_flag"
$configure --enable-debug --enable-static-exe --disable-flto $* $cross_flag \
|| error_cleanup "Could not configure"
info "Calling make"
make || error_cleanup "Could not make"
info "Make complete"
info "Unbound version: $version"
file="unbound-$version.zip"
rm -f $file
info "Creating $file"
mkdir tmp.$$
# keep debug symbols
#$strip unbound.exe
#$strip anchor-update.exe
#$strip unbound-control.exe
#$strip unbound-host.exe
#$strip unbound-anchor.exe
#$strip unbound-checkconf.exe
#$strip unbound-service-install.exe
#$strip unbound-service-remove.exe
cd tmp.$$
cp ../doc/example.conf ../doc/Changelog .
cp ../unbound.exe ../unbound-anchor.exe ../unbound-host.exe ../unbound-control.exe ../unbound-checkconf.exe ../unbound-service-install.exe ../unbound-service-remove.exe ../LICENSE ../winrc/unbound-control-setup.cmd ../winrc/unbound-website.url ../winrc/service.conf ../winrc/README.txt ../contrib/create_unbound_ad_servers.cmd ../contrib/warmup.cmd ../contrib/unbound_cache.cmd .
# zipfile
zip ../$file LICENSE README.txt unbound.exe unbound-anchor.exe unbound-host.exe unbound-control.exe unbound-checkconf.exe unbound-service-install.exe unbound-service-remove.exe unbound-control-setup.cmd example.conf service.conf unbound-website.url create_unbound_ad_servers.cmd warmup.cmd unbound_cache.cmd Changelog
info "Testing $file"
(cd .. ; zip -T $file )
# installer
info "Creating installer"
quadversion=`cat ../config.h | grep RSRC_PACKAGE_VERSION | sed -e 's/#define RSRC_PACKAGE_VERSION //' -e 's/,/\\./g'`
cat ../winrc/setup.nsi | sed -e 's/define VERSION.*$/define VERSION "'$version'"/' -e 's/define QUADVERSION.*$/define QUADVERSION "'$quadversion'"/' > ../winrc/setup_ed.nsi
"$makensis" ../winrc/setup_ed.nsi
info "Created installer"
cd ..
rm -rf tmp.$$
mv winrc/unbound_setup_$version.exe .
if test "$cross" = "yes"; then
mv unbound_setup_$version.exe $cwd/.
mv unbound-$version.zip $cwd/.
cleanup
fi
storehash unbound_setup_$version.exe
storehash unbound-$version.zip
ls -lG unbound_setup_$version.exe
ls -lG unbound-$version.zip
info "Done"
exit 0
fi
check_svn_root
# Start the packaging process.
info "SVNROOT is $SVNROOT"
info "SNAPSHOT is $SNAPSHOT"
#question "Do you wish to continue with these settings?" || error "User abort."
create_temp_dir
info "Exporting source from SVN."
svn export "$SVNROOT" unbound || error_cleanup "SVN command failed"
cd unbound || error_cleanup "Unbound not exported correctly from SVN"
info "Adding libtool utils (libtoolize)."
libtoolize -c --install || libtoolize -c || error_cleanup "Libtoolize failed."
info "Building configure script (autoreconf)."
autoreconf || error_cleanup "Autoconf failed."
rm -r autom4te* || error_cleanup "Failed to remove autoconf cache directory."
info "Building lexer and parser."
echo "#include \"config.h\"" > util/configlexer.c || error_cleanup "Failed to create configlexer"
echo "#include \"util/configyyrename.h\"" >> util/configlexer.c || error_cleanup "Failed to create configlexer"
flex -i -t util/configlexer.lex >> util/configlexer.c || error_cleanup "Failed to create configlexer"
if test -x "`which bison 2>/dev/null`"; then YACC=bison; else YACC=yacc; fi
$YACC -y -d -o util/configparser.c util/configparser.y || error_cleanup "Failed to create configparser"
find . -name .c-mode-rc.el -exec rm {} \;
find . -name .cvsignore -exec rm {} \;
rm makedist.sh || error_cleanup "Failed to remove makedist.sh."
info "Determining Unbound version."
version=`./configure --version | head -1 | awk '{ print $3 }'` || \
error_cleanup "Cannot determine version number."
info "Unbound version: $version"
RECONFIGURE="no"
if [ "$RC" != "no" ]; then
info "Building Unbound release candidate $RC."
version2="${version}rc$RC"
info "Version number: $version2"
replace_version "configure.ac" "$version" "$version2"
version="$version2"
RECONFIGURE="yes"
fi
if [ "$SNAPSHOT" = "yes" ]; then
info "Building Unbound snapshot."
version2="${version}_`date +%Y%m%d`"
info "Snapshot version number: $version2"
replace_version "configure.ac" "$version" "$version2"
version="$version2"
RECONFIGURE="yes"
fi
if [ "$RECONFIGURE" = "yes" ]; then
info "Rebuilding configure script (autoconf) snapshot."
autoreconf || error_cleanup "Autoconf failed."
rm -r autom4te* || error_cleanup "Failed to remove autoconf cache directory."
fi
replace_all doc/README
replace_all doc/unbound.8.in
replace_all doc/unbound.conf.5.in
replace_all doc/unbound-checkconf.8.in
replace_all doc/unbound-control.8.in
replace_all doc/unbound-anchor.8.in
replace_all doc/unbound-host.1.in
replace_all doc/example.conf.in
replace_all doc/libunbound.3.in
info "Renaming Unbound directory to unbound-$version."
cd ..
mv unbound unbound-$version || error_cleanup "Failed to rename unbound directory."
tarfile="../unbound-$version.tar.gz"
if [ -f $tarfile ]; then
(question "The file $tarfile already exists. Overwrite?" \
&& rm -f $tarfile) || error_cleanup "User abort."
fi
info "Creating tar unbound-$version.tar.gz"
tar czf ../unbound-$version.tar.gz unbound-$version || error_cleanup "Failed to create tar file."
cleanup
storehash unbound-$version.tar.gz
echo "create unbound-$version.tar.gz.asc with:"
echo " gpg --armor --detach-sign unbound-$version.tar.gz"
echo " gpg --armor --detach-sign unbound-$version.zip"
echo " gpg --armor --detach-sign unbound_setup_$version.exe"
info "Unbound distribution created successfully."
|
thozza/unbound
|
makedist.sh
|
Shell
|
bsd-3-clause
| 15,709 |
#!/bin/bash
#Script to run the json builders!
#BE SURE TO RUN `mvn package` BEFORE DOING THIS!
#JSON files will be stored in json/ directory
java -cp target/original-UserAgentUtils-1.16-SNAPSHOT.jar:target/UserAgentUtils-1.16-SNAPSHOT.jar fauxsoup.Extract
|
kyle-neal/user-agent-utils
|
run.sh
|
Shell
|
bsd-3-clause
| 256 |
#!/bin/bash -p
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# usage: keystone_install.sh update_dmg_mount_point
#
# Called by the Keystone system to update the installed application with a new
# version from a disk image.
#
# Environment variables:
# GOOGLE_CHROME_UPDATER_DEBUG
# When set to a non-empty value, additional information about this script's
# actions will be logged to stderr. The same debugging information will
# also be enabled when "Library/Google/Google Chrome Updater Debug" in the
# root directory or in ${HOME} exists.
# GOOGLE_CHROME_UPDATER_TEST_PATH
# When set to a non-empty value, the product at this path will be updated.
# ksadmin will not be consulted to locate the installed product, nor will it
# be called to update any tickets.
#
# Exit codes:
# 0 Happiness
# 1 Unknown failure
# 2 Basic sanity check source failure (e.g. no app on disk image)
# 3 Basic sanity check destination failure (e.g. ticket points to nothing)
# 4 Update driven by user ticket when a system ticket is also present
# 5 Could not prepare existing installed version to receive update
# 6 Patch sanity check failure
# 7 rsync failed (could not copy new versioned directory to Versions)
# 8 rsync failed (could not update outer .app bundle)
# 9 Could not get the version, update URL, or channel after update
# 10 Updated application does not have the version number from the update
# 11 ksadmin failure
# 12 dirpatcher failed for versioned directory
# 13 dirpatcher failed for outer .app bundle
# 14 The update is incompatible with the system (presently unused)
#
# The following exit codes can be used to convey special meaning to Keystone.
# KeystoneRegistration will present these codes to Chrome as "success."
# 66 (unused) success, request reboot
# 77 (unused) try installation again later
set -eu
# http://b/2290916: Keystone runs the installation with a restrictive PATH
# that only includes the directory containing ksadmin, /bin, and /usr/bin. It
# does not include /sbin or /usr/sbin. This script uses lsof, which is in
# /usr/sbin, and it's conceivable that it might want to use other tools in an
# sbin directory. Adjust the path accordingly.
export PATH="${PATH}:/sbin:/usr/sbin"
# Environment sanitization. Clear environment variables that might impact the
# interpreter's operation. The |bash -p| invocation on the #! line takes the
# bite out of BASH_ENV, ENV, and SHELLOPTS (among other features), but
# clearing them here ensures that they won't impact any shell scripts used as
# utility programs. SHELLOPTS is read-only and can't be unset, only
# unexported.
unset BASH_ENV CDPATH ENV GLOBIGNORE IFS POSIXLY_CORRECT
export -n SHELLOPTS
set -o pipefail
shopt -s nullglob
ME="$(basename "${0}")"
readonly ME
readonly KS_CHANNEL_KEY="KSChannelID"
# Workaround for https://crbug.com/83180#c3: in bash 4.0, "declare VAR" no
# longer initializes VAR if not already set. (Apple has never shipped a bash
# newer than 3.2, but a small number of people seem to have replaced their
# system /bin/sh with a newer bash, probably all before SIP became a thing.)
: ${GOOGLE_CHROME_UPDATER_DEBUG:=}
: ${GOOGLE_CHROME_UPDATER_TEST_PATH:=}
err() {
local error="${1}"
local id=
if [[ -n "${GOOGLE_CHROME_UPDATER_DEBUG}" ]]; then
id=": ${$} $(date "+%Y-%m-%d %H:%M:%S %z")"
fi
echo "${ME}${id}: ${error}" >& 2
}
note() {
local message="${1}"
if [[ -n "${GOOGLE_CHROME_UPDATER_DEBUG}" ]]; then
err "${message}"
fi
}
g_temp_dir=
cleanup() {
local status=${?}
trap - EXIT
trap '' HUP INT QUIT TERM
if [[ ${status} -ge 128 ]]; then
err "Caught signal $((${status} - 128))"
fi
if [[ -n "${g_temp_dir}" ]]; then
rm -rf "${g_temp_dir}"
fi
exit ${status}
}
ensure_temp_dir() {
if [[ -z "${g_temp_dir}" ]]; then
# Choose a template that won't be a dot directory. Make it safe by
# removing leading hyphens, too.
local template="${ME}"
if [[ "${template}" =~ ^[-.]+(.*)$ ]]; then
template="${BASH_REMATCH[1]}"
fi
if [[ -z "${template}" ]]; then
template="keystone_install"
fi
g_temp_dir="$(mktemp -d -t "${template}")"
note "g_temp_dir = ${g_temp_dir}"
fi
}
# Returns 0 (true) if |symlink| exists, is a symbolic link, and appears
# writable on the basis of its POSIX permissions. This is used to determine
# writability like test's -w primary, but -w resolves symbolic links and this
# function does not.
is_writable_symlink() {
local symlink="${1}"
local link_mode
link_mode="$(stat -f %Sp "${symlink}" 2> /dev/null || true)"
if [[ -z "${link_mode}" ]] || [[ "${link_mode:0:1}" != "l" ]]; then
return 1
fi
local link_user link_group
link_user="$(stat -f %u "${symlink}" 2> /dev/null || true)"
link_group="$(stat -f %g "${symlink}" 2> /dev/null || true)"
if [[ -z "${link_user}" ]] || [[ -z "${link_group}" ]]; then
return 1
fi
# If the users match, check the owner-write bit.
if [[ ${EUID} -eq "${link_user}" ]]; then
if [[ "${link_mode:2:1}" = "w" ]]; then
return 0
fi
return 1
fi
# If the file's group matches any of the groups that this process is a
# member of, check the group-write bit.
local group_match=
local group
for group in "${GROUPS[@]}"; do
if [[ "${group}" -eq "${link_group}" ]]; then
group_match="y"
break
fi
done
if [[ -n "${group_match}" ]]; then
if [[ "${link_mode:5:1}" = "w" ]]; then
return 0
fi
return 1
fi
# Check the other-write bit.
if [[ "${link_mode:8:1}" = "w" ]]; then
return 0
fi
return 1
}
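# For illustration: "stat -f %Sp" prints a mode string such as "lrwxr-xr-x"
# for a symbolic link. The substring checks above index into that string:
# position 0 ("l") confirms a link, position 2 is the owner write bit,
# position 5 the group write bit, and position 8 the other write bit, so
# "lrwxr-xr-x" reads as writable only by its owner.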
# If |symlink| exists and is a symbolic link, but is not writable according to
# is_writable_symlink, this function attempts to replace it with a new
# writable symbolic link. If |symlink| does not exist, is not a symbolic
# link, or is already writable, this function does nothing. This function
# always returns 0 (true).
ensure_writable_symlink() {
local symlink="${1}"
if [[ -L "${symlink}" ]] && ! is_writable_symlink "${symlink}"; then
# If ${symlink} refers to a directory, doing this naively might result in
# the new link being placed in that directory, instead of replacing the
# existing link. ln -fhs is supposed to handle this case, but it does so
# by unlinking (removing) the existing symbolic link before creating a new
# one. That leaves a small window during which the symbolic link is not
# present on disk at all.
#
# To avoid that possibility, a new symbolic link is created in a temporary
# location and then swapped into place with mv. An extra temporary
# directory is used to convince mv to replace the symbolic link: again, if
# the existing link refers to a directory, "mv newlink oldlink" will
# actually leave oldlink alone and place newlink into the directory.
# "mv newlink dirname(oldlink)" works as expected, but in order to replace
# oldlink, newlink must have the same basename, hence the temporary
# directory.
local target
target="$(readlink "${symlink}" 2> /dev/null || true)"
if [[ -z "${target}" ]]; then
return 0
fi
# Error handling strategy: if anything fails, such as the mktemp, ln,
# chmod, or mv, ignore the failure and return 0 (success), leaving the
# existing state with the non-writable symbolic link intact. Failures
# in this function will be difficult to understand and diagnose, and a
# non-writable symbolic link is not necessarily fatal. If something else
# requires a writable symbolic link, allowing it to fail when a symbolic
# link is not writable is easier to understand than bailing out of the
# script on failure here.
local symlink_dir temp_link_dir temp_link
symlink_dir="$(dirname "${symlink}")"
temp_link_dir="$(mktemp -d "${symlink_dir}/.symlink_temp.XXXXXX" || true)"
if [[ -z "${temp_link_dir}" ]]; then
return 0
fi
temp_link="${temp_link_dir}/$(basename "${symlink}")"
(ln -fhs "${target}" "${temp_link}" &&
chmod -h 755 "${temp_link}" &&
mv -f "${temp_link}" "${symlink_dir}/") || true
rm -rf "${temp_link_dir}"
fi
return 0
}
# ensure_writable_symlinks_recursive calls ensure_writable_symlink for every
# symbolic link in |directory|, recursively.
#
# In some very weird and rare cases, it is possible to wind up with a user
# installation that contains symbolic links that the user does not have write
# permission over. More on how that might happen later.
#
# If a weird and rare case like this is observed, rsync will exit with an
# error when attempting to update the times on these symbolic links. rsync
# may not be intelligent enough to try creating a new symbolic link in these
# cases, but this script can be.
#
# The problem occurs when an administrative user first drag-installs the
# application to /Applications, resulting in the program's user being set to
# the user's own ID. If, subsequently, a .pkg package is installed over that,
# the existing directory ownership will be preserved, but file ownership will
# be changed to whatever is specified by the package, typically root. This
# applies to symbolic links as well. On a subsequent update, rsync will be
# able to copy the new files into place, because the user still has permission
# to write to the directories. If the symbolic link targets are not changing,
# though, rsync will not replace them, and they will remain owned by root.
# The user will not have permission to update the time on the symbolic links,
# resulting in an rsync error.
ensure_writable_symlinks_recursive() {
local directory="${1}"
# This fix-up is not necessary when running as root, because root will
# always be able to write everything needed.
if [[ ${EUID} -eq 0 ]]; then
return 0
fi
# This step isn't critical.
local set_e=
if [[ "${-}" =~ e ]]; then
set_e="y"
set +e
fi
# Use find -print0 with read -d $'\0' to handle even the weirdest paths.
local symlink
while IFS= read -r -d $'\0' symlink; do
ensure_writable_symlink "${symlink}"
done < <(find "${directory}" -type l -print0)
# Go back to how things were.
if [[ -n "${set_e}" ]]; then
set -e
fi
}
# is_version_ge accepts two version numbers, left and right, and performs a
# piecewise comparison determining the result of left >= right, returning true
# (0) if left >= right, and false (1) if left < right. If left or right are
# missing components relative to the other, the missing components are assumed
# to be 0, such that 10.6 == 10.6.0.
is_version_ge() {
local left="${1}"
local right="${2}"
local -a left_array right_array
IFS=. left_array=(${left})
IFS=. right_array=(${right})
local left_count=${#left_array[@]}
local right_count=${#right_array[@]}
local count=${left_count}
if [[ ${right_count} -lt ${count} ]]; then
count=${right_count}
fi
# Compare the components piecewise, as long as there are corresponding
# components on each side. If left_element and right_element are unequal,
# a comparison can be made.
local index=0
while [[ ${index} -lt ${count} ]]; do
local left_element="${left_array[${index}]}"
local right_element="${right_array[${index}]}"
if [[ ${left_element} -gt ${right_element} ]]; then
return 0
elif [[ ${left_element} -lt ${right_element} ]]; then
return 1
fi
((++index))
done
# If there are more components on the left than on the right, continue
# comparing, assuming 0 for each of the missing components on the right.
while [[ ${index} -lt ${left_count} ]]; do
local left_element="${left_array[${index}]}"
if [[ ${left_element} -gt 0 ]]; then
return 0
fi
((++index))
done
# If there are more components on the right than on the left, continue
# comparing, assuming 0 for each of the missing components on the left.
while [[ ${index} -lt ${right_count} ]]; do
local right_element="${right_array[${index}]}"
if [[ ${right_element} -gt 0 ]]; then
return 1
fi
((++index))
done
# Upon reaching this point, the two version numbers are semantically equal.
return 0
}
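# Illustrative results of the piecewise comparison above (not executed):
#   is_version_ge 10.6 10.6.0        # true: missing components count as 0
#   is_version_ge 1.0.7.1306 1.0.7   # true: extra nonzero component on the left
#   is_version_ge 1.0.7 1.0.7.1306   # false: the right side is greater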
# Prints the version of ksadmin, as reported by ksadmin --ksadmin-version, to
# stdout. This function operates with "static" variables: it will only check
# the ksadmin version once per script run. If ksadmin is old enough to not
# support --ksadmin-version, or another error occurs, this function prints an
# empty string.
g_checked_ksadmin_version=
g_ksadmin_version=
ksadmin_version() {
if [[ -z "${g_checked_ksadmin_version}" ]]; then
g_checked_ksadmin_version="y"
if [[ -n "${GOOGLE_CHROME_UPDATER_TEST_PATH}" ]]; then
note "test mode: not calling Keystone, g_ksadmin_version is fake"
# This isn't very special, it's just what happens to be current as this is
# written. It's new enough that all of the feature checks
# (ksadmin_supports_*) pass.
g_ksadmin_version="1.2.13.41"
else
g_ksadmin_version="$(ksadmin --ksadmin-version || true)"
fi
note "g_ksadmin_version = ${g_ksadmin_version}"
fi
echo "${g_ksadmin_version}"
return 0
}
# Compares the installed ksadmin version against a supplied version number,
# |check_version|, and returns 0 (true) if the installed Keystone version is
# greater than or equal to |check_version| according to a piece-wise
# comparison. Returns 1 (false) if the installed Keystone version number
# cannot be determined or if |check_version| is greater than the installed
# Keystone version. |check_version| should be a string of the form
# "major.minor.micro.build".
is_ksadmin_version_ge() {
local check_version="${1}"
local ksadmin_version="$(ksadmin_version)"
is_version_ge "${ksadmin_version}" "${check_version}"
# The return value of is_version_ge is used as this function's return value.
}
# Returns 0 (true) if ksadmin supports --tag.
ksadmin_supports_tag() {
local ksadmin_version
ksadmin_version="$(ksadmin_version)"
if [[ -n "${ksadmin_version}" ]]; then
# A ksadmin that recognizes --ksadmin-version and provides a version
# number is new enough to recognize --tag.
return 0
fi
return 1
}
# Returns 0 (true) if ksadmin supports --tag-path and --tag-key.
ksadmin_supports_tagpath_tagkey() {
# --tag-path and --tag-key were introduced in Keystone 1.0.7.1306.
is_ksadmin_version_ge 1.0.7.1306
# The return value of is_ksadmin_version_ge is used as this function's
# return value.
}
# Returns 0 (true) if ksadmin supports --brand-path and --brand-key.
ksadmin_supports_brandpath_brandkey() {
# --brand-path and --brand-key were introduced in Keystone 1.0.8.1620.
is_ksadmin_version_ge 1.0.8.1620
# The return value of is_ksadmin_version_ge is used as this function's
# return value.
}
# Returns 0 (true) if ksadmin supports --version-path and --version-key.
ksadmin_supports_versionpath_versionkey() {
# --version-path and --version-key were introduced in Keystone 1.0.9.2318.
is_ksadmin_version_ge 1.0.9.2318
# The return value of is_ksadmin_version_ge is used as this function's
# return value.
}
# Runs "defaults read" to obtain the value of a key in a property list. As
# with "defaults read", an absolute path to a plist is supplied, without the
# ".plist" extension.
#
# As of Mac OS X 10.8, defaults (and NSUserDefaults and CFPreferences)
# normally communicates with cfprefsd to read and write plists. Changes to a
# plist file aren't necessarily reflected immediately via this API family when
# not made through this API family, because cfprefsd may return cached data
# from a former on-disk version of a plist file instead of reading the current
# version from disk. The old behavior can be restored by setting the
# __CFPREFERENCES_AVOID_DAEMON environment variable, although extreme care
# should be used because portions of the system that use this API family
# normally and thus use cfprefsd and its cache will become unsynchronized with
# the on-disk state.
#
# This function is provided to set __CFPREFERENCES_AVOID_DAEMON when calling
# "defaults read" and thus avoid cfprefsd and its on-disk cache, and is
# intended only to be used to read values from Info.plist files, which are not
# preferences. The use of "defaults" for this purpose has always been
# questionable, but there's no better option to interact with plists from
# shell scripts. Definitely don't use infoplist_read to read preference
# plists.
#
# This function exists because the update process delivers new copies of
# Info.plist files to the disk behind cfprefsd's back, and if cfprefsd becomes
# aware of the original version of the file for any reason (such as this
# script reading values from it via "defaults read"), the new version of the
# file will not be immediately effective or visible via cfprefsd after the
# update is applied.
infoplist_read() {
__CFPREFERENCES_AVOID_DAEMON=1 defaults read "${@}"
}
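# For illustration, with a hypothetical install location, reading the bundle
# version from an application's Info.plist would look like:
#   infoplist_read "/Applications/Google Chrome.app/Contents/Info" \
#       "CFBundleShortVersionString"
# As with plain "defaults read", the ".plist" extension is omitted.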
# When a patch update fails because the old installed copy doesn't match the
# expected state, mark_failed_patch_update updates the Keystone ticket by
# adding "-full" to the tag. The server will see this on a subsequent update
# attempt and will provide a full update (as opposed to a patch) to the
# client.
#
# Even if mark_failed_patch_update fails to modify the tag, the user will
# eventually be updated. Patch updates are only provided for successive
# releases on a particular channel, to update version o to version o+1. If a
# patch update fails in this case, eventually version o+2 will be released,
# and no patch update will exist to update o to o+2, so the server will
# provide a full update package.
mark_failed_patch_update() {
local product_id="${1}"
local want_full_installer_path="${2}"
local old_ks_plist="${3}"
local old_version_app="${4}"
local system_ticket="${5}"
# This step isn't critical.
local set_e=
if [[ "${-}" =~ e ]]; then
set_e="y"
set +e
fi
note "marking failed patch update"
local channel
channel="$(infoplist_read "${old_ks_plist}" "${KS_CHANNEL_KEY}" 2> /dev/null)"
local tag="${channel}"
local tag_key="${KS_CHANNEL_KEY}"
tag="${tag}-full"
tag_key="${tag_key}-full"
note "tag = ${tag}"
note "tag_key = ${tag_key}"
# ${old_ks_plist}, used for --tag-path, is the Info.plist for the old
# version of Chrome. It may not contain the keys for the "-full" tag suffix.
# If it doesn't, just bail out without marking the patch update as failed.
local read_tag="$(infoplist_read "${old_ks_plist}" "${tag_key}" 2> /dev/null)"
note "read_tag = ${read_tag}"
if [[ -z "${read_tag}" ]]; then
note "couldn't mark failed patch update"
if [[ -n "${set_e}" ]]; then
set -e
fi
return 0
fi
# Chrome can't easily read its Keystone ticket prior to registration, and
# when Chrome registers with Keystone, it obliterates old tag values in its
# ticket. Therefore, an alternative mechanism is provided to signal to
# Chrome that a full installer is desired. If the .want_full_installer file
# is present and it contains Chrome's current version number, Chrome will
# include "-full" in its tag when it registers with Keystone. This allows
# "-full" to persist in the tag even after Chrome is relaunched, which on a
# user ticket, triggers a re-registration.
#
# .want_full_installer is placed immediately inside the .app bundle as a
# sibling to the Contents directory. In this location, it's outside of the
# view of the code signing and code signature verification machinery. This
# file can safely be added, modified, and removed without affecting the
# signature.
rm -f "${want_full_installer_path}" 2> /dev/null
echo "${old_version_app}" > "${want_full_installer_path}"
# See the comment below in the "setting permissions" section for an
# explanation of the groups and modes selected here.
local chmod_mode="644"
if [[ -z "${system_ticket}" ]] &&
[[ "${want_full_installer_path:0:14}" = "/Applications/" ]] &&
chgrp admin "${want_full_installer_path}" 2> /dev/null; then
chmod_mode="664"
fi
note "chmod_mode = ${chmod_mode}"
chmod "${chmod_mode}" "${want_full_installer_path}" 2> /dev/null
local old_ks_plist_path="${old_ks_plist}.plist"
# Using ksadmin without --register only updates specified values in the
# ticket, without changing other existing values.
local ksadmin_args=(
--productid "${product_id}"
)
if ksadmin_supports_tag; then
ksadmin_args+=(
--tag "${tag}"
)
fi
if ksadmin_supports_tagpath_tagkey; then
ksadmin_args+=(
--tag-path "${old_ks_plist_path}"
--tag-key "${tag_key}"
)
fi
note "ksadmin_args = ${ksadmin_args[*]}"
if [[ -n "${GOOGLE_CHROME_UPDATER_TEST_PATH}" ]]; then
note "test mode: not calling Keystone to mark failed patch update"
elif ! ksadmin "${ksadmin_args[@]}"; then
err "ksadmin failed to mark failed patch update"
else
note "marked failed patch update"
fi
# Go back to how things were.
if [[ -n "${set_e}" ]]; then
set -e
fi
}
usage() {
echo "usage: ${ME} update_dmg_mount_point" >& 2
}
main() {
local update_dmg_mount_point="${1}"
# Early steps are critical. Don't continue past any failure.
set -e
trap cleanup EXIT HUP INT QUIT TERM
readonly PRODUCT_NAME="Google Chrome"
readonly APP_DIR="${PRODUCT_NAME}.app"
readonly ALTERNATE_APP_DIR="${PRODUCT_NAME} Canary.app"
readonly FRAMEWORK_NAME="${PRODUCT_NAME} Framework"
readonly FRAMEWORK_DIR="${FRAMEWORK_NAME}.framework"
readonly PATCH_DIR=".patch"
readonly CONTENTS_DIR="Contents"
readonly APP_PLIST="${CONTENTS_DIR}/Info"
readonly VERSIONS_DIR_NEW=\
"${CONTENTS_DIR}/Frameworks/${FRAMEWORK_DIR}/Versions"
readonly VERSIONS_DIR_OLD="${CONTENTS_DIR}/Versions"
readonly UNROOTED_BRAND_PLIST="Library/Google/Google Chrome Brand"
readonly UNROOTED_DEBUG_FILE="Library/Google/Google Chrome Updater Debug"
readonly UNROOTED_KS_BUNDLE_DIR=\
"Library/Google/GoogleSoftwareUpdate/GoogleSoftwareUpdate.bundle"
readonly APP_VERSION_KEY="CFBundleShortVersionString"
readonly APP_BUNDLEID_KEY="CFBundleIdentifier"
readonly KS_VERSION_KEY="KSVersion"
readonly KS_PRODUCT_KEY="KSProductID"
readonly KS_URL_KEY="KSUpdateURL"
readonly KS_BRAND_KEY="KSBrandID"
readonly QUARANTINE_ATTR="com.apple.quarantine"
# Don't use rsync --archive, because --archive includes --group and --owner,
# which copy groups and owners, respectively, from the source, and that is
# undesirable in this case (often, this script will have permission to set
# those attributes). --archive also includes --devices and --specials, which
# copy files that should never occur in the transfer; --devices only works
# when running as root, so for consistency between privileged and unprivileged
# operation, this option is omitted as well. --archive does not include
# --ignore-times, which is desirable, as it forces rsync to copy files even
# when their sizes and modification times are identical, as their content
# still may be different.
readonly RSYNC_FLAGS="--ignore-times --links --perms --recursive --times"
# It's difficult to get GOOGLE_CHROME_UPDATER_DEBUG set in the environment
# when this script is called from Keystone. If a "debug file" exists in
# either the root directory or the home directory of the user who owns the
# ticket, turn on verbosity. This may aid debugging.
if [[ -e "/${UNROOTED_DEBUG_FILE}" ]] ||
[[ -e ~/"${UNROOTED_DEBUG_FILE}" ]]; then
export GOOGLE_CHROME_UPDATER_DEBUG="y"
fi
note "update_dmg_mount_point = ${update_dmg_mount_point}"
# The argument should be the disk image path. Make sure it exists and that
# it's an absolute path.
note "checking update"
if [[ -z "${update_dmg_mount_point}" ]] ||
[[ "${update_dmg_mount_point:0:1}" != "/" ]] ||
! [[ -d "${update_dmg_mount_point}" ]]; then
err "update_dmg_mount_point must be an absolute path to a directory"
usage
exit 2
fi
local patch_dir="${update_dmg_mount_point}/${PATCH_DIR}"
if [[ "${patch_dir:0:1}" != "/" ]]; then
note "patch_dir = ${patch_dir}"
err "patch_dir must be an absolute path"
exit 2
fi
# Figure out if this is an ordinary installation disk image being used as a
# full update, or a patch. A patch will have a .patch directory at the root
# of the disk image containing information about the update, tools to apply
# it, and the update contents.
local is_patch=
local dirpatcher=
if [[ -d "${patch_dir}" ]]; then
# patch_dir exists and is a directory - this is a patch update.
is_patch="y"
dirpatcher="${patch_dir}/dirpatcher.sh"
if ! [[ -x "${dirpatcher}" ]]; then
err "couldn't locate dirpatcher"
exit 6
fi
elif [[ -e "${patch_dir}" ]]; then
# patch_dir exists, but is not a directory - what's that mean?
note "patch_dir = ${patch_dir}"
err "patch_dir must be a directory"
exit 2
else
# patch_dir does not exist - this is a full "installer."
patch_dir=
fi
note "patch_dir = ${patch_dir}"
note "is_patch = ${is_patch}"
note "dirpatcher = ${dirpatcher}"
# The update to install.
# update_app is the path to the new version of the .app. It will only be
# set at this point for a non-patch update. It is not yet set for a patch
# update because no such directory exists yet; it will be set later when
# dirpatcher creates it.
local update_app=
# update_version_app_old, patch_app_dir, and patch_versioned_dir will only
# be set for patch updates.
local update_version_app_old=
local patch_app_dir=
local patch_versioned_dir=
local update_version_app update_version_ks product_id update_layout_new
if [[ -z "${is_patch}" ]]; then
update_app="${update_dmg_mount_point}/${APP_DIR}"
note "update_app = ${update_app}"
# Make sure that it's an absolute path.
if [[ "${update_app:0:1}" != "/" ]]; then
err "update_app must be an absolute path"
exit 2
fi
# Make sure there's something to copy from.
if ! [[ -d "${update_app}" ]]; then
update_app="${update_dmg_mount_point}/${ALTERNATE_APP_DIR}"
note "update_app = ${update_app}"
if [[ "${update_app:0:1}" != "/" ]]; then
err "update_app (alternate) must be an absolute path"
exit 2
fi
if ! [[ -d "${update_app}" ]]; then
err "update_app must be a directory"
exit 2
fi
fi
# Get some information about the update.
note "reading update values"
local update_app_plist="${update_app}/${APP_PLIST}"
note "update_app_plist = ${update_app_plist}"
if ! update_version_app="$(infoplist_read "${update_app_plist}" \
"${APP_VERSION_KEY}")" ||
[[ -z "${update_version_app}" ]]; then
err "couldn't determine update_version_app"
exit 2
fi
note "update_version_app = ${update_version_app}"
local update_ks_plist="${update_app_plist}"
note "update_ks_plist = ${update_ks_plist}"
if ! update_version_ks="$(infoplist_read "${update_ks_plist}" \
"${KS_VERSION_KEY}")" ||
[[ -z "${update_version_ks}" ]]; then
err "couldn't determine update_version_ks"
exit 2
fi
note "update_version_ks = ${update_version_ks}"
if ! product_id="$(infoplist_read "${update_ks_plist}" \
"${KS_PRODUCT_KEY}")" ||
[[ -z "${product_id}" ]]; then
err "couldn't determine product_id"
exit 2
fi
note "product_id = ${product_id}"
if [[ -d "${update_app}/${VERSIONS_DIR_NEW}" ]]; then
update_layout_new="y"
fi
note "update_layout_new = ${update_layout_new}"
else # [[ -n "${is_patch}" ]]
# Get some information about the update.
note "reading update values"
if ! update_version_app_old=$(<"${patch_dir}/old_app_version") ||
[[ -z "${update_version_app_old}" ]]; then
err "couldn't determine update_version_app_old"
exit 2
fi
note "update_version_app_old = ${update_version_app_old}"
if ! update_version_app=$(<"${patch_dir}/new_app_version") ||
[[ -z "${update_version_app}" ]]; then
err "couldn't determine update_version_app"
exit 2
fi
note "update_version_app = ${update_version_app}"
if ! update_version_ks=$(<"${patch_dir}/new_ks_version") ||
[[ -z "${update_version_ks}" ]]; then
err "couldn't determine update_version_ks"
exit 2
fi
note "update_version_ks = ${update_version_ks}"
if ! product_id=$(<"${patch_dir}/ks_product") ||
[[ -z "${product_id}" ]]; then
err "couldn't determine product_id"
exit 2
fi
note "product_id = ${product_id}"
patch_app_dir="${patch_dir}/application.dirpatch"
if ! [[ -d "${patch_app_dir}" ]]; then
err "couldn't locate patch_app_dir"
exit 6
fi
note "patch_app_dir = ${patch_app_dir}"
patch_versioned_dir="${patch_dir}/\
framework_${update_version_app_old}_${update_version_app}.dirpatch"
if [[ -d "${patch_versioned_dir}" ]]; then
update_layout_new="y"
else
patch_versioned_dir=\
"${patch_dir}/version_${update_version_app_old}_${update_version_app}.dirpatch"
if ! [[ -d "${patch_versioned_dir}" ]]; then
err "couldn't locate patch_versioned_dir"
exit 6
fi
fi
note "patch_versioned_dir = ${patch_versioned_dir}"
note "update_layout_new = ${update_layout_new}"
fi
# ksadmin is required. Keystone should have set a ${PATH} that includes it.
# Check that here, so that more useful feedback can be offered in the
# unlikely event that ksadmin is missing.
note "checking Keystone"
if [[ -n "${GOOGLE_CHROME_UPDATER_TEST_PATH}" ]]; then
note "test mode: not setting ksadmin_path"
else
local ksadmin_path
if ! ksadmin_path="$(type -p ksadmin)" || [[ -z "${ksadmin_path}" ]]; then
err "couldn't locate ksadmin_path"
exit 3
fi
note "ksadmin_path = ${ksadmin_path}"
fi
# Call ksadmin_version once to prime the global state. This is needed
# because subsequent calls to ksadmin_version that occur in $(...)
# expansions will not affect the global state (although they can read from
# the already-initialized global state) and thus will cause a new ksadmin
# --ksadmin-version process to run for each check unless the globals have
# been properly initialized beforehand.
ksadmin_version >& /dev/null || true
local ksadmin_version_string
ksadmin_version_string="$(ksadmin_version 2> /dev/null || true)"
note "ksadmin_version_string = ${ksadmin_version_string}"
# Figure out where to install.
local installed_app
if [[ -n "${GOOGLE_CHROME_UPDATER_TEST_PATH}" ]]; then
note "test mode: not calling Keystone, installed_app is from environment"
installed_app="${GOOGLE_CHROME_UPDATER_TEST_PATH}"
elif ! installed_app="$(ksadmin -pP "${product_id}" | sed -Ene \
"s%^[[:space:]]+xc=<KSPathExistenceChecker:.* path=(/.+)>\$%\\1%p")" ||
[[ -z "${installed_app}" ]]; then
err "couldn't locate installed_app"
exit 3
fi
note "installed_app = ${installed_app}"
local want_full_installer_path="${installed_app}/.want_full_installer"
note "want_full_installer_path = ${want_full_installer_path}"
if [[ "${installed_app:0:1}" != "/" ]] ||
! [[ -d "${installed_app}" ]]; then
err "installed_app must be an absolute path to a directory"
exit 3
fi
# If this script is running as root, it's being driven by a system ticket.
# Otherwise, it's being driven by a user ticket.
local system_ticket=
if [[ ${EUID} -eq 0 ]]; then
system_ticket="y"
fi
note "system_ticket = ${system_ticket}"
# If this script is being driven by a user ticket, but a system ticket is also
# present and system Keystone is installed, there's a potential for the two
# tickets to collide. Both ticket types might be present if another user on
# the system promoted the ticket to system: the other user could not have
# removed this user's user ticket. Handle that case here by deleting the user
# ticket and exiting early with a discrete exit code.
#
# Current versions of ksadmin will exit 1 (false) when asked to print tickets
# and given a specific product ID to print. Older versions of ksadmin would
# exit 0 (true), but those same versions did not support -S (meaning to check
# the system ticket store) and would exit 1 (false) with this invocation due
# to not understanding the question. Therefore, the usage here will only
# delete the existing user ticket when running as non-root with access to a
# sufficiently recent ksadmin. Older ksadmins are tolerated: the update will
# likely fail for another reason and the user ticket will hang around until
# something is eventually able to remove it.
if [[ -z "${GOOGLE_CHROME_UPDATER_TEST_PATH}" ]] &&
[[ -z "${system_ticket}" ]] &&
[[ -d "/${UNROOTED_KS_BUNDLE_DIR}" ]] &&
ksadmin -S --print-tickets --productid "${product_id}" >& /dev/null; then
ksadmin --delete --productid "${product_id}" || true
err "can't update on a user ticket when a system ticket is also present"
exit 4
fi
# Figure out what the existing installed application is using for its
# versioned directory. This will be used later, to avoid removing the
# existing installed version's versioned directory in case anything is still
# using it.
note "reading install values"
local installed_app_plist="${installed_app}/${APP_PLIST}"
note "installed_app_plist = ${installed_app_plist}"
local installed_app_plist_path="${installed_app_plist}.plist"
note "installed_app_plist_path = ${installed_app_plist_path}"
local old_version_app
old_version_app="$(infoplist_read "${installed_app_plist}" \
"${APP_VERSION_KEY}" || true)"
note "old_version_app = ${old_version_app}"
# old_version_app is not required, because it won't be present in skeleton
# bootstrap installations, which just have an empty .app directory. Only
# require it when doing a patch update, and use it to validate that the
# patch applies to the old installed version. By definition, skeleton
# bootstraps can't be installed with patch updates. They require the full
# application on the disk image.
if [[ -n "${is_patch}" ]]; then
if [[ -z "${old_version_app}" ]]; then
err "old_version_app required for patch"
exit 6
elif [[ "${old_version_app}" != "${update_version_app_old}" ]]; then
err "this patch does not apply to the installed version"
exit 6
fi
fi
local installed_versions_dir_new="${installed_app}/${VERSIONS_DIR_NEW}"
note "installed_versions_dir_new = ${installed_versions_dir_new}"
local installed_versions_dir_old="${installed_app}/${VERSIONS_DIR_OLD}"
note "installed_versions_dir_old = ${installed_versions_dir_old}"
local installed_versions_dir
if [[ -n "${update_layout_new}" ]]; then
installed_versions_dir="${installed_versions_dir_new}"
else
installed_versions_dir="${installed_versions_dir_old}"
fi
note "installed_versions_dir = ${installed_versions_dir}"
# If the installed application is incredibly old, or in a skeleton bootstrap
# installation, old_versioned_dir may not exist.
local old_versioned_dir
if [[ -n "${old_version_app}" ]]; then
if [[ -d "${installed_versions_dir_new}/${old_version_app}" ]]; then
old_versioned_dir="${installed_versions_dir_new}/${old_version_app}"
elif [[ -d "${installed_versions_dir_old}/${old_version_app}" ]]; then
old_versioned_dir="${installed_versions_dir_old}/${old_version_app}"
fi
fi
note "old_versioned_dir = ${old_versioned_dir}"
# Collect the installed application's brand code, it will be used later. It
# is not an error for the installed application to not have a brand code.
local old_ks_plist="${installed_app_plist}"
note "old_ks_plist = ${old_ks_plist}"
local old_brand
old_brand="$(infoplist_read "${old_ks_plist}" \
"${KS_BRAND_KEY}" 2> /dev/null ||
true)"
note "old_brand = ${old_brand}"
local update_versioned_dir=
if [[ -z "${is_patch}" ]]; then
if [[ -n "${update_layout_new}" ]]; then
update_versioned_dir=\
"${update_app}/${VERSIONS_DIR_NEW}/${update_version_app}"
else
update_versioned_dir=\
"${update_app}/${VERSIONS_DIR_OLD}/${update_version_app}"
fi
note "update_versioned_dir = ${update_versioned_dir}"
fi
ensure_writable_symlinks_recursive "${installed_app}"
# By copying to ${installed_app}, the existing application name will be
# preserved, if the user has renamed the application on disk. Respecting
# the user's changes is friendly.
# Make sure that ${installed_versions_dir} exists, so that it can receive
# the versioned directory. It may not exist if updating from an older
# version that did not use the same versioned layout on disk. Later, during
# the rsync to copy the application directory, the mode bits and timestamp on
# ${installed_versions_dir} will be set to conform to whatever is present in
# the update.
#
# ${installed_app} is guaranteed to exist at this point, but
# ${installed_app}/${CONTENTS_DIR} may not if things are severely broken or
# if this update is actually an initial installation from a Keystone
# skeleton bootstrap. The mkdir creates ${installed_app}/${CONTENTS_DIR} if
# it doesn't exist; its mode bits will be fixed up in a subsequent rsync.
note "creating installed_versions_dir"
if ! mkdir -p "${installed_versions_dir}"; then
err "mkdir of installed_versions_dir failed"
exit 5
fi
local new_versioned_dir
new_versioned_dir="${installed_versions_dir}/${update_version_app}"
note "new_versioned_dir = ${new_versioned_dir}"
# If there's an entry at ${new_versioned_dir} but it's not a directory
# (or it's a symbolic link, whether or not it points to a directory), rsync
# won't get rid of it. It's never correct to have a non-directory in place
# of the versioned directory, so toss out whatever's there. Don't treat
# this as a critical step: if removal fails, operation can still proceed to
# the dirpatcher or rsync, which will likely fail.
if [[ -e "${new_versioned_dir}" ]] &&
([[ -L "${new_versioned_dir}" ]] ||
! [[ -d "${new_versioned_dir}" ]]); then
note "removing non-directory in place of versioned directory"
rm -f "${new_versioned_dir}" 2> /dev/null || true
fi
if [[ -n "${is_patch}" ]]; then
# dirpatcher won't patch into a directory that already exists. Doing so
# would be a bad idea, anyway. If ${new_versioned_dir} already exists,
# it may be something left over from a previous failed or incomplete
# update attempt, or it may be the live versioned directory if this is a
# same-version update intended only to change channels. Since there's no
# way to tell, this case is handled by having dirpatcher produce the new
# versioned directory in a temporary location and then having rsync copy
# it into place as an ${update_versioned_dir}, the same as in a non-patch
# update. If ${new_versioned_dir} doesn't exist, dirpatcher can place the
# new versioned directory at that location directly.
local versioned_dir_target
if ! [[ -e "${new_versioned_dir}" ]]; then
versioned_dir_target="${new_versioned_dir}"
note "versioned_dir_target = ${versioned_dir_target}"
else
ensure_temp_dir
versioned_dir_target="${g_temp_dir}/${update_version_app}"
note "versioned_dir_target = ${versioned_dir_target}"
update_versioned_dir="${versioned_dir_target}"
note "update_versioned_dir = ${update_versioned_dir}"
fi
note "dirpatching versioned directory"
if ! "${dirpatcher}" "${old_versioned_dir}" \
"${patch_versioned_dir}" \
"${versioned_dir_target}"; then
err "dirpatcher of versioned directory failed, status ${PIPESTATUS[0]}"
mark_failed_patch_update "${product_id}" \
"${want_full_installer_path}" \
"${old_ks_plist}" \
"${old_version_app}" \
"${system_ticket}"
if [[ -n "${update_layout_new}" ]] &&
[[ "${versioned_dir_target}" = "${new_versioned_dir}" ]]; then
# If the dirpatcher of a new-layout versioned directory failed while
# writing directly to the target location, remove it. The incomplete
# version would break code signature validation under the new layout.
# If it was being staged in a temporary directory, there's nothing to
# clean up beyond cleaning up the temporary directory, which will happen
# normally at exit.
note "cleaning up new_versioned_dir"
rm -rf "${new_versioned_dir}"
fi
exit 12
fi
fi
# Copy the versioned directory. The new versioned directory should have a
# different name than any existing one, so this won't harm anything already
# present in ${installed_versions_dir}, including the versioned directory
# being used by any running processes. If this step is interrupted, there
# will be an incomplete versioned directory left behind, but it won't
# interfere with anything, and it will be replaced or removed during a
# future update attempt.
#
# In certain cases, same-version updates are distributed to move users
# between channels; when this happens, the contents of the versioned
# directories are identical and rsync will not render the versioned
# directory unusable even for an instant.
#
# ${update_versioned_dir} may be empty during a patch update (${is_patch})
# if the dirpatcher above was able to write it into place directly. In
# that event, dirpatcher guarantees that ${new_versioned_dir} is already in
# place.
if [[ -n "${update_versioned_dir}" ]]; then
note "rsyncing versioned directory"
if ! rsync ${RSYNC_FLAGS} --delete-before "${update_versioned_dir}/" \
"${new_versioned_dir}"; then
err "rsync of versioned directory failed, status ${PIPESTATUS[0]}"
if [[ -n "${update_layout_new}" ]]; then
# If the rsync of a new-layout versioned directory failed, remove it.
# The incomplete version would break code signature validation.
note "cleaning up new_versioned_dir"
rm -rf "${new_versioned_dir}"
fi
exit 7
fi
fi
if [[ -n "${is_patch}" ]]; then
# If the versioned directory was prepared in a temporary directory and
# then rsynced into place, remove the temporary copy now that it's no
# longer needed.
if [[ -n "${update_versioned_dir}" ]]; then
rm -rf "${update_versioned_dir}" 2> /dev/null || true
update_versioned_dir=
note "update_versioned_dir = ${update_versioned_dir}"
fi
# Prepare ${update_app}. This always needs to be done in a temporary
# location because dirpatcher won't write to a directory that already
# exists, and ${installed_app} needs to be used as input to dirpatcher
# in any event. The new application will be rsynced into place once
# dirpatcher creates it.
ensure_temp_dir
update_app="${g_temp_dir}/${APP_DIR}"
note "update_app = ${update_app}"
note "dirpatching app directory"
if ! "${dirpatcher}" "${installed_app}" \
"${patch_app_dir}" \
"${update_app}"; then
err "dirpatcher of app directory failed, status ${PIPESTATUS[0]}"
mark_failed_patch_update "${product_id}" \
"${want_full_installer_path}" \
"${old_ks_plist}" \
"${old_version_app}" \
"${system_ticket}"
exit 13
fi
fi
# See if the timestamp of what's currently on disk is newer than the
# update's outer .app's timestamp. rsync will copy the update's timestamp
# over, but if that timestamp isn't as recent as what's already on disk, the
# .app will need to be touched.
local needs_touch=
if [[ "${installed_app}" -nt "${update_app}" ]]; then
needs_touch="y"
fi
note "needs_touch = ${needs_touch}"
# Copy the unversioned files into place, leaving everything in
# ${installed_versions_dir} alone. If this step is interrupted, the
# application will at least remain in a usable state, although it may not
# pass signature validation. Depending on when this step is interrupted,
# the application will either launch the old or the new version. The
# critical point is when the main executable is replaced. There isn't very
# much to copy in this step, because most of the application is in the
# versioned directory. This step only accounts for around 50 files, most of
# which are small localized InfoPlist.strings files. Note that
# ${VERSIONS_DIR_NEW} or ${VERSIONS_DIR_OLD} are included to copy their mode
# bits and timestamps, but their contents are excluded, having already been
# installed above. The ${VERSIONS_DIR_NEW}/Current symbolic link is updated
# or created in this step, however.
note "rsyncing app directory"
if ! rsync ${RSYNC_FLAGS} --delete-after \
--include="/${VERSIONS_DIR_NEW}/Current" \
--exclude="/${VERSIONS_DIR_NEW}/*" --exclude="/${VERSIONS_DIR_OLD}/*" \
"${update_app}/" "${installed_app}"; then
err "rsync of app directory failed, status ${PIPESTATUS[0]}"
exit 8
fi
note "rsyncs complete"
if [[ -n "${is_patch}" ]]; then
# update_app has been rsynced into place and is no longer needed.
rm -rf "${update_app}" 2> /dev/null || true
update_app=
note "update_app = ${update_app}"
fi
if [[ -n "${g_temp_dir}" ]]; then
# The temporary directory, if any, is no longer needed.
rm -rf "${g_temp_dir}" 2> /dev/null || true
g_temp_dir=
note "g_temp_dir = ${g_temp_dir}"
fi
# Clean up any old .want_full_installer files from previous dirpatcher
# failures. This is not considered a critical step, because this file
# normally does not exist at all.
rm -f "${want_full_installer_path}" || true
# If necessary, touch the outermost .app so that it appears to the outside
# world that something was done to the bundle. This will cause
# LaunchServices to invalidate the information it has cached about the
# bundle even if lsregister does not run. This is not done if rsync already
# updated the timestamp to something newer than what had been on disk. This
# is not considered a critical step, and if it fails, this script will not
# exit.
if [[ -n "${needs_touch}" ]]; then
touch -cf "${installed_app}" || true
fi
# Read the new values, such as the version.
note "reading new values"
local new_version_app
if ! new_version_app="$(infoplist_read "${installed_app_plist}" \
"${APP_VERSION_KEY}")" ||
[[ -z "${new_version_app}" ]]; then
err "couldn't determine new_version_app"
exit 9
fi
note "new_version_app = ${new_version_app}"
local new_versioned_dir="${installed_versions_dir}/${new_version_app}"
note "new_versioned_dir = ${new_versioned_dir}"
local new_ks_plist="${installed_app_plist}"
note "new_ks_plist = ${new_ks_plist}"
local new_version_ks
if ! new_version_ks="$(infoplist_read "${new_ks_plist}" \
"${KS_VERSION_KEY}")" ||
[[ -z "${new_version_ks}" ]]; then
err "couldn't determine new_version_ks"
exit 9
fi
note "new_version_ks = ${new_version_ks}"
local update_url
if ! update_url="$(infoplist_read "${new_ks_plist}" "${KS_URL_KEY}")" ||
[[ -z "${update_url}" ]]; then
err "couldn't determine update_url"
exit 9
fi
note "update_url = ${update_url}"
# The channel ID is optional. Suppress stderr to prevent Keystone from
# seeing possible error output.
local channel
channel="$(infoplist_read "${new_ks_plist}" \
"${KS_CHANNEL_KEY}" 2> /dev/null || true)"
note "channel = ${channel}"
local tag="${channel}"
local tag_key="${KS_CHANNEL_KEY}"
note "tag = ${tag}"
note "tag_key = ${tag_key}"
# Make sure that the update was successful by comparing the version found in
# the update with the version now on disk.
if [[ "${new_version_ks}" != "${update_version_ks}" ]]; then
err "new_version_ks and update_version_ks do not match"
exit 10
fi
# Notify LaunchServices. This is not considered a critical step, and
# lsregister's exit codes shouldn't be confused with this script's own.
# Redirect stdout to /dev/null to suppress the useless "ThrottleProcessIO:
# throttling disk i/o" messages that lsregister might print.
note "notifying LaunchServices"
local coreservices="/System/Library/Frameworks/CoreServices.framework"
local launchservices="${coreservices}/Frameworks/LaunchServices.framework"
local lsregister="${launchservices}/Support/lsregister"
note "coreservices = ${coreservices}"
note "launchservices = ${launchservices}"
note "lsregister = ${lsregister}"
"${lsregister}" -f "${installed_app}" > /dev/null || true
# The brand information is stored differently depending on whether this is
# running for a system or user ticket.
note "handling brand code"
local set_brand_file_access=
local brand_plist
if [[ -n "${system_ticket}" ]]; then
# System ticket.
set_brand_file_access="y"
brand_plist="/${UNROOTED_BRAND_PLIST}"
else
# User ticket.
brand_plist=~/"${UNROOTED_BRAND_PLIST}"
fi
local brand_plist_path="${brand_plist}.plist"
note "set_brand_file_access = ${set_brand_file_access}"
note "brand_plist = ${brand_plist}"
note "brand_plist_path = ${brand_plist_path}"
local ksadmin_brand_plist_path
local ksadmin_brand_key
# Only the stable channel, identified by an empty channel string, has a
# brand code. On the beta and dev channels, remove the brand plist if
# present. Its presence means that the ticket used to manage a
# stable-channel Chrome but the user has since replaced it with a beta or
# dev channel version. Since the canary channel can run side-by-side with
# another Chrome installation, don't remove the brand plist on that channel,
# but skip the rest of the brand logic.
if [[ "${channel}" = "beta" ]] || [[ "${channel}" = "dev" ]]; then
note "defeating brand code on channel ${channel}"
rm -f "${brand_plist_path}" 2>/dev/null || true
elif [[ -n "${channel}" ]]; then
# Canary channel.
note "skipping brand code on channel ${channel}"
else
# Stable channel.
# If the user manually updated their copy of Chrome, there might be new
# brand information in the app bundle, and that needs to be copied out
# into the file Keystone looks at.
if [[ -n "${old_brand}" ]]; then
local brand_dir
brand_dir="$(dirname "${brand_plist_path}")"
note "brand_dir = ${brand_dir}"
if ! mkdir -p "${brand_dir}"; then
err "couldn't mkdir brand_dir, continuing"
else
if ! defaults write "${brand_plist}" "${KS_BRAND_KEY}" \
-string "${old_brand}"; then
err "couldn't write brand_plist, continuing"
elif [[ -n "${set_brand_file_access}" ]]; then
if ! chown "root:wheel" "${brand_plist_path}"; then
err "couldn't chown brand_plist_path, continuing"
else
if ! chmod 644 "${brand_plist_path}"; then
err "couldn't chmod brand_plist_path, continuing"
fi
fi
fi
fi
fi
# Confirm that the brand file exists. It's optional.
ksadmin_brand_plist_path="${brand_plist_path}"
ksadmin_brand_key="${KS_BRAND_KEY}"
if ! [[ -f "${ksadmin_brand_plist_path}" ]]; then
# Clear any branding information.
ksadmin_brand_plist_path=
ksadmin_brand_key=
fi
fi
note "ksadmin_brand_plist_path = ${ksadmin_brand_plist_path}"
note "ksadmin_brand_key = ${ksadmin_brand_key}"
note "notifying Keystone"
local ksadmin_args=(
--register
--productid "${product_id}"
--version "${new_version_ks}"
--xcpath "${installed_app}"
--url "${update_url}"
)
if ksadmin_supports_tag; then
ksadmin_args+=(
--tag "${tag}"
)
fi
if ksadmin_supports_tagpath_tagkey; then
ksadmin_args+=(
--tag-path "${installed_app_plist_path}"
--tag-key "${tag_key}"
)
fi
if ksadmin_supports_brandpath_brandkey; then
ksadmin_args+=(
--brand-path "${ksadmin_brand_plist_path}"
--brand-key "${ksadmin_brand_key}"
)
fi
if ksadmin_supports_versionpath_versionkey; then
ksadmin_args+=(
--version-path "${installed_app_plist_path}"
--version-key "${KS_VERSION_KEY}"
)
fi
note "ksadmin_args = ${ksadmin_args[*]}"
if [[ -n "${GOOGLE_CHROME_UPDATER_TEST_PATH}" ]]; then
note "test mode: not calling Keystone to update ticket"
elif ! ksadmin "${ksadmin_args[@]}"; then
err "ksadmin failed"
exit 11
fi
# The remaining steps are not considered critical.
set +e
# Try to clean up old versions that are not in use. The strategy is to keep
# the versioned directory corresponding to the update just applied
# (obviously) and the version that was just replaced, and to use ps and lsof
# to see if it looks like any processes are currently using any other old
# directories. Directories not in use are removed. Old versioned
# directories that are in use are left alone so as to not interfere with
# running processes. These directories can be cleaned up by this script on
# future updates.
#
# To determine which directories are in use, both ps and lsof are used.
# Each approach has limitations.
#
# The ps check looks for processes within the versioned directory. Only
# helper processes, such as renderers, are within the versioned directory.
# Browser processes are not, so the ps check will not find them, and will
# assume that a versioned directory is not in use if a browser is open
# without any windows. The ps mechanism can also only detect processes
# running on the system that is performing the update. If network shares
# are involved, all bets are off.
#
# The lsof check looks to see what processes have the framework dylib open.
# Browser processes will have their versioned framework dylib open, so this
# check is able to catch browsers even if there are no associated helper
# processes. Like the ps check, the lsof check is limited to processes on
# the system that is performing the update. Finally, unless running as
# root, the lsof check can only find processes running as the effective user
# performing the update.
#
# These limitations are motivations to additionally preserve the versioned
# directory corresponding to the version that was just replaced.
note "cleaning up old versioned directories"
local versioned_dir
for versioned_dir in "${installed_versions_dir_new}/"* \
"${installed_versions_dir_old}/"*; do
note "versioned_dir = ${versioned_dir}"
if [[ "${versioned_dir}" = "${new_versioned_dir}" ]] ||
[[ "${versioned_dir}" = "${old_versioned_dir}" ]] ||
[[ "${versioned_dir}" = "${installed_versions_dir_new}/Current" ]]; then
# This is the versioned directory corresponding to the update that was
# just applied or the version that was previously in use. Leave it
# alone.
note "versioned_dir is new_versioned_dir or old_versioned_dir, skipping"
continue
fi
# Look for any processes whose executables are within this versioned
# directory. They'll be helper processes, such as renderers. Their
# existence indicates that this versioned directory is currently in use.
local ps_string="${versioned_dir}/"
note "ps_string = ${ps_string}"
# Look for any processes using the framework dylib. This will catch
# browser processes where the ps check will not, but it is limited to
# processes running as the effective user.
local lsof_file
if [[ -e "${versioned_dir}/${FRAMEWORK_DIR}/${FRAMEWORK_NAME}" ]]; then
# Old layout.
lsof_file="${versioned_dir}/${FRAMEWORK_DIR}/${FRAMEWORK_NAME}"
else
# New layout.
lsof_file="${versioned_dir}/${FRAMEWORK_NAME}"
fi
note "lsof_file = ${lsof_file}"
# ps -e displays all users' processes, -ww causes ps to not truncate
# lines, -o comm instructs it to only print the command name, and the =
# tells it to not print a header line.
# The cut invocation filters the ps output to only have at most the number
# of characters in ${ps_string}. This is done so that grep can look for
# an exact match.
# grep -F tells grep to look for lines that are exact matches (not regular
# expressions), -q tells it to not print any output and just indicate
# matches by exit status, and -x tells it that the entire line must match
# ${ps_string} exactly, as opposed to matching a substring. A match
# causes grep to exit zero (true).
#
# lsof will exit nonzero if ${lsof_file} does not exist or is not open by any
# process. If the file exists and is open, it will exit zero (true).
if (! ps -ewwo comm= | \
cut -c "1-${#ps_string}" | \
grep -Fqx "${ps_string}") &&
(! lsof "${lsof_file}" >& /dev/null); then
# It doesn't look like anything is using this versioned directory. Get
# rid of it.
note "versioned_dir doesn't appear to be in use, removing"
rm -rf "${versioned_dir}"
else
note "versioned_dir is in use, skipping"
fi
done
# When the last old-layout version is gone, remove the old-layout Versions
# directory. Note that this isn't attempted when the last new-layout Versions
# directory disappears, because hopefully there won't ever be an "upgrade" (at
# least not long-term) that needs to revert from the new to the old layout. If
# this does become necessary, the rmdir should attempt to remove, from
# innermost to outermost, ${installed_versions_dir_new} out to
# ${installed_app}/${CONTENTS_DIR}/Frameworks. Even though that removal isn't
# attempted here, a subsequent update will do this cleanup as a side effect of
# the outer app rsync, which will remove these directories if empty when
# "updating" to another old-layout version.
if [[ -n "${update_layout_new}" ]] &&
[[ -d "${installed_versions_dir_old}" ]]; then
note "attempting removal of installed_versions_dir_old"
rmdir "${installed_versions_dir_old}" >& /dev/null
if [[ -d "${installed_versions_dir_old}" ]]; then
note "removal of installed_versions_dir_old failed"
else
note "removal of installed_versions_dir_old succeeded"
fi
fi
# If this script is being driven by a user Keystone ticket, it is not
# running as root. If the application is installed somewhere under
# /Applications, try to make it writable by all admin users. This will
# allow other admin users to update the application from their own user
# Keystone instances.
#
# If the script is being driven by a user Keystone ticket (not running as
# root) and the application is not installed under /Applications, it might
# not be in a system-wide location, and it probably won't be something that
# other users on the system are running, so err on the side of safety and
# don't make it group-writable.
#
# If this script is being driven by a system ticket (running as root), its
# future updates can be expected to be applied the same way, so admin-
# writability is not a concern. Set the entire thing to be owned by root
# in that case, regardless of where it's installed, and drop any group and
# other write permission.
#
# If this script is running as a user that is not a member of the admin
# group, the chgrp operation will not succeed. Tolerate that case, because
# it's better than the alternative, which is to make the application
# world-writable.
note "setting permissions"
local chmod_mode="a+rX,u+w,go-w"
if [[ -z "${system_ticket}" ]]; then
if [[ "${installed_app:0:14}" = "/Applications/" ]] &&
chgrp -Rh admin "${installed_app}" 2> /dev/null; then
chmod_mode="a+rX,ug+w,o-w"
fi
else
chown -Rh root:wheel "${installed_app}" 2> /dev/null
fi
note "chmod_mode = ${chmod_mode}"
chmod -R "${chmod_mode}" "${installed_app}" 2> /dev/null
# On the Mac, or at least on HFS+, symbolic link permissions are significant,
# but chmod -R and -h can't be used together. Do another pass to fix the
# permissions on any symbolic links.
find "${installed_app}" -type l -exec chmod -h "${chmod_mode}" {} + \
2> /dev/null
# If an update is triggered from within the application itself, the update
# process inherits the quarantine bit (LSFileQuarantineEnabled). Any files
# or directories created during the update will be quarantined in that case,
# which may cause Launch Services to display quarantine UI. That's bad,
# especially if it happens when the outer .app launches a quarantined inner
# helper. If the application is already on the system and is being updated,
# then it can be assumed that it should not be quarantined. Use xattr to
# drop the quarantine attribute.
#
# TODO(mark): Instead of letting the quarantine attribute be set and then
# dropping it here, figure out a way to get the update process to run
# without LSFileQuarantineEnabled even when triggering an update from within
# the application.
note "lifting quarantine"
xattr -d -r "${QUARANTINE_ATTR}" "${installed_app}" 2> /dev/null
# Great success!
note "done!"
trap - EXIT
return 0
}
# Check "less than" instead of "not equal to" in case Keystone ever changes to
# pass more arguments.
if [[ ${#} -lt 1 ]]; then
usage
exit 2
fi
main "${@}"
exit ${?}
|
endlessm/chromium-browser
|
chrome/installer/mac/keystone_install.sh
|
Shell
|
bsd-3-clause
| 62,402 |
#!/bin/bash
# Set up needed ENV variables.
set -o allexport
source ./django/sierra/sierra/settings/.env
USERID=$(id -u)
GROUPID=$(id -g)
set +o allexport
DDPATH=./docker_data
SIERRA_FIXTURE_PATH=./django/sierra/base/fixtures
SCRIPTNAME="$(basename "$0")"
DEV_SERVICES=("default-db-dev" "solr-dev" "redis-celery" "redis-appdata-dev" "app" "celery-worker")
TEST_SERVICES=("default-db-test" "sierra-db-test" "solr-test" "redis-appdata-test" "test")
ALL_SERVICES=("${DEV_SERVICES[@]}" "${TEST_SERVICES[@]}")
### FUNCTIONS ###
# show_help -- Display usage/help text
function show_help {
echo ""
echo "Usage: $SCRIPTNAME [-f] [-v | -m] group"
echo " $SCRIPTNAME [-f] [-v | -m] service ..."
echo ""
echo "-f Force overwriting existing volumes on the host machine."
echo "-h Display this help message."
echo "-m Only run migrations (skip volume set up). Cannot be used with -v."
echo "-s Display a list of valid service names and descriptions."
echo "-v Only run volume setup on host machine (skip migrations). Cannot be"
echo " used with -m."
echo "group Provide one argument to set up multiple services. Must be \"all\","
echo " \"dev\", or \"tests\". \"all\" sets up all services."
echo "service One or more service names to initialize. Note that services are set up"
echo " in the order specified. Use -s to see more info about services."
echo ""
echo "Please note that you are not prevented from specifying a valid group along with"
echo "one or more services. The group will be expanded into individual services, and"
echo "they will all be initialized in the order specified. However, including a"
echo "service name more than once will cause the setup for that service to be run more"
echo "once. So, be careful, especially if using the -f flag."
echo ""
echo "Use -s to get information about services."
echo ""
exit 1
}
# show_services -- Display information about services
function show_services {
echo ""
echo "Services"
echo "These are the services that you can set up with this script. Note that there"
echo "are a few catalog-api Docker services not listed here because they require no"
echo "local volumes to be set up."
echo ""
echo "(service -- group)"
echo ""
echo "default-db-dev -- dev"
echo " The default Django MariaDB database for a development environment."
echo " Migrations are needed to set up the needed Django apps."
echo ""
echo "solr-dev -- dev"
echo " Empty instance of Solr for a development environment. No migrations."
echo ""
echo "redis-celery -- dev"
echo " Redis instance behind Celery, used in development. No migrations."
echo ""
echo "redis-appdata-dev -- dev"
echo " Redis instance that stores some app data in development. No migrations."
echo ""
echo "app -- dev"
echo " The development app itself. Log and media directories are set up. No"
echo " migrations."
echo ""
echo "celery-worker -- dev"
echo " The celery-worker service that runs in development. A log directory is set"
echo " up. No migrations."
echo ""
echo "default-db-test -- tests"
echo " The default Django MariaDB database for a test environment. Migrations are"
echo " needed to set up the needed Django apps. This must be set up and migrated"
echo " before you run initial migrations on sierra-db-test."
echo ""
echo "sierra-db-test -- tests"
echo " The sierra PostGreSQL database for a test environment. Migrations are"
echo " needed to install sierra test fixtures. But, before you run migrations for"
echo " the first time on sierra-db-test, you must make sure that default-db-test"
echo " is set up and migrated."
echo ""
echo "solr-test -- tests"
echo " Empty instance of Solr for a test environment. No migrations (yet)."
echo ""
echo "redis-appdata-test -- tests"
echo " Redis instance that stores some app data in test. No migrations (yet)."
echo ""
echo "test -- tests"
echo " Log and media directories are set up for the test environment. No "
echo " migrations."
exit 1
}
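# Example invocations (illustrative only; see show_help/show_services above for details):
#   ./init-dockerdata.sh dev                 # create volumes and run migrations for all dev services
#   ./init-dockerdata.sh -f tests            # force-recreate all test volumes, then migrate
#   ./init-dockerdata.sh -v sierra-db-test   # volume setup only for one service, no migrations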
# show_summary -- Display summary of what the script will do when it runs,
# based on the user-provided options.
function show_summary {
local actions="$1"
local user_services="$2"
local force="$3"
do_what="migrate data"
if [[ $actions == *"v"* ]]; then
do_what="make volumes"
if [[ $actions == *"m"* ]]; then
do_what+=" and migrate data"
fi
if [[ $force ]]; then
use_force="-f (force) is set. Any existing data for these services will be deleted and empty volumes will be recreated."
else
use_force="-f (force) is not set. Existing data will be preserved."
fi
else
if [[ $force ]]; then
use_force="Warning: -f (force) flag is set but will have no effect, as it only affects volume creation (NOT migrations)."
fi
fi
echo ""
echo "--------------------------------------"
echo "INITIALIZE DOCKER DATA FOR CATALOG-API"
echo "This will attempt to $do_what for these catalog-api docker-compose services: $user_services."
if [[ $use_force ]]; then echo $use_force; fi
echo ""
}
# warm_up_sierra_db_test -- Forces postgres to run initdb to create the DB for
# sierra-db-test as the default postgres user and issues a chown on the DB
# to change the owner to the appropriate user. This is a workaround for the
# fact that PostgreSQL won't initialize a database for a user that doesn't
# exist in /etc/passwd in the container. After the database is initialized
# and the owner changed, you can run the postgres server as the correct user
# even though it still doesn't exist in /etc/passwd.
function warm_up_sierra_db_test {
echo "Initializing PostgreSQL database for \`sierra-db-test\` service"
local container=$(docker-compose run -u root -d sierra-db-test)
#container="${container##*$'\n'}"
container=$(echo "$container" | tail -1)
printf "(waiting for database) ..."
local limit=60
local waited=0
while ! docker logs $container 2>&1 | grep -q "PostgreSQL init process complete"; do
printf "."; sleep 3; let waited+=3;
if [[ $waited -ge $limit ]]; then
echo "Error: Timed out while waiting for sierra-db-test database to be created. Database NOT properly initialized."
docker stop $container && docker rm $container &> /dev/null
return 1
fi
done
echo "database created."
echo "Stopping intermediate container."
sleep 2; docker stop $container && docker rm $container &> /dev/null; sleep 2;
echo "Changing ownership of pgdata directory to current user."
docker-compose run --rm -u root --entrypoint="sh -c \"chown -R $USERID:$GROUPID /var/lib/postgresql/data\"" sierra-db-test
echo "Done. Database initialized."
return 0
}
# make_volume -- Takes args $path, $service, $force. Sets up the data volume
# for $service at $path if the $path does not exist. If the $path and $force
# exist, then it rm -rf's $path first. Returns 0 if it created a fresh volume
# successfully and 1 if it did not.
function make_volume {
local path="$1"
local service="$2"
local force="$3"
if [[ -d $path ]]; then
if [[ $force ]]; then
echo "Deleting existing data volume on host at $path."
rm -rf $path
else
echo "Warning: data volume for service $service already exists on host at $path. Use -f to force overwriting existing data with a fresh data volume."
return 1
fi
fi
echo "Creating new data volume on host at $path."
mkdir -p $path
}
# prepvolume_[service] functions. Define new functions with this naming pattern
# to run any setup that needs to happen between the make_volume and migration
# steps. Each prepvolume function takes an argument that tells you whether or
# not a new volume was created with make_volume.
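# Illustrative sketch only -- "example-svc" is a hypothetical service, not one
# recognized by this script. A prepvolume function receives a single argument
# indicating whether make_volume just created a fresh volume, and should return
# non-zero if the volume could not be made ready for migrations.
function prepvolume_example_svc {
    local volume_was_created=$1
    if [[ $volume_was_created ]]; then
        # One-time initialization for a freshly created volume would go here
        # (e.g. fixing ownership or seeding files). This sketch does nothing.
        :
    fi
    return 0
}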
# prepvolume_sierra_db_test -- Wrapper for warm_up_sierra_db_test.
function prepvolume_sierra_db_test {
local volume_was_created=$1
if [[ $volume_was_created ]]; then
warm_up_sierra_db_test
return $?
else
return 0
fi
}
# migrate_[service] functions. Define new functions with this naming pattern to
# migrate data (e.g. create database structures, load data fixtures, etc.) for
# a particular service. Each migrate function takes an argument that tells you
# whether the volume is ready for migrations or not.
function migrate_default_db_dev {
docker-compose run --rm manage-dev migrate --database=default
}
function migrate_default_db_test {
docker-compose run --rm manage-test migrate --database=default
}
function migrate_sierra_db_test {
local volume_is_ready=$1
if [[ $volume_is_ready ]]; then
docker-compose run --rm manage-test migrate --database=sierra
echo "Installing sierra-db-test fixtures..."
docker-compose run --rm manage-test loaddata --app=base --database=sierra $(find $SIERRA_FIXTURE_PATH/*.json -exec basename {} .json \; | tr '\n' ' ')
else
echo "Warning: Database could not be initialized; skipping migrations for \`sierra-db-test\`"
fi
}
### PARSE OPTIONS ###
user_services=()
want_make_volumes="true"
want_do_migrations="true"
while getopts :fmvhs FLAG; do
case $FLAG in
f)
force="true"
;;
m)
want_make_volumes=""
;;
v)
want_do_migrations=""
;;
h)
show_help
;;
s)
show_services
;;
\?)
echo "Unrecognized option $OPTARG."
echo "Use $SCRIPTNAME -h to see help."
exit 2
;;
esac
done
shift $((OPTIND-1))
if [[ ! $want_make_volumes && ! $want_do_migrations ]]; then
echo "You cannot use the -m and -v flags together."
echo "Use $SCRIPTNAME -h to see help."
exit 2
fi
if [[ $# -eq 0 ]]; then
echo "Error: you must specify at least one service or group of services."
echo "Use $SCRIPTNAME -h to see help or -s to see a list of services."
exit 2
fi
for arg in $@; do
case $arg in
all)
user_services+=("${ALL_SERVICES[@]}")
;;
dev)
user_services+=("${DEV_SERVICES[@]}")
;;
tests)
user_services+=("${TEST_SERVICES[@]}")
;;
*)
user_services+=("$arg")
;;
esac
done
### MAIN ###
actions=$([[ $want_make_volumes ]] && echo "v")$([[ $want_do_migrations ]] && echo "m")
show_summary $actions "${user_services[*]}" $force
echo "Stopping any running catalog-api Docker services ..."
docker-compose down &> /dev/null
# First, loop over user-provided $user_services. Validate each service and set
# volumes up as appropriate
services=()
volumes_were_created=()
for service in ${user_services[@]}; do
paths=()
case $service in
default-db-dev)
paths=("$DDPATH/default_db_dev")
;;
default-db-test)
paths=("$DDPATH/default_db_test")
;;
sierra-db-test)
paths=("$DDPATH/sierra_db_test")
;;
solr-dev)
paths=("$DDPATH/solr_dev/logs"
"$DDPATH/solr_dev/bibdata_data"
"$DDPATH/solr_dev/haystack_data"
"$DDPATH/solr_dev/marc_data")
;;
solr-test)
paths=("$DDPATH/solr_test/logs"
"$DDPATH/solr_test/bibdata_data"
"$DDPATH/solr_test/haystack_data"
"$DDPATH/solr_test/marc_data")
;;
redis-celery)
paths=("$DDPATH/redis_celery/data"
"$DDPATH/redis_celery/logs")
;;
redis-appdata-dev)
paths=("$DDPATH/redis_appdata_dev/data"
"$DDPATH/redis_appdata_dev/logs")
;;
redis-appdata-test)
paths=("$DDPATH/redis_appdata_test/data"
"$DDPATH/redis_appdata_test/logs")
;;
app)
paths=("$DDPATH/app/logs"
"$DDPATH/app/media")
;;
celery-worker)
paths=("$DDPATH/celery_worker/logs")
;;
test)
paths=("$DDPATH/test/logs"
"$DDPATH/test/media")
;;
*)
echo ""
echo "Warning: Skipping \`$service\`. Either it is not a valid service, or it does not use data volumes."
;;
esac
if [[ $paths ]]; then
services+=($service)
if [[ $want_make_volumes ]]; then
volume_was_created="true"
echo ""
echo "Making data volume(s) for \`$service\`."
for path in ${paths[@]}; do
if ! make_volume "$path" $service $force; then
volume_was_created="false"
fi
done
else
volume_was_created="false"
fi
volumes_were_created+=($volume_was_created)
fi
done
# Now loop over all valid services, and this time run any existing
# prepvolume_[service] commands. If the prepvolume command returns anything
# other than 0, then assume something went wrong and the volume is NOT ready.
i=0
volumes_are_ready=()
for service in ${services[@]}; do
volume_is_ready="true"
if [[ $want_make_volumes ]]; then
prep_command="prepvolume_${service//-/_}"
if [[ "$(type -t $prep_command)" == "function" ]]; then
echo ""
echo "Running prepvolume for \`$service\`."
created_arg=$([[ ${volumes_were_created[$i]} == "true" ]] && echo "true" || echo "")
if ! $prep_command $created_arg; then
volume_is_ready="false"
fi
fi
fi
volumes_are_ready+=($volume_is_ready)
let i+=1
done
# Finally, if the user wants migrations run, then loop over all valid services
# again and run any existing migrate_[service] commands, passing in a value
# indicating whether or not the volume is ready.
if [[ $want_do_migrations ]]; then
i=0
for service in ${services[@]}; do
migrate_command="migrate_${service//-/_}"
if [ "$(type -t $migrate_command)" == "function" ]; then
echo ""
echo "Running migrations for \`$service\`."
ready_arg=$([[ ${volumes_are_ready[$i]} == "true" ]] && echo "true" || echo "")
$migrate_command $ready_arg
else
echo ""
echo "No migrations found for \`$service\`."
fi
let i+=1
done
fi
echo ""
echo "Done. Stopping all running services."
docker-compose down &> /dev/null
echo ""
echo "$SCRIPTNAME finished."
echo ""
|
unt-libraries/catalog-api
|
init-dockerdata.sh
|
Shell
|
bsd-3-clause
| 14,021 |
#default message
if [ ${message+x} ]; then
msg=$message
else
msg="Push from Bitrise"
fi
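# Example invocation (illustrative; the values below are placeholders, not real credentials):
#   app_id="PARSE_APP_ID" rest_key="PARSE_REST_API_KEY" message="Build finished" bash step.sh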
echo "app_id: $app_id"
echo "rest_key: $rest_key"
echo "message: $message"
res=$(curl -X POST \
-H "X-Parse-Application-Id: $app_id" \
-H "X-Parse-REST-API-Key: $rest_key" \
-H "Content-Type: application/json" \
-d "{
\"where\": {
\"deviceType\": \"ios\"
},
\"data\": {
\"alert\": \"$msg\"
}
}" \
https://api.parse.com/1/push)
echo " --- Result ---"
echo "$res"
echo " --------------"
|
bitrise-io/steps-push-notification-with-parse
|
step.sh
|
Shell
|
mit
| 545 |
#!/bin/bash
whitelist="logo-en"
for img in `ls img | grep '\.pdf$' | sed 's/\.pdf$//'`; do
skip=""
for white in $whitelist; do
if [ "$white" == "$img" ]; then
skip="true"
fi
done
if [ -z "$skip" ]; then
echo "Converting img/$img.pdf to imga/$img.pdf..."
echo
gs -dPDFA -dBATCH -dNOPAUSE -sProcessColorModel=DeviceCMYK -sDEVICE=pdfwrite -sPDFACompatibilityPolicy=1 -sOutputFile=imga/"$img".pdf img/"$img".pdf
echo
echo
fi
done
|
oskopek/TransportEditor
|
transport-docs/manuals/convert-images.sh
|
Shell
|
mit
| 513 |
# set variable identifying the chroot you work in (used in the prompt below)
if [ -z "${debian_chroot:-}" ] && [ -r /etc/debian_chroot ]; then
debian_chroot=$(cat /etc/debian_chroot)
fi
# set a fancy prompt (non-color, unless we know we "want" color)
case "$TERM" in
xterm-color) color_prompt=yes;;
esac
# uncomment for a colored prompt, if the terminal has the capability; turned
# off by default to not distract the user: the focus in a terminal window
# should be on the output of commands, not on the prompt
force_color_prompt=yes
if [ -n "$force_color_prompt" ]; then
if [ -x /usr/bin/tput ] && tput setaf 1 >&/dev/null; then
# We have color support; assume it's compliant with Ecma-48
# (ISO/IEC-6429). (Lack of such support is extremely rare, and such
# a case would tend to support setf rather than setaf.)
color_prompt=yes
else
color_prompt=
fi
fi
if [ "$color_prompt" = yes ]; then
PS1='${debian_chroot:+($debian_chroot)}\[\033[01;32m\]\u@\h\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]\$ '
else
PS1='${debian_chroot:+($debian_chroot)}\u@\h:\w\$ '
fi
unset color_prompt force_color_prompt
|
scrisenbery/dotfiles
|
config.d/_shellrc.d/_bashrc.d/05-color-prompt.bash
|
Shell
|
mit
| 1,170 |
#!/usr/bin/env bash
#
# Copyright (c) 2019-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
export LC_ALL=C.UTF-8
export CONTAINER_NAME=ci_native_old
export DOCKER_NAME_TAG=ubuntu:18.04
export PACKAGES="libqt5gui5 libqt5core5a qtbase5-dev libqt5dbus5 qttools5-dev qttools5-dev-tools libssl-dev libevent-dev bsdmainutils libboost-system-dev libboost-filesystem-dev libboost-chrono-dev libboost-iostreams-dev libboost-program-options-dev libboost-test-dev libboost-thread-dev libdb5.3++-dev libminiupnpc-dev libqrencode-dev libzip-dev zlib1g zlib1g-dev libcurl4-openssl-dev"
export RUN_UNIT_TESTS=true
# export RUN_FUNCTIONAL_TESTS=false
# export RUN_SECURITY_TESTS="true"
export GOAL="install"
export GRIDCOIN_CONFIG="--enable-reduce-exports --with-incompatible-bdb --with-gui=qt5"
export NEED_XVFB="true"
export NO_DEPENDS=1
|
gridcoin/Gridcoin-Research
|
ci/test/00_setup_env_native_old.sh
|
Shell
|
mit
| 948 |
#!/usr/bin/env bash
set -o errexit
set -o nounset
set -o pipefail
PWD=$(cd $(dirname "$0") && pwd -P)
# Find closest tag
CLOSEST_VERSION=$(git describe --tags --abbrev=0)
# Install git-chglog binary
if ! command -v git-chglog >/dev/null ; then
make git-chglog
fi
# Generate Changelog
git-chglog --config ${PWD}/../../scripts/chglog/config-release-note.yml --tag-filter-pattern v[0-9]+.[0-9]+.[0-9]+$ --output ${PWD}/../../CURRENT-RELEASE-CHANGELOG.md ${CLOSEST_VERSION}
cat ${PWD}/../../CURRENT-RELEASE-CHANGELOG.md
|
cloud-ca/terraform-provider-cloudca
|
scripts/release/release-note.sh
|
Shell
|
mit
| 526 |
#!/bin/sh
typedoc --includeDeclarations --out out1 --module commonjs node-opcua/source node-opcua-common/source node-opcua-client/source node-opcua-variant/source node-opcua-basic-types/source node-opcua-constants/source node-opcua-data-value/source node-opcua-date-time/source node-opcua-guid/source node-opcua-numeric-range/source node-opcua-types/source
|
node-opcua/node-opcua
|
packages/makedoc.sh
|
Shell
|
mit
| 384 |
2to3 -w \
protobuf/descriptor.py \
protobuf/internal/cpp_message.py \
protobuf/internal/decoder.py \
protobuf/internal/python_message.py \
protobuf/internal/type_checkers.py \
protobuf/internal/message_factory_test.py \
protobuf/internal/message_factory_python_test.py \
protobuf/internal/message_python_test.py \
protobuf/internal/message_test.py \
protobuf/internal/reflection_test.py \
protobuf/internal/test_util.py \
protobuf/internal/text_format_test.py \
protobuf/message_factory.py \
protobuf/text_encoding.py \
protobuf/text_format.py
|
katharosada/botchallenge
|
client/google/run2to3.sh
|
Shell
|
mit
| 544 |
echo "Now edit each of the signup posts that you've made over the past week, and use double-tildes to line-out the content, and copy this into a new"
echo "paragraph, substituting the new month's name and URL:"
echo ""
echo "**[The StayClean 2017 year-long challenge is now in progress](https://www.reddit.com/r/pornfree/comments/3v059o/stay_clean_december_this_thread_updated_daily/)**."
|
foobarbazblarg/stayclean
|
stayclean-2017/after-the-last-day-of-the-year-step-4.sh
|
Shell
|
mit
| 389 |
# hints/dec_osf.sh
# * If you want to debug perl or want to send a
# stack trace for inclusion into an bug report, call
# Configure with the additional argument -Doptimize=-g2
# or uncomment this assignment to "optimize":
#
#optimize=-g2
#
# If you want both to optimise and debug with the DEC cc
# you must have -g3, e.g. "-O4 -g3", and (re)run Configure.
#
# * gcc can always have both -g and optimisation on.
#
# * debugging optimised code, no matter what compiler
# one is using, can be surprising and confusing because of
# the optimisation tricks like code motion, code removal,
# loop unrolling, and inlining. The source code and the
# executable code simply do not agree any more while in
# mid-execution, the optimiser only cares about the results.
#
# * Configure will automatically add the often quoted
# -DDEBUGGING for you if the -g is specified.
#
# * There is even more optimisation available in the new
# (GEM) DEC cc: -O5 and -fast. "man cc" will tell more about them.
# The jury is still out whether either or neither help for Perl
# and how much. Based on very quick testing, -fast boosts
# raw data copy by about 5-15% (-fast brings in, among other
# things, inlined, ahem, fast memcpy()), while on the other
# hand searching things (index, m//, s///), seems to get slower.
# Your mileage will vary.
#
# * The -std is needed because the following compiled
# without the -std and linked with -lm
#
# #include <math.h>
# #include <stdio.h>
# int main(){short x=10,y=sqrt(x);printf("%d\n",y);}
#
# will in Digital UNIX 3.* and 4.0b print 0 -- and in Digital
# UNIX 4.0{,a} dump core: Floating point exception in the printf(),
# the y has become a signaling NaN.
#
# * Compilation warnings like:
#
# "Undefined the ANSI standard macro ..."
#
# can be ignored, at least while compiling the POSIX extension
# and especially if using the sfio (the latter is not a standard
# part of Perl, never mind if it says little to you).
#
# If using the DEC compiler we must find out the DEC compiler style:
# the style changed between Digital UNIX (aka DEC OSF/1) 3 and
# Digital UNIX 4. The old compiler was originally from Ultrix and
# the MIPS company, the new compiler is originally from the VAX world
# and it is called GEM. Many of the options we are going to use depend
# on the compiler style.
cc=${cc:-cc}
# do NOT, I repeat, *NOT* take away the leading tabs
# Configure Black Magic (TM)
# reset
_DEC_cc_style=
case "`$cc -v 2>&1 | grep cc`" in
*gcc*) _gcc_version=`$cc --version 2>&1 | tr . ' '`
set $_gcc_version
if test "$1" -lt 2 -o \( "$1" -eq 2 -a \( "$2" -lt 95 -o \( "$2" -eq 95 -a "$3" -lt 2 \) \) \); then
cat >&4 <<EOF
*** Your cc seems to be gcc and its version ($_gcc_version) seems to be
*** less than 2.95.2. This is not a good idea since old versions of gcc
*** are known to produce buggy code when compiling Perl (and no doubt for
*** other programs, too).
***
*** Therefore, I strongly suggest upgrading your gcc. (Why you don't use
*** the vendor cc is also a good question. It comes with the operating
*** system and produces good code.)
Cannot continue, aborting.
EOF
exit 1
fi
if test "$1" -eq 2 -a "$2" -eq 95 -a "$3" -le 2; then
cat >&4 <<EOF
*** Note that as of gcc 2.95.2 (19991024) and Perl 5.6.0 (March 2000)
*** if the said Perl is compiled with the said gcc the lib/sdbm test
*** may dump core (meaning that the SDBM_File extension is unusable).
*** As this core dump never happens with the vendor cc, this is most
*** probably a lingering bug in gcc. Therefore unless you have a better
*** gcc installation you are still better off using the vendor cc.
Since you explicitly chose gcc, I assume that you know what you are doing.
EOF
fi
;;
*) # compile something small: taint.c is fine for this.
ccversion=`cc -V | awk '/(Compaq|DEC) C/ {print $3}'`
# the main point is the '-v' flag of 'cc'.
case "`cc -v -I. -c taint.c -o taint$$.o 2>&1`" in
*/gemc_cc*) # we have the new DEC GEM CC
_DEC_cc_style=new
;;
*) # we have the old MIPS CC
_DEC_cc_style=old
;;
esac
# cleanup
rm -f taint$$.o
;;
esac
# be nauseatingly ANSI
case "`$cc -v 2>&1 | grep gcc`" in
*gcc*) ccflags="$ccflags -ansi"
;;
*) ccflags="$ccflags -std"
;;
esac
# for gcc the Configure knows about the -fpic:
# position-independent code for dynamic loading
# we want optimisation
case "$optimize" in
'') case "`$cc -v 2>&1 | grep gcc`" in
*gcc*)
optimize='-O3' ;;
*) case "$_DEC_cc_style" in
new) optimize='-O4'
ccflags="$ccflags -fprm d -ieee"
;;
old) optimize='-O2 -Olimit 3200' ;;
esac
ccflags="$ccflags -D_INTRINSICS"
;;
esac
;;
esac
# Make glibpth agree with the compiler suite. Note that /shlib
# is not here. That's on purpose. Even though that's where libc
# really lives from V4.0 on, the linker (and /sbin/loader) won't
# look there by default. The sharable /sbin utilities were all
# built with "-Wl,-rpath,/shlib" to get around that. This makes
# no attempt to figure out the additional location(s) searched by
# gcc, since not all versions of gcc are easily coerced into
# revealing that information.
glibpth="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc"
glibpth="$glibpth /usr/lib /usr/local/lib /var/shlib"
# dlopen() is in libc
libswanted="`echo $libswanted | sed -e 's/ dl / /'`"
# libPW contains nothing useful for perl
libswanted="`echo $libswanted | sed -e 's/ PW / /'`"
# libnet contains nothing useful for perl here, and doesn't work
libswanted="`echo $libswanted | sed -e 's/ net / /'`"
# libbsd contains nothing used by perl that is not already in libc
libswanted="`echo $libswanted | sed -e 's/ bsd / /'`"
# libc need not be separately listed
libswanted="`echo $libswanted | sed -e 's/ c / /'`"
# ndbm is already in libc
libswanted="`echo $libswanted | sed -e 's/ ndbm / /'`"
# the basic lddlflags used always
lddlflags='-shared -expect_unresolved "*"'
# Fancy compiler suites use optimising linker as well as compiler.
# <[email protected]>
case "`uname -r`" in
*[123].*) # old loader
lddlflags="$lddlflags -O3"
;;
*) if $test "X$optimize" = "X$undef"; then
lddlflags="$lddlflags -msym"
else
case "`/usr/sbin/sizer -v`" in
*4.0D*)
# QAR 56761: -O4 + .so may produce broken code,
# fixed in 4.0E or better.
;;
*)
lddlflags="$lddlflags $optimize"
;;
esac
# -msym: If using a sufficiently recent /sbin/loader,
# keep the module symbols with the modules.
lddlflags="$lddlflags -msym -std"
fi
;;
esac
# Yes, the above loses if gcc does not use the system linker.
# If that happens, let me know about it. <[email protected]>
# If debugging or (old systems and doing shared)
# then do not strip the lib, otherwise, strip.
# As noted above the -DDEBUGGING is added automagically by Configure if -g.
case "$optimize" in
*-g*) ;; # left intentionally blank
*) case "`uname -r`" in
*[123].*)
case "$useshrplib" in
false|undef|'') lddlflags="$lddlflags -s" ;;
esac
;;
*) lddlflags="$lddlflags -s"
;;
esac
;;
esac
#
# Make embedding in things like INN and Apache more memory friendly.
# Keep it overridable on the Configure command line, though, so that
# "-Uuseshrplib" prevents this default.
#
case "$_DEC_cc_style.$useshrplib" in
new.) useshrplib="$define" ;;
esac
# The EFF_ONLY_OK from <sys/access.h> is present but dysfunctional for
# [RWX]_OK as of Digital UNIX 4.0[A-D]?. If and when this gets fixed,
# please adjust this appropriately. See also pp_sys.c just before the
# emulate_eaccess().
# Fixed in V5.0A.
case "`/usr/sbin/sizer -v`" in
*5.0[A-Z]*|*5.[1-9]*|*[6-9].[0-9]*)
: ok
;;
*)
# V5.0 or previous
pp_sys_cflags='ccflags="$ccflags -DNO_EFF_ONLY_OK"'
;;
esac
# The off_t is already 8 bytes, so we do have largefileness.
cat > UU/usethreads.cbu <<'EOCBU'
# This script UU/usethreads.cbu will get 'called-back' by Configure
# after it has prompted the user for whether to use threads.
case "$usethreads" in
$define|true|[yY]*)
# Threads interfaces changed with V4.0.
case "`$cc -v 2>&1 | grep gcc`" in
*gcc*)ccflags="-D_REENTRANT $ccflags" ;;
*) case "`uname -r`" in
*[123].*) ccflags="-threads $ccflags" ;;
*) ccflags="-pthread $ccflags" ;;
esac
;;
esac
case "`uname -r`" in
*[123].*) libswanted="$libswanted pthreads mach exc c_r" ;;
*) libswanted="$libswanted pthread exc" ;;
esac
case "$usemymalloc" in
'')
usemymalloc='n'
;;
esac
;;
esac
EOCBU
cat > UU/uselongdouble.cbu <<'EOCBU'
# This script UU/uselongdouble.cbu will get 'called-back' by Configure
# after it has prompted the user for whether to use long doubles.
case "$uselongdouble" in
$define|true|[yY]*) d_Gconvert='sprintf((b),"%.*Lg",(n),(x))' ;;
esac
EOCBU
#
# Unset temporary variables no more needed.
#
unset _DEC_cc_style
#
# History:
#
# perl5.005_51:
#
# September-1998 Jarkko Hietaniemi <[email protected]>
#
# * Added the -DNO_EFF_ONLY_OK flag ('use filetest;' support).
#
# perl5.004_57:
#
# 19-Dec-1997 Spider Boardman <[email protected]>
#
# * Newer Digital UNIX compilers enforce signaling for NaN without
# -ieee. Added -fprm d at the same time since it's friendlier for
# embedding.
#
# * Fixed the library search path to match cc, ld, and /sbin/loader.
#
# * Default to building -Duseshrplib on newer systems. -Uuseshrplib
# still overrides.
#
# * Fix -pthread additions for useshrplib. ld has no -pthread option.
#
#
# perl5.004_04:
#
# 19-Sep-1997 Spider Boardman <[email protected]>
#
# * libnet on Digital UNIX is for JAVA, not for sockets.
#
#
# perl5.003_28:
#
# 22-Feb-1997 Jarkko Hietaniemi <[email protected]>
#
# * Restructuring Spider's suggestions.
#
# * Older Digital UNIXes cannot handle -Olimit ... for $lddlflags.
#
# * ld -s cannot be used in older Digital UNIXes when doing shared.
#
#
# 21-Feb-1997 Spider Boardman <[email protected]>
#
# * -hidden removed.
#
# * -DSTANDARD_C removed.
#
# * -D_INTRINSICS added. (that -fast does not seem to buy much confirmed)
#
# * odbm not in libc, only ndbm. Therefore dbm back to $libswanted.
#
# * -msym for the newer runtime loaders.
#
# * $optimize also in $lddflags.
#
#
# perl5.003_27:
#
# 18-Feb-1997 Jarkko Hietaniemi <[email protected]>
#
# * unset _DEC_cc_style and more commentary on -std.
#
#
# perl5.003_26:
#
# 15-Feb-1997 Jarkko Hietaniemi <[email protected]>
#
# * -std and -ansi.
#
#
# perl5.003_24:
#
# 30-Jan-1997 Jarkko Hietaniemi <[email protected]>
#
# * Fixing the note on -DDEBUGGING.
#
# * Note on -O5 -fast.
#
#
# perl5.003_23:
#
# 26-Jan-1997 Jarkko Hietaniemi <[email protected]>
#
# * Notes on how to do both optimisation and debugging.
#
#
# 25-Jan-1997 Jarkko Hietaniemi <[email protected]>
#
# * Remove unneeded libraries from $libswanted: PW, bsd, c, dbm
#
# * Restructure the $lddlflags build.
#
# * $optimize based on which compiler we have.
#
#
# perl5.003_22:
#
# 23-Jan-1997 Achim Bohnet <[email protected]>
#
# * Added comments 'how to create a debugging version of perl'
#
# * Fixed logic of this script to prevent stripping of shared
# objects by the loader (see ld man page for -s) if debugging
# is set via the -g switch.
#
#
# 21-Jan-1997 Achim Bohnet <[email protected]>
#
# * now 'dl' is always removed from libswanted. Not only if
# optimize is an empty string.
#
#
# 17-Jan-1997 Achim Bohnet <[email protected]>
#
# * Removed 'dl' from libswanted: When the FreePort binary
# translator for Sun binaries is installed Configure concludes
# that it should use libdl.x.yz.fpx.so :-(
# Because the dlopen, dlclose,... calls are in the
# C library it is not necessary at all to check for the
# dl library. Therefore dl is removed from libswanted.
#
#
# 1-Jan-1997 Achim Bohnet <[email protected]>
#
# * Set -Olimit to 3200 because perl_yylex.c got too big
# for the optimizer.
#
|
atmark-techno/atmark-dist
|
user/perl/hints/dec_osf.sh
|
Shell
|
gpl-2.0
| 11,976 |
#!/bin/sh
#
# Copyright (c) 2010 Johan Herland
#
test_description='Test merging of notes trees'
. ./test-lib.sh
test_expect_success setup '
test_commit 1st &&
test_commit 2nd &&
test_commit 3rd &&
test_commit 4th &&
test_commit 5th &&
# Create notes on 4 first commits
git config core.notesRef refs/notes/x &&
git notes add -m "Notes on 1st commit" 1st &&
git notes add -m "Notes on 2nd commit" 2nd &&
git notes add -m "Notes on 3rd commit" 3rd &&
git notes add -m "Notes on 4th commit" 4th &&
# Copy notes to remote-notes
git fetch . refs/notes/*:refs/remote-notes/origin/* &&
test_oid_init &&
test_oid_cache <<-EOF
hash4a sha1:5e93d24084d32e1cb61f7070505b9d2530cca987
hash3a sha1:8366731eeee53787d2bdf8fc1eff7d94757e8da0
hash2a sha1:eede89064cd42441590d6afec6c37b321ada3389
hash1a sha1:daa55ffad6cb99bf64226532147ffcaf5ce8bdd1
hash5b sha1:0f2efbd00262f2fd41dfae33df8765618eeacd99
hash4b sha1:dec2502dac3ea161543f71930044deff93fa945c
hash3b sha1:4069cdb399fd45463ec6eef8e051a16a03592d91
hash2c sha1:d000d30e6ddcfce3a8122c403226a2ce2fd04d9d
hash1c sha1:43add6bd0c8c0bc871ac7991e0f5573cfba27804
hash4d sha1:1f257a3a90328557c452f0817d6cc50c89d315d4
hash3d sha1:05a4927951bcef347f51486575b878b2b60137f2
hash4a sha256:eef876be1d32ac2e2e42240e0429325cec116e55e88cb2969899fac695aa762f
hash3a sha256:cf7cd1bc091d7ba4166a86df864110e42087cd893a5ae96bc50d637e0290939d
hash2a sha256:21ddde7ebce2c285213898cb04deca0fd3209610cf7aaf8222e4e2f45262fae2
hash1a sha256:f9fe0eda16c6027732ed9d4295689a03abd16f893be69b3dcbf4037ddb191921
hash5b sha256:20046f2244577797a9e3d3f790ea9eca4d8a6bafb2a5570bcb0e03aa02ce100b
hash4b sha256:f90563d134c61a95bb88afbd45d48ccc9e919c62aa6fbfcd483302b3e4d8dbcb
hash3b sha256:988f2aca9f2d87e93e6a73197c2bb99560cc44a2f92d18653968f956f01221e0
hash2c sha256:84153b777b4d42827a756c6578dcdb59d8ae5d1360b874fb37c430150c825c26
hash1c sha256:9beb2bc4eef72e4c4087be168a20573e34d993d9ab1883055f23e322afa06567
hash4d sha256:32de39dc06e679a7abb2d4a55ede7709b3124340a4a90aa305971b1c72ac319d
hash3d sha256:fa73b20e41cbb7541c4c81d1535016131dbfbeb05bf6a71f6115e9cad31c7af5
EOF
'
commit_sha1=$(git rev-parse 1st^{commit})
commit_sha2=$(git rev-parse 2nd^{commit})
commit_sha3=$(git rev-parse 3rd^{commit})
commit_sha4=$(git rev-parse 4th^{commit})
commit_sha5=$(git rev-parse 5th^{commit})
verify_notes () {
notes_ref="$1"
git -c core.notesRef="refs/notes/$notes_ref" notes |
sort >"output_notes_$notes_ref" &&
test_cmp "expect_notes_$notes_ref" "output_notes_$notes_ref" &&
git -c core.notesRef="refs/notes/$notes_ref" log --format="%H %s%n%N" \
>"output_log_$notes_ref" &&
test_cmp "expect_log_$notes_ref" "output_log_$notes_ref"
}
cat <<EOF | sort >expect_notes_x
$(test_oid hash4a) $commit_sha4
$(test_oid hash3a) $commit_sha3
$(test_oid hash2a) $commit_sha2
$(test_oid hash1a) $commit_sha1
EOF
cat >expect_log_x <<EOF
$commit_sha5 5th
$commit_sha4 4th
Notes on 4th commit
$commit_sha3 3rd
Notes on 3rd commit
$commit_sha2 2nd
Notes on 2nd commit
$commit_sha1 1st
Notes on 1st commit
EOF
test_expect_success 'verify initial notes (x)' '
verify_notes x
'
cp expect_notes_x expect_notes_y
cp expect_notes_x expect_notes_v
cp expect_log_x expect_log_y
cp expect_log_x expect_log_v
test_expect_success 'fail to merge empty notes ref into empty notes ref (z => y)' '
test_must_fail git -c "core.notesRef=refs/notes/y" notes merge z
'
test_expect_success 'fail to merge into various non-notes refs' '
test_must_fail git -c "core.notesRef=refs/notes" notes merge x &&
test_must_fail git -c "core.notesRef=refs/notes/" notes merge x &&
git update-ref refs/notes/dir/foo HEAD &&
test_must_fail git -c "core.notesRef=refs/notes/dir" notes merge x &&
test_must_fail git -c "core.notesRef=refs/notes/dir/" notes merge x &&
test_must_fail git -c "core.notesRef=refs/heads/master" notes merge x &&
test_must_fail git -c "core.notesRef=refs/notes/y:" notes merge x &&
test_must_fail git -c "core.notesRef=refs/notes/y:foo" notes merge x &&
test_must_fail git -c "core.notesRef=refs/notes/foo^{bar" notes merge x
'
test_expect_success 'merge non-notes ref into empty notes ref (remote-notes/origin/x => v)' '
git config core.notesRef refs/notes/v &&
git notes merge refs/remote-notes/origin/x &&
verify_notes v &&
# refs/remote-notes/origin/x and v should point to the same notes commit
test "$(git rev-parse refs/remote-notes/origin/x)" = "$(git rev-parse refs/notes/v)"
'
test_expect_success 'merge notes into empty notes ref (x => y)' '
git config core.notesRef refs/notes/y &&
git notes merge x &&
verify_notes y &&
# x and y should point to the same notes commit
test "$(git rev-parse refs/notes/x)" = "$(git rev-parse refs/notes/y)"
'
test_expect_success 'merge empty notes ref (z => y)' '
git notes merge z &&
# y should not change (still == x)
test "$(git rev-parse refs/notes/x)" = "$(git rev-parse refs/notes/y)"
'
test_expect_success 'change notes on other notes ref (y)' '
# Not touching notes to 1st commit
git notes remove 2nd &&
git notes append -m "More notes on 3rd commit" 3rd &&
git notes add -f -m "New notes on 4th commit" 4th &&
git notes add -m "Notes on 5th commit" 5th
'
test_expect_success 'merge previous notes commit (y^ => y) => No-op' '
pre_state="$(git rev-parse refs/notes/y)" &&
git notes merge y^ &&
# y should not move
test "$pre_state" = "$(git rev-parse refs/notes/y)"
'
cat <<EOF | sort >expect_notes_y
$(test_oid hash5b) $commit_sha5
$(test_oid hash4b) $commit_sha4
$(test_oid hash3b) $commit_sha3
$(test_oid hash1a) $commit_sha1
EOF
cat >expect_log_y <<EOF
$commit_sha5 5th
Notes on 5th commit
$commit_sha4 4th
New notes on 4th commit
$commit_sha3 3rd
Notes on 3rd commit
More notes on 3rd commit
$commit_sha2 2nd
$commit_sha1 1st
Notes on 1st commit
EOF
test_expect_success 'verify changed notes on other notes ref (y)' '
verify_notes y
'
test_expect_success 'verify unchanged notes on original notes ref (x)' '
verify_notes x
'
test_expect_success 'merge original notes (x) into changed notes (y) => No-op' '
git notes merge -vvv x &&
verify_notes y &&
verify_notes x
'
cp expect_notes_y expect_notes_x
cp expect_log_y expect_log_x
test_expect_success 'merge changed (y) into original (x) => Fast-forward' '
git config core.notesRef refs/notes/x &&
git notes merge y &&
verify_notes x &&
verify_notes y &&
# x and y should point to the same notes commit
test "$(git rev-parse refs/notes/x)" = "$(git rev-parse refs/notes/y)"
'
test_expect_success 'merge empty notes ref (z => y)' '
# Prepare empty (but valid) notes ref (z)
git config core.notesRef refs/notes/z &&
git notes add -m "foo" &&
git notes remove &&
git notes >output_notes_z &&
test_must_be_empty output_notes_z &&
# Do the merge (z => y)
git config core.notesRef refs/notes/y &&
git notes merge z &&
verify_notes y &&
# y should no longer point to the same notes commit as x
test "$(git rev-parse refs/notes/x)" != "$(git rev-parse refs/notes/y)"
'
cat <<EOF | sort >expect_notes_y
$(test_oid hash5b) $commit_sha5
$(test_oid hash4b) $commit_sha4
$(test_oid hash3b) $commit_sha3
$(test_oid hash2c) $commit_sha2
$(test_oid hash1c) $commit_sha1
EOF
cat >expect_log_y <<EOF
$commit_sha5 5th
Notes on 5th commit
$commit_sha4 4th
New notes on 4th commit
$commit_sha3 3rd
Notes on 3rd commit
More notes on 3rd commit
$commit_sha2 2nd
New notes on 2nd commit
$commit_sha1 1st
Notes on 1st commit
More notes on 1st commit
EOF
test_expect_success 'change notes on other notes ref (y)' '
# Append to 1st commit notes
git notes append -m "More notes on 1st commit" 1st &&
# Add new notes to 2nd commit
git notes add -m "New notes on 2nd commit" 2nd &&
verify_notes y
'
cat <<EOF | sort >expect_notes_x
$(test_oid hash5b) $commit_sha5
$(test_oid hash4d) $commit_sha4
$(test_oid hash1a) $commit_sha1
EOF
cat >expect_log_x <<EOF
$commit_sha5 5th
Notes on 5th commit
$commit_sha4 4th
New notes on 4th commit
More notes on 4th commit
$commit_sha3 3rd
$commit_sha2 2nd
$commit_sha1 1st
Notes on 1st commit
EOF
test_expect_success 'change notes on notes ref (x)' '
git config core.notesRef refs/notes/x &&
git notes remove 3rd &&
git notes append -m "More notes on 4th commit" 4th &&
verify_notes x
'
cat <<EOF | sort >expect_notes_x
$(test_oid hash5b) $commit_sha5
$(test_oid hash4d) $commit_sha4
$(test_oid hash2c) $commit_sha2
$(test_oid hash1c) $commit_sha1
EOF
cat >expect_log_x <<EOF
$commit_sha5 5th
Notes on 5th commit
$commit_sha4 4th
New notes on 4th commit
More notes on 4th commit
$commit_sha3 3rd
$commit_sha2 2nd
New notes on 2nd commit
$commit_sha1 1st
Notes on 1st commit
More notes on 1st commit
EOF
test_expect_success 'merge y into x => Non-conflicting 3-way merge' '
git notes merge y &&
verify_notes x &&
verify_notes y
'
cat <<EOF | sort >expect_notes_w
$(test_oid hash3d) $commit_sha3
$(test_oid hash2c) $commit_sha2
EOF
cat >expect_log_w <<EOF
$commit_sha5 5th
$commit_sha4 4th
$commit_sha3 3rd
New notes on 3rd commit
$commit_sha2 2nd
New notes on 2nd commit
$commit_sha1 1st
EOF
test_expect_success 'create notes on new, separate notes ref (w)' '
git config core.notesRef refs/notes/w &&
# Add same note as refs/notes/y on 2nd commit
git notes add -m "New notes on 2nd commit" 2nd &&
# Add new note on 3rd commit (non-conflicting)
git notes add -m "New notes on 3rd commit" 3rd &&
# Verify state of notes on new, separate notes ref (w)
verify_notes w
'
cat <<EOF | sort >expect_notes_x
$(test_oid hash5b) $commit_sha5
$(test_oid hash4d) $commit_sha4
$(test_oid hash3d) $commit_sha3
$(test_oid hash2c) $commit_sha2
$(test_oid hash1c) $commit_sha1
EOF
cat >expect_log_x <<EOF
$commit_sha5 5th
Notes on 5th commit
$commit_sha4 4th
New notes on 4th commit
More notes on 4th commit
$commit_sha3 3rd
New notes on 3rd commit
$commit_sha2 2nd
New notes on 2nd commit
$commit_sha1 1st
Notes on 1st commit
More notes on 1st commit
EOF
test_expect_success 'merge w into x => Non-conflicting history-less merge' '
git config core.notesRef refs/notes/x &&
git notes merge w &&
# Verify new state of notes on other notes ref (x)
verify_notes x &&
# Also verify that nothing changed on other notes refs (y and w)
verify_notes y &&
verify_notes w
'
test_done
|
brunosantiagovazquez/git
|
t/t3308-notes-merge.sh
|
Shell
|
gpl-2.0
| 10,252 |
#!/bin/bash
lang=$1
appname="gecosws-config-assistant"
if [ "" == "$lang" ]
then
lang="es"
fi
podir="../po"
potfilesin="${podir}/POTFILES.in"
potfile="${podir}/${appname}.pot"
pofile="${podir}/${lang}.po"
pofilemerged="${podir}/${lang}.merged.po"
mofile="${podir}/${appname}.mo"
find .. -type f -name "*.py" > $potfilesin
xgettext --language=Python --keyword=_ --output=$potfile -f $potfilesin
if [ ! -f $pofile ]; then
msginit --input=$potfile --locale=es_ES --output-file $pofile
else
msgmerge $pofile $potfile > $pofilemerged
mv $pofilemerged $pofile
fi
sed -i s@^../@@g $potfilesin
|
rcmorano/gecosws-config-assistant
|
script/i18n.sh
|
Shell
|
gpl-2.0
| 614 |
#!/bin/bash
#
# Test the unapplied code
#
source $REG_DIR/scaffold
cmd setup_repo
cmd guilt-unapplied
guilt-series | while read n; do
cmd guilt-push
cmd guilt-unapplied
cmd list_files
done
|
trygvis/guilt
|
regression/t-024.sh
|
Shell
|
gpl-2.0
| 198 |
#!/bin/bash
numInstances=100
if [[ -z "$instanceGen" || -z "$dflatArguments" || -z "$monolithicEncoding" ]]; then
echo "Environment variables not set"
exit 1
fi
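# Example of the required environment (illustrative; every value below is a placeholder):
#   export instanceGen=./generate_instance.sh          # prints a random instance for a given seed
#   export dflatArguments="<problem-specific D-FLAT options>"
#   export monolithicEncoding=./monolithic.lp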
for i in $(seq 1 $numInstances); do  # the counter itself is unused; we just repeat numInstances times
seed=$RANDOM
instance=$(mktemp)
trap "rm -f $instance" EXIT
$instanceGen $seed > $instance || exit
gringo $monolithicEncoding $instance 2>/dev/null | clasp -q >/dev/null
claspExit=$?
dflat $dflatArguments --depth 0 --seed $seed < $instance >/dev/null
dflatExit=$?
[ $claspExit -ne 30 ] || claspExit=10
if [ $claspExit -ne $dflatExit ]; then
cp $instance mismatch${seed}.lp
echo
echo "Mismatch for seed $seed (dflat: ${dflatExit}, clasp: ${claspExit})"
exit 1
else
# echo -n .
echo -n "$dflatExit "
fi
# remove temp file
rm -f $instance
trap - EXIT
done
echo
|
bbliem/dflat
|
applications/test_decision.sh
|
Shell
|
gpl-3.0
| 799 |
#!/usr/bin/env bash
# usage: gcc-mips.sh "some_file.c" X
# where X is an (optional) optimisation level eg "2" => -O2
# compiler will output to "some_file.s"
#
# requires `g++-5-mips-linux-gnu` package
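# Example (illustrative): ./gcc-mips.sh my_program.c 2   # compile with -O2, output goes to my_program.s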
which mips-linux-gnu-g++-5 > /dev/null
if [ $? -ne 0 ]; then
echo "the cross-compiler is not installed."
echo "if you are on a Debian based Linux distro: Please install"
echo "the g++-5-mips-linux-gnu package. Otherwise you're on your own."
exit 1
fi
FILE="$1"
# level of optimisation in $2. Default value given after :-
OPTIMISATION=${2:-0}
export OPTIMISATION
# change extension with bash parameter expansion
OUT="${FILE/%.*/.s}"
export OUT
# whether to alter the compiler output to make it work with Simulizer
FILTER=true
# to list flags
# mips-linux-gnu-g++-5 --help=common
# mips-linux-gnu-g++-5 --help=target
# also see: https://gcc.gnu.org/onlinedocs/gcc/MIPS-Options.html
#
# -fverbose-asm: add information to the output including compiler flags used
#
function compile {
# flags which are closest to the architecture of the Simulizer CPU
mips-linux-gnu-g++-5 -O$OPTIMISATION -Wall -Wpedantic -std=c++14 \
-fno-exceptions -mno-explicit-relocs \
-march=r3000 -meb -mgp32 -mfp32 -msoft-float \
-mno-llsc -fno-stack-protector -fno-delayed-branch \
-I./ -I"$(dirname "$0")" -I"$(dirname "$OUT")" -S "$1" -o "$OUT"
# -O0: disable optimisations (to make output more readable)
# -fno-exceptions: disabling exceptions removes some cruft added for bookkeeping
# -mno-explicit-relocs: disables use of %hi() and %lo() to load from the
# .data segment
# -march=r3000: compile for the R3000 processor (which Simulizer emulates)
# -meb: big endian (which Simulizer is)
# -mgp32: 32-bit general purpose registers (as opposed to 64-bit)
# -mfp32: 32-bit floating point registers (as opposed to 64-bit).
# required for -march=r3000 however Simulizer currently
# has no FPU
# -msoft-float: don't use hardware float instructions. Use library calls
# instead (because Simulizer doesn't have a FPU)
# -mno-llsc: don't use ll,sc and sync instructions (atomic instructions)
# because Simulizer does not support them
# -fno-stack-protector: don't write stack canaries or other protections to
# the stack
# -fno-delayed-branch: don't exploit delayed branch slots because Simulizer
# does not have them
# -I./ include the current directory in the include path to search
# for headers
# -I$($0) include the path of this script
# -I$(...) include the path that the input and output files reside in
# -S: generate assembly output
}
# by wrapping everything in extern "C" disables name mangling for all functions
TMP_FILE=$(mktemp /tmp/gcc-mips.tmp.XXXXX.c)
echo 'extern "C" {' > "$TMP_FILE"
cat "$FILE" >> "$TMP_FILE"
echo '}' >> "$TMP_FILE"
# the line numbers will be wrong but if piped to /dev/null you lose warning messages
compile "$TMP_FILE"
COMPILE_STATUS=$?
rm "$TMP_FILE"
if [ $COMPILE_STATUS -ne 0 ]; then
RED='\033[1;31m'
NO_COLOR='\033[0m'
echo -e "\n\n${RED}Compilation Failed${NO_COLOR}\n"
# compile the original file (will mangle function names)
# to get better diagnostics (line numbers)
compile "$FILE"
# if it works without extern "C" surrounding it...
if [ $? -eq 0 ]; then
echo "something went wrong with the compiler"
fi
exit 1
fi
# no filtering
if ! $FILTER; then
exit 0
fi
HEADER="\t# compiled using GCC with optimisation level $OPTIMISATION\n"
echo -e "$HEADER$(cat "$OUT")" > "$OUT"
# remove assembler directives that Simulizer doesn't understand
# .globl is understood, but ignored by Simulizer
# .align is understood, but ignored by Simulizer
# .rdata is not understood, but is useful to keep in so you can see where the
# read-only data segment is so you can move it to the .data segment
# note that sometimes GCC places data in the .bss segment which is also not
# supported by Simulizer
KNOWN_DIRECTIVES="(text|data|rdata|ascii|asciiz|byte|half|word|space)"
# if on a line matching Simulizer-compatible directives: print
# if on a line matching some other directive: skip
# remove #nop lines
# remove #APP and #NO_APP messages which surround asm() statements
# remove # 0 "" 2 and # XX "input.c" 1 lines which surround asm() statements
AWK_FILTER='
/\.'$KNOWN_DIRECTIVES'([^\w.]|$)/{print; next;}
/^(\s*\.section)?\s*\.bss/{$0="\t.data"; print} # replace .bss with .data
/^\s*\.section\s*\.text\.startup/{$0="\t.text"; print} # .section .text.startup with .text
/^\s*\./{next} # unknown directives
/^\s*#nop$/{print "\t# <hazard>"; next}
/^\s*#(NO_)?APP$/{next}
/^\s*# 0 "" 2$/{next}
/^\s*# [0-9]+ "'"${TMP_FILE//\//\\/}"'" 1/{next} # need to escape / in file path
/^\s*# [0-9]+ ".*libc-simulizer.h" 1/{next}
{print}
'
# explanations
# ([^\w.]|$) -> when compiling with -O3 a directive gets generated: .text.startup
# so this checks that the next char after a known directive
# doesn't extend that directive
awk -i inplace "$AWK_FILTER" "$OUT"
# gcc uses labels of the form $LXXX, e.g. $L4 (where XXX is a unique number),
# for loops. Simulizer does not understand these because they look like
# registers. (Note spim can handle these labels)
# eg $L4 --> LBL_4
sed --in-place='' 's/\(^[^#]*\)\$L\([0-9]*\)/\1LBL_\2/' "$OUT"
# when optimising, gcc creates labels of the form: functionName.constprop.XXX
# but Simulizer does not support . in label names
sed --in-place='' 's/\(^[^#]*[[:alpha:]]\+\)\./\1_/g' "$OUT"
# substitute mnemonic register names (personal preference)
sed --in-place='' 's/\$31/$ra/' "$OUT"
# some versions of GCC seem to be placing literal null characters rather than \0
sed --in-place='' 's/\x0/\\0/' "$OUT"
# gcc uses these macros which simulizer does not understand for a particular
# overloading, for example the 'move' instruction is used to move from memory to
# a register. The following program replaces these instructions with an
# appropriate replacement
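# illustrative effect (derived from the two rules below):
#   "move $2,8($fp)"  ->  "lw $2,8($fp)"     (memory operand, so use lw)
#   "slt $2,$3,5"     ->  "slti $2,$3,5"     (immediate operand, so use slti)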
AWK_FIX_OVERLOADS='
# match: "address as the second argument"
/^\s*move[^,]*,[^#]*\(/{$1="\tlw"; print $0; next;}
# match "not a register as the third argument"
/^\s*slt([^,]*,){2}[^\$]/{$1="\tslti"; print $0; next;}
{print}
'
awk -i inplace "$AWK_FIX_OVERLOADS" "$OUT"
|
Simulizer/Simulizer
|
work/gcc-mips.sh
|
Shell
|
gpl-3.0
| 6,578 |
#!/bin/bash
# HardeningOne - A hardening tool for Linux
# Copyright (C) 2010 Author
# Seg 18 Out 2010 09:49:01 BRST
# São Paulo-SP
# Brazil
# Author:
# * Mauro Risonho de Paula Assumpção aka firebits <firebits at backtrack dot com dot br>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
#
echo " "
echo "-------------------------------------------------------------------------">>/root/ho/v.0.0.1/hardeningone/tmp/report.txt
date>>/root/ho/v.0.0.1/hardeningone/tmp/report.txt
echo "-------------------------------------------------------------------------">>/root/ho/v.0.0.1/hardeningone/tmp/report.txt
echo "Scanning no PHP5 por funcao proc_open">>/root/ho/v.0.0.1/hardeningone/tmp/report.txt
echo "-------------------------------------------------------------------------">>/root/ho/v.0.0.1/hardeningone/tmp/report.txt
cat /etc/php5/apache2/php.ini|grep proc_open>>/root/ho/v.0.0.1/hardeningone/tmp/report.txt
|
firebitsbr/HardeningONE
|
ok/check_php5_proc_open.sh
|
Shell
|
gpl-3.0
| 1,600 |
# vim: ft=sh:sw=2:et
tForemanSetLang() {
# facter 1.7 and earlier fails to parse some values when a non-US LANG or other locale variables are set
# see: http://projects.puppetlabs.com/issues/12012
export LANGUAGE=en_US.UTF-8
export LANG=en_US.UTF-8
export LC_ALL=en_US.UTF-8
}
tForemanVersion() {
(
if tPackageExists foreman; then
tPackageVersion foreman
elif tPackageExists foreman-installer; then
tPackageVersion foreman-installer
fi
) | cut -d. -f1-2
}
|
cfouant/katello-deploy
|
bats/foreman_helper.bash
|
Shell
|
gpl-3.0
| 468 |
#!/bin/bash
# Copyright (c) 2013 Alberto Otero de la Roza <[email protected]>,
# Felix Kannemann <[email protected]>, Erin R. Johnson <[email protected]>,
# Ross M. Dickson <[email protected]>, Hartmut Schmider <[email protected]>,
# and Axel D. Becke <[email protected]>
## modify this
chf="lcwpbe"
c1=1.0875
c2=0.4850
basis="aug-cc-pvtz"
ecp=""
G09="g09"
POSTG="postg"
verbose=""
cat > $2.route <<EOF
%mem=2GB
%nprocs=4
#p lc-wpbe int(grid=ultrafine)
EOF
#########
if [ -f $basis ] ; then
basisfile=$basis
basis="gen"
fi
if [ -n "$ecp" ] && [ -f "$ecp" ] ; then
ecpfile=$ecp
ecp="pseudo=read"
else
ecp=""
fi
# read info from gaussian "output"
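# note (inferred from how the arguments are used in this script, not from the
# Gaussian manual): $2 is the External-interface input file whose first line
# holds natoms / derivative order / charge / spin, $3 is the results file
# handed back to Gaussian, and $4 is the message/log file used for the debug
# output at the end of the script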
read atoms derivs charge spin < $2
# route string
sroute="units=au output=wfx"
if [ $derivs == "2" ] ; then
sroute="${sroute} freq punch=derivatives"
do1="1"
do2="1"
elif [ $derivs == "1" ] ; then
sroute="${sroute} force punch=derivatives"
do1="1"
fi
# prepare the G09 input file ($2.in) for the single-point "Force" calculation
cat $2.route > $2.in
cat >> $2.in <<EOF
# $sroute $basis $ecp
title
$charge $spin
$(sed -n 2,$(($atoms+1))p < $2 | cut -c 1-72)
EOF
if [ "x$basis" == "xgen" ] ; then
echo "" >> $2.in
awk '/^ *$/{next}{print}' $basisfile >> $2.in
fi
if [ -n "$ecp" ] ; then
echo "" >> $2.in
awk '/^ *$/{next}{print}' $ecpfile >> $2.in
fi
cat >> $2.in <<EOF
$2.wfx
EOF
# run G09
$G09 < $2.in > $2.out
if [ -n "$do1" ] ; then
head -n $atoms fort.7 | tr D e > $2.fgauss
fi
if [ -n "$do2" ] ; then
tail -n+$((${atoms}+1)) fort.7 | tr D e > $2.qgauss
fi
rm -f fort.7 >& /dev/null
# run postG
$POSTG $c1 $c2 $2.wfx $chf > $2.outg
# energy
e=$(grep 'total energy' $2.outg | awk '{print $NF}')
printf "%20.12e%20.12e%20.12e%20.12e\n" $e 0 0 0 | tr e D > $3
# forces
if [ -n "$do1" ] ; then
grep -A $(($atoms+1)) 'dispersion forces' $2.outg | awk '{print $2, $3, $4}' | tail -n $atoms > $2.fpostg
paste $2.fgauss $2.fpostg > $2.forces
awk '{printf("%20.12e%20.12e%20.12e\n",$1-$4,$2-$5,$3-$6)}' $2.forces | tr e D >> $3
fi
# frequencies
if [ -n "$do2" ] ; then
printf "%20.12e%20.12e%20.12e\n" 0 0 0 | tr e D >> $3 # polarizability
printf "%20.12e%20.12e%20.12e\n" 0 0 0 | tr e D >> $3
for ((i=1;i<=3*${atoms};i++)) ; do
printf "%20.12e%20.12e%20.12e\n" 0 0 0 | tr e D >> $3 # dip ders
done
grep -A $((3*$atoms*(3*$atoms+1)/2+1)) 'dispersion force constant matrix' $2.outg | \
tail -n $((3*$atoms*(3*$atoms+1)/2)) | \
awk '{printf "%s ",$NF}NR%3==0{printf"\n"}' > $2.qpostg
paste $2.qgauss $2.qpostg > $2.freqs
awk '{printf("%20.12e%20.12e%20.12e\n",$1+$4,$2+$5,$3+$6)}' $2.freqs | tr e D >> $3
fi
if [ -n "$verbose" ] ; then
# output for debug
echo "#XDM# gaussian input" >> $4
cat $2.in >> $4
echo "#XDM# gaussian output" >> $4
cat $2.out >> $4
echo "#XDM# wfx file" >> $4
cat $2.wfx >> $4
echo "#XDM# postg output" >> $4
cat $2.outg >> $4
else
echo "#XDM# tail -n 50 logfile" >> $4
tail -n 50 $2.out >> $4
echo "#XDM# grep 'Delta-E' logfile" >> $4
grep 'Delta-E' $2.out >> $4
echo "#XDM# energies from postg output (hartree)" >> $4
grep 'energy' $2.outg >> $4
fi
rm -f $2.* >& /dev/null
|
aoterodelaroza/postg
|
examples/sp/gxdmopt.sh
|
Shell
|
gpl-3.0
| 3,250 |
#!/bin/bash
# Problems with cartopy if the -m{32,64} flag is not defined.
# See https://taskman.eionet.europa.eu/issues/14817.
MACHINE_TYPE=`uname -m`
if [ ${MACHINE_TYPE} == 'x86_64' ]; then
ARCH="-m64"
else
ARCH="-m32"
fi
CFLAGS=${ARCH} CPPFLAGS=${ARCH} CXXFLAGS=${ARCH} LDFLAGS=${ARCH} FFLAGS=${ARCH} \
./configure --prefix=$PREFIX --without-jni
make
make install
|
pelson/conda-recipes-scitools
|
geos/build.sh
|
Shell
|
gpl-3.0
| 380 |
#!/bin/sh
#
# Checkout the appropriate meza version in Travis
# TRAVIS_EVENT_TYPE: Indicates how the build was triggered. One of push, pull_request, api, cron.
TRAVIS_EVENT_TYPE="$1"
# TRAVIS_COMMIT: The commit that the current build is testing.
TRAVIS_COMMIT="$2"
# TRAVIS_PULL_REQUEST_SHA:
# if the current job is a pull request, the commit SHA of the HEAD commit of the PR.
# if the current job is a push build, this variable is empty ("").
TRAVIS_PULL_REQUEST_SHA="$3"
# TRAVIS_BRANCH:
# for push builds, or builds not triggered by a pull request, this is the name of the branch.
# for builds triggered by a pull request this is the name of the branch targeted by the pull request.
TRAVIS_BRANCH="$4"
# TRAVIS_PULL_REQUEST_BRANCH:
# if the current job is a pull request, the name of the branch from which the PR originated.
# if the current job is a push build, this variable is empty ("").
TRAVIS_PULL_REQUEST_BRANCH="$5"
cd /opt/meza
if [ "$TRAVIS_EVENT_TYPE" = "pull_request" ]; then
git checkout "$TRAVIS_BRANCH"
git merge "origin/$TRAVIS_PULL_REQUEST_BRANCH" || true
git status
echo
echo "rev-parse HEAD:"
git rev-parse HEAD
echo
echo "Pull Request hash:"
echo "$TRAVIS_PULL_REQUEST_SHA"
else
git reset --hard "$TRAVIS_COMMIT"
fi
|
freephile/qb
|
tests/travis/git-setup.sh
|
Shell
|
agpl-3.0
| 1,256 |
#!/bin/bash
#
# Copyright © 2016 Red Hat, Inc.
# Copyright © 2017 Endless Mobile, Inc.
#
# SPDX-License-Identifier: LGPL-2.0+
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
set -euo pipefail
. $(dirname $0)/libtest.sh
echo '1..1'
cd ${test_tmpdir}
mkdir repo
ostree_repo_init repo --collection-id org.example.Collection
mkdir -p tree/root
touch tree/root/a
# Add a few commits
seq 5 | while read i; do
echo a >> tree/root/a
${CMD_PREFIX} ostree --repo=repo commit --branch=test-$i -m test -s test tree
done
# Check that they are all listed, with the repository’s collection ID, in the summary.
${CMD_PREFIX} ostree --repo=repo summary --update
${CMD_PREFIX} ostree --repo=repo summary --view > summary
assert_file_has_content summary "(org\.example\.Collection, test-1)$"
assert_file_has_content summary "(org\.example\.Collection, test-2)$"
assert_file_has_content summary "(org\.example\.Collection, test-3)$"
assert_file_has_content summary "(org\.example\.Collection, test-4)$"
assert_file_has_content summary "(org\.example\.Collection, test-5)$"
assert_file_has_content summary "^Collection ID (ostree\.summary\.collection-id): org\.example\.Collection$"
# Test that mirrored branches are listed too.
${CMD_PREFIX} ostree --repo=repo refs --collections --create=org.example.OtherCollection:test-1-mirror test-1
${CMD_PREFIX} ostree --repo=repo summary --update
${CMD_PREFIX} ostree --repo=repo summary --view > summary
assert_file_has_content summary "(org\.example\.OtherCollection, test-1-mirror)$"
# Test that remote refs are listed, but only if they have collection IDs
cd ${test_tmpdir}
mkdir collection-repo
ostree_repo_init collection-repo --collection-id org.example.RemoteCollection
mkdir -p adir
${CMD_PREFIX} ostree --repo=collection-repo commit --branch=rcommit -m rcommit -s rcommit adir
${CMD_PREFIX} ostree --repo=repo remote add --no-sign-verify --collection-id org.example.RemoteCollection collection-repo-remote "file://${test_tmpdir}/collection-repo"
${CMD_PREFIX} ostree --repo=repo pull collection-repo-remote rcommit
${CMD_PREFIX} ostree --repo=repo summary --update
${CMD_PREFIX} ostree --repo=repo summary --view > summary
assert_file_has_content summary "(org\.example\.RemoteCollection, rcommit)$"
cd ${test_tmpdir}
mkdir no-collection-repo
ostree_repo_init no-collection-repo
mkdir -p adir2
${CMD_PREFIX} ostree --repo=no-collection-repo commit --branch=rcommit2 -m rcommit2 -s rcommit2 adir2
${CMD_PREFIX} ostree --repo=repo remote add --no-sign-verify no-collection-repo-remote "file://${test_tmpdir}/no-collection-repo"
${CMD_PREFIX} ostree --repo=repo pull no-collection-repo-remote rcommit2
${CMD_PREFIX} ostree --repo=repo summary --update
${CMD_PREFIX} ostree --repo=repo summary --view > summary
assert_not_file_has_content summary "rcommit2"
echo "ok summary collections"
|
GNOME/ostree
|
tests/test-summary-collections.sh
|
Shell
|
lgpl-2.1
| 3,520 |
#!/bin/sh
get_cmd_version() {
if [ -z "$1" ]; then
return
fi
local cmd="$1"
if command -v "$cmd" 2>&1 >/dev/null; then
ver=$("$cmd" --version 2> /dev/null | head -n 1)
# some tools (eg. openocd) print version info to stderr
if [ -z "$ver" ]; then
ver=$("$cmd" --version 2>&1 | head -n 1)
fi
if [ -z "$ver" ]; then
ver="error"
fi
else
ver="missing"
fi
printf "%s" "$ver"
}
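# example (based on the logic above): "get_cmd_version gcc" prints the first
# line of `gcc --version`, or "missing" if gcc is not installed, or "error"
# if no version string could be read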
get_define() {
local cc="$1"
local line=
if command -v "$cc" 2>&1 >/dev/null; then
line=$(echo "$3" | "$cc" -x c -include "$2" -E -o - - 2>&1 | sed -e '/^[ ]*#/d' -e '/^[ ]*$/d')
fi
if [ -z "$line" ]; then
line=missing
fi
printf "%s" "$line"
}
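# example (mirroring how it is used further below): "get_define arm-none-eabi-gcc newlib.h _NEWLIB_VERSION"
# preprocesses a snippet that includes newlib.h and prints the expansion of
# _NEWLIB_VERSION, or "missing" if the compiler is unavailable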
get_kernel_info() {
uname -mprs
}
get_os_info() {
local os="$(uname -s)"
local osname="unknown"
local osvers="unknown"
if [ "$os" = "Linux" ]; then
osname="$(cat /etc/os-release | grep ^NAME= | awk -F'=' '{print $2}')"
osvers="$(cat /etc/os-release | grep ^VERSION= | awk -F'=' '{print $2}')"
elif [ "$os" = "Darwin" ]; then
osname="$(sw_vers -productName)"
osvers="$(sw_vers -productVersion)"
fi
printf "%s %s" "$osname" "$osvers"
}
newlib_version() {
if [ -z "$1" ]; then
printf "%s" "error"
else
local cc="$1"
printf "%s" "$(get_define "$cc" newlib.h _NEWLIB_VERSION)"
fi
}
avr_libc_version() {
if [ -z "$1" ]; then
printf "%s" "error"
else
local cc="$1"
printf "%s (%s)" "$(get_define "$cc" avr/version.h __AVR_LIBC_VERSION_STRING__)" "$(get_define "$cc" avr/version.h __AVR_LIBC_DATE_STRING__)"
fi
}
printf "\n"
# print operating system information
printf "%s\n" "Operating System Environment"
printf "%s\n" "-----------------------------"
printf "%23s: %s\n" "Operating System" "$(get_os_info)"
printf "%23s: %s\n" "Kernel" "$(get_kernel_info)"
printf "\n"
printf "%s\n" "Installed compiler toolchains"
printf "%s\n" "-----------------------------"
printf "%23s: %s\n" "native gcc" "$(get_cmd_version gcc)"
for p in arm-none-eabi avr mips-mti-elf msp430 riscv-none-embed; do
printf "%23s: %s\n" "$p-gcc" "$(get_cmd_version ${p}-gcc)"
done
printf "%23s: %s\n" "clang" "$(get_cmd_version clang)"
printf "\n"
printf "%s\n" "Installed compiler libs"
printf "%s\n" "-----------------------"
# platform specific newlib version
for p in arm-none-eabi mips-mti-elf riscv-none-embed; do
printf "%23s: %s\n" "$p-newlib" "$(newlib_version ${p}-gcc)"
done
# avr libc version
printf "%23s: %s\n" "avr-libc" "$(avr_libc_version avr-gcc)"
# tools
printf "\n"
printf "%s\n" "Installed development tools"
printf "%s\n" "---------------------------"
for c in cmake cppcheck doxygen flake8 git openocd python python2 python3; do
printf "%23s: %s\n" "$c" "$(get_cmd_version $c)"
done
printf "%23s: %s\n" "coccinelle" "$(get_cmd_version spatch)"
exit 0
|
avmelnikoff/RIOT
|
dist/tools/ci/print_toolchain_versions.sh
|
Shell
|
lgpl-2.1
| 2,976 |
#!/bin/sh -xe
# Simple integration test. Make sure to activate virtualenv beforehand
# (source venv/bin/activate) and that you are running Boulder test
# instance (see ./boulder-start.sh).
#
# Environment variables:
# SERVER: Passed as "letsencrypt --server" argument.
#
# Note: this script is called by Boulder integration test suite!
. ./tests/integration/_common.sh
export PATH="/usr/sbin:$PATH" # /usr/sbin/nginx
common() {
letsencrypt_test \
--authenticator standalone \
--installer null \
"$@"
}
common --domains le1.wtf auth
common --domains le2.wtf run
common -a manual -d le.wtf auth
common -a manual -d le.wtf --no-simple-http-tls auth
export CSR_PATH="${root}/csr.der" KEY_PATH="${root}/key.pem" \
OPENSSL_CNF=examples/openssl.cnf
./examples/generate-csr.sh le3.wtf
common auth --csr "$CSR_PATH" \
--cert-path "${root}/csr/cert.pem" \
--chain-path "${root}/csr/chain.pem"
openssl x509 -in "${root}/csr/0000_cert.pem" -text
openssl x509 -in "${root}/csr/0000_chain.pem" -text
common --domain le3.wtf install \
--cert-path "${root}/csr/cert.pem" \
--key-path "${root}/csr/key.pem"
# the following assumes that Boulder issues certificates for less than
# 10 years, otherwise renewal will not take place
cat <<EOF > "$root/conf/renewer.conf"
renew_before_expiry = 10 years
deploy_before_expiry = 10 years
EOF
letsencrypt-renewer $store_flags
dir="$root/conf/archive/le1.wtf"
for x in cert chain fullchain privkey;
do
latest="$(ls -1t $dir/ | grep -e "^${x}" | head -n1)"
live="$(readlink -f "$root/conf/live/le1.wtf/${x}.pem")"
[ "${dir}/${latest}" = "$live" ] # renewer fails this test
done
# revoke by account key
common revoke --cert-path "$root/conf/live/le.wtf/cert.pem"
# revoke renewed
common revoke --cert-path "$root/conf/live/le1.wtf/cert.pem"
# revoke by cert key
common revoke --cert-path "$root/conf/live/le2.wtf/cert.pem" \
--key-path "$root/conf/live/le2.wtf/privkey.pem"
if type nginx;
then
. ./letsencrypt-nginx/tests/boulder-integration.sh
fi
|
bestwpw/letsencrypt
|
tests/boulder-integration.sh
|
Shell
|
apache-2.0
| 2,067 |
#!/usr/bin/env bash
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#
# Tests the microcontroller code for stm32f4
set -e
TARGET=stm32f4
TAGS=cmsis-nn
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
ROOT_DIR=${SCRIPT_DIR}/../../../../..
cd ${ROOT_DIR}
pwd
source tensorflow/lite/micro/tools/ci_build/helper_functions.sh
readable_run make -f tensorflow/lite/micro/tools/make/Makefile clean
# TODO(b/143715361): downloading first to allow for parallel builds.
readable_run make -f tensorflow/lite/micro/tools/make/Makefile TAGS=${TAGS} TARGET=${TARGET} third_party_downloads
# Build test binaries first
readable_run make -j8 -f tensorflow/lite/micro/tools/make/Makefile TAGS=${TAGS} TARGET=${TARGET} build
# Parallel builds don't work very well with this
readable_run make -f tensorflow/lite/micro/tools/make/Makefile TAGS=${TAGS} TARGET=${TARGET} test
|
renyi533/tensorflow
|
tensorflow/lite/micro/tools/ci_build/test_stm32f4.sh
|
Shell
|
apache-2.0
| 1,514 |
#!/bin/bash
if [ -z ${USER_UID:+x} ]
then
export USER_UID=1000
export GROUP_GID=1000
fi
clean () {
docker-compose run --rm -u "$USER_UID:$GROUP_GID" gradle gradle clean
}
install() {
docker-compose run --rm -u "$USER_UID:$GROUP_GID" gradle gradle install publishToMavenLocal
}
publish() {
if [ -e "?/.gradle" ] && [ ! -e "?/.gradle/gradle.properties" ]
then
echo "odeUsername=$NEXUS_ODE_USERNAME" > "?/.gradle/gradle.properties"
echo "odePassword=$NEXUS_ODE_PASSWORD" >> "?/.gradle/gradle.properties"
echo "sonatypeUsername=$NEXUS_SONATYPE_USERNAME" >> "?/.gradle/gradle.properties"
echo "sonatypePassword=$NEXUS_SONATYPE_PASSWORD" >> "?/.gradle/gradle.properties"
fi
docker-compose run --rm -u "$USER_UID:$GROUP_GID" gradle gradle publish
}
for param in "$@"
do
case $param in
clean)
clean
;;
install)
install
;;
publish)
publish
;;
*)
echo "Invalid argument : $param"
esac
if [ ! $? -eq 0 ]; then
exit 1
fi
done
|
web-education/mongodb-helper
|
build.sh
|
Shell
|
apache-2.0
| 1,022 |
sleep 65
java -Djava.security.egd=file:/dev/./urandom -jar /app/app.jar
|
keryhu/micro-oauth2-docker
|
demo/auth-server/target/docker/runboot.sh
|
Shell
|
apache-2.0
| 71 |
#!/bin/bash
#
# build the dendrite binaries into ./bin
cd `dirname $0`/..
set -eux
export GOPATH=`pwd`/.gopath
export PATH="${GOPATH}/bin:$PATH"
go get github.com/constabulary/gb/...
gb build
|
jcgruenhage/dendrite
|
jenkins/prepare-dendrite.sh
|
Shell
|
apache-2.0
| 196 |
export LD_LIBRARY_PATH=bin/SkyBlue/LCP/EDLoggingControl/LogDecompressor:${LD_LIBRARY_PATH}
# If no argument is passed.
if [ ! "$1" ]; then
#nohup java -jar Httpd.jar >> /dev/null 2>&1
sudo sh -c "java -cp .:lib/sqlite-jdbc-3.7.2.jar:lib/java_websocket.jar -classpath Httpd.jar:lib/sqlite-jdbc-3.7.2.jar:lib/java_websocket.jar SkyBlue.LCP.Httpd.core.Httpd -deamon >> /dev/null 2>&1 &"
else
# If the argument is not an existing data file path.
if [ ! -f "$1" ]; then
#nohup java -jar Httpd.jar >> /dev/null 2>&1
sudo sh -c "java -cp .:lib/sqlite-jdbc-3.7.2.jar:lib/java_websocket.jar -classpath Httpd.jar:lib/sqlite-jdbc-3.7.2.jar:lib/java_websocket.jar SkyBlue.LCP.Httpd.core.Httpd -deamon >> /dev/null 2>&1 &"
else
#nohup java -jar Httpd.jar -conf $1 >> /dev/null 2>&1
sudo sh -c "java -cp .:lib/sqlite-jdbc-3.7.2.jar:lib/java_websocket.jar -classpath Httpd.jar:lib/sqlite-jdbc-3.7.2.jar:lib/java_websocket.jar SkyBlue.LCP.Httpd.core.Httpd -conf $1 -deamon >> /dev/null 2>&1 &"
fi
fi
|
Bluesky-CPS/BlueSkyLoggerCloudBINResearchVer1.0
|
bin/HttpdAsDeamon.sh
|
Shell
|
apache-2.0
| 985 |
# $FreeBSD: soc2013/dpl/head/tools/regression/pjdfstest/tests/misc.sh 249505 2013-03-15 00:10:38Z pjd $
ntest=1
case "${dir}" in
/*)
maindir="${dir}/../.."
;;
*)
maindir="`pwd`/${dir}/../.."
;;
esac
fstest="${maindir}/pjdfstest"
. ${maindir}/tests/conf
expect()
{
e="${1}"
shift
r=`${fstest} $* 2>/dev/null | tail -1`
echo "${r}" | ${GREP} -Eq '^'${e}'$'
if [ $? -eq 0 ]; then
if [ -z "${todomsg}" ]; then
echo "ok ${ntest}"
else
echo "ok ${ntest} # TODO ${todomsg}"
fi
else
if [ -z "${todomsg}" ]; then
echo "not ok ${ntest} - tried '$*', expected ${e}, got ${r}"
else
echo "not ok ${ntest} # TODO ${todomsg}"
fi
fi
todomsg=""
ntest=$((ntest+1))
}
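# example (mirroring the calls in create_file below): `expect 0 create ${name} 0644`
# runs `pjdfstest create <name> 0644`, checks that the last line of its output
# matches ^0$, and prints "ok N" or "not ok N" accordingly (TAP style)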
jexpect()
{
s="${1}"
d="${2}"
e="${3}"
shift 3
r=`jail -s ${s} / pjdfstest 127.0.0.1 /bin/sh -c "cd ${d} && ${fstest} $* 2>/dev/null" | tail -1`
echo "${r}" | ${GREP} -Eq '^'${e}'$'
if [ $? -eq 0 ]; then
if [ -z "${todomsg}" ]; then
echo "ok ${ntest}"
else
echo "ok ${ntest} # TODO ${todomsg}"
fi
else
if [ -z "${todomsg}" ]; then
echo "not ok ${ntest} - tried '$*', expected ${e}, got ${r}"
else
echo "not ok ${ntest} # TODO ${todomsg}"
fi
fi
todomsg=""
ntest=$((ntest+1))
}
test_check()
{
if [ $* ]; then
if [ -z "${todomsg}" ]; then
echo "ok ${ntest}"
else
echo "ok ${ntest} # TODO ${todomsg}"
fi
else
if [ -z "${todomsg}" ]; then
echo "not ok ${ntest}"
else
echo "not ok ${ntest} # TODO ${todomsg}"
fi
fi
todomsg=""
ntest=$((ntest+1))
}
todo()
{
if [ "${os}" = "${1}" -o "${os}:${fs}" = "${1}" ]; then
todomsg="${2}"
fi
}
namegen()
{
echo "pjdfstest_`dd if=/dev/urandom bs=1k count=1 2>/dev/null | openssl md5 | awk '{print $NF}'`"
}
namegen_len()
{
len="${1}"
name=""
while :; do
namepart="`dd if=/dev/urandom bs=64 count=1 2>/dev/null | openssl md5 | awk '{print $NF}'`"
name="${name}${namepart}"
curlen=`printf "%s" "${name}" | wc -c`
[ ${curlen} -lt ${len} ] || break
done
name=`echo "${name}" | cut -b -${len}`
printf "%s" "${name}"
}
# POSIX:
# {NAME_MAX}
# Maximum number of bytes in a filename (not including terminating null).
namegen_max()
{
name_max=`${fstest} pathconf . _PC_NAME_MAX`
namegen_len ${name_max}
}
# POSIX:
# {PATH_MAX}
# Maximum number of bytes in a pathname, including the terminating null character.
dirgen_max()
{
name_max=`${fstest} pathconf . _PC_NAME_MAX`
complen=$((name_max/2))
path_max=`${fstest} pathconf . _PC_PATH_MAX`
# "...including the terminating null character."
path_max=$((path_max-1))
name=""
while :; do
name="${name}`namegen_len ${complen}`/"
curlen=`printf "%s" "${name}" | wc -c`
[ ${curlen} -lt ${path_max} ] || break
done
name=`echo "${name}" | cut -b -${path_max}`
name=`echo "${name}" | sed -E 's@/$@x@'`
printf "%s" "${name}"
}
quick_exit()
{
echo "1..1"
echo "ok 1"
exit 0
}
supported()
{
case "${1}" in
lchmod)
if [ "${os}" != "FreeBSD" ]; then
return 1
fi
;;
chflags)
if [ "${os}" != "FreeBSD" ]; then
return 1
fi
;;
chflags_SF_SNAPSHOT)
if [ "${os}" != "FreeBSD" -o "${fs}" != "UFS" ]; then
return 1
fi
;;
esac
return 0
}
require()
{
if supported ${1}; then
return
fi
quick_exit
}
# usage:
# create_file <type> <name>
# create_file <type> <name> <mode>
# create_file <type> <name> <uid> <gid>
# create_file <type> <name> <mode> <uid> <gid>
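# example (the name "foo" is just a placeholder): create_file regular foo 0644 65534 65534
# creates the file with "create foo 0644", then applies "lchmod foo 0644" and
# "lchown foo 65534 65534", each call checked with expect 0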
create_file() {
type="${1}"
name="${2}"
case "${type}" in
none)
return
;;
regular)
expect 0 create ${name} 0644
;;
dir)
expect 0 mkdir ${name} 0755
;;
fifo)
expect 0 mkfifo ${name} 0644
;;
block)
expect 0 mknod ${name} b 0644 1 2
;;
char)
expect 0 mknod ${name} c 0644 1 2
;;
socket)
expect 0 bind ${name}
;;
symlink)
expect 0 symlink test ${name}
;;
esac
if [ -n "${3}" -a -n "${4}" -a -n "${5}" ]; then
expect 0 lchmod ${name} ${3}
expect 0 lchown ${name} ${4} ${5}
elif [ -n "${3}" -a -n "${4}" ]; then
expect 0 lchown ${name} ${3} ${4}
elif [ -n "${3}" ]; then
expect 0 lchmod ${name} ${3}
fi
}
|
dplbsd/soc2013
|
head/tools/regression/pjdfstest/tests/misc.sh
|
Shell
|
bsd-2-clause
| 4,012 |
#!/bin/bash
cd tools
./configure.sh spark/$1
cd - > /dev/null
|
IUTInfoAix/terrarium_2015
|
nuttx/configs/spark/tools/env.sh
|
Shell
|
bsd-2-clause
| 63 |
#!/bin/sh
../GLCloud3/GLCloud3
|
fluffyfreak/sandbox
|
src/bin/GLCloud3.sh
|
Shell
|
bsd-3-clause
| 31 |
#!/bin/bash
go test $(go list ./... | grep -v /vendor/)
|
manifest-destiny/api
|
scripts/run-tests.sh
|
Shell
|
bsd-3-clause
| 57 |
#!/bin/bash
open_pdfs() {
find . -name '*.pprof' | while read -r i; do
# strip the leading "./" and the .pprof extension so the pdf path is valid
base="$(basename "$i" .pprof)"
go tool pprof --pdf antibody "$i" > "/tmp/$1_${base}.pdf" && open "/tmp/$1_${base}.pdf"
done
}
# TODO fix this
# defer profile.Start(
# profile.MemProfile,
# profile.CPUProfile,
# profile.NoShutdownHook,
# profile.ProfilePath("."),
# ).Stop()
# git apply "./scripts/profiling/patch.patch"
go build -ldflags="-s -w -X main.version=profiling" -o antibody ./cmd/antibody
export ANTIBODY_HOME="$(mktemp -d)"
# bundle all plugins from awesome-zsh-plugins
/usr/bin/time ./antibody bundle < ./scripts/profiling/bundles.txt > /dev/null
open_pdfs bundle_download
/usr/bin/time ./antibody bundle < ./scripts/profiling/bundles.txt > /dev/null
open_pdfs bundle
/usr/bin/time ./antibody update > /dev/null
open_pdfs update
/usr/bin/time ./antibody list > /dev/null
open_pdfs list
/usr/bin/time ./antibody home > /dev/null
open_pdfs home
/usr/bin/time ./antibody init > /dev/null
open_pdfs init
rm -f ./antibody
# git checkout ./cmd/antibody/main.go
|
twang817/antibody
|
scripts/profiling/work.sh
|
Shell
|
mit
| 1,024 |
#!/bin/bash
if [ -z "$TRUFFLE_WORKING_DIRECTORY" ];
then
export TRUFFLE_WORKING_DIRECTORY=`pwd`
fi
if [ -z "$TRUFFLE_NPM_LOCATION" ];
then
export TRUFFLE_NPM_LOCATION=$(npm config --global get prefix)/lib/node_modules/truffle/
fi
# Hack. babel-node will clobber -e, and it doesn't look like `--` will stop it.
# Because we're doing string replacement, we have to take edge cases into account.
args=" $@"
args=${args// -e / --environment }
args=${args// -e=/ --environment=}
args=${args// -environment/ --environment}
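# e.g. args=" -e development app.js" becomes " --environment development app.js"
# (the file name is only an illustration of the substitution above)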
cd $TRUFFLE_NPM_LOCATION
$TRUFFLE_NPM_LOCATION/node_modules/.bin/babel-node -- $TRUFFLE_NPM_LOCATION/truffle-exec.es6 ${args}
|
harlantwood/truffle
|
truffle-exec.bash
|
Shell
|
mit
| 650 |
#!/usr/bin/env bash
CURRENT_DIR="$( dirname ${BASH_SOURCE[0]} )"
source "$CURRENT_DIR/../helpers.sh"
source "$CURRENT_DIR/../variables.sh"
template() {
local tmux_start_script="$1"
shift
local options="$@"
local content=""
read -r -d '' content <<-EOF
[Unit]
Description=tmux default session (detached)
Documentation=man:tmux(1)
[Service]
Type=forking
Environment=DISPLAY=:0
ExecStart=/usr/bin/tmux ${systemd_tmux_server_start_cmd}
ExecStop=${HOME}/.tmux/plugins/tmux-resurrect/scripts/save.sh
ExecStop=/usr/bin/tmux kill-server
KillMode=none
RestartSec=2
[Install]
WantedBy=default.target
EOF
echo "$content"
}
systemd_tmux_is_enabled() {
systemctl --user is-enabled $(basename "${systemd_unit_file_path}") >/dev/null 2>&1
}
enable_tmux_unit_on_boot() {
if ! systemd_tmux_is_enabled; then
systemctl --user enable ${systemd_service_name}
fi
}
main() {
local options="$(get_tmux_option "$auto_start_config_option" "${auto_start_config_default}")"
local systemd_tmux_server_start_cmd="$(get_tmux_option "${systemd_tmux_server_start_cmd_option}" "${systemd_tmux_server_start_cmd_default}" )"
local tmux_start_script_path="${CURRENT_DIR}/linux_start_tmux.sh"
local systemd_unit_file=$(template "${tmux_start_script_path}" "${options}")
mkdir -p "$(dirname ${systemd_unit_file_path})"
echo "$systemd_unit_file" > "${systemd_unit_file_path}"
enable_tmux_unit_on_boot
}
main
|
persevereVon/dotfiles
|
tmux-plugins/tmux-continuum/scripts/handle_tmux_automatic_start/systemd_enable.sh
|
Shell
|
mit
| 1,412 |
#! /bin/bash
mkdir -p dataset
cd dataset
wget http://nlp.stanford.edu/data/glove.840B.300d.zip
unzip glove.840B.300d.zip
cd ..
./convert.py
|
FALCONN-LIB/FALCONN
|
src/examples/glove/prepare-dataset.sh
|
Shell
|
mit
| 141 |
train_file="data/train.small"
valid_file="data/valid.small"
test_file="data/test.small"
neuron_type="LSTM" # RNN/LSTM
train_method="ALL"
projection_size=100
hidden_size=100
stack_size=1
learning_rate=1e-4
minibatch_size=100
improvement_rate=5e-5
max_epoch=1000
save_model="model/small/all/pro"$projection_size".h"$hidden_size".mini"$minibatch_size".neuron"$neuron_type"-4"
load_model="model/small/turing/pro"$projection_size".h"$hidden_size".mini"$minibatch_size".neuron"$neuron_type"-7"
# train
python lm_v4.py --train $train_file --valid $valid_file --neuron-type $neuron_type --train-method $train_method --projection-size $projection_size --hidden-size $hidden_size --stack $stack_size --learning-rate $learning_rate --improvement-rate $improvement_rate --minibatch-size $minibatch_size --max-epoch $max_epoch --save-net $save_model --early-stop 0 --load-net $load_model
# test
#python lm_v4.py --test $test_file --load-net $save_model
|
darongliu/Lstm_Turing_LM
|
lstm-neural-turing-machines-lm/v3/run_script/run_all.sh
|
Shell
|
mit
| 944 |
#!/bin/sh
set -e
TIME=`date +%Y%m%d%H%M%S`
export GIT_SHA=`cd $GOPATH/src/bosun.org; git rev-parse HEAD`
build()
{
export GOOS=$1
export GOARCH=$2
EXT=""
if [ $GOOS = "windows" ]; then
EXT=".exe"
fi
if [ $GOARCH = "arm" ]; then
export GOARM=${3-6}
EXT="v${GOARM}"
fi
echo $GOOS $GOARCH $EXT
if $BOSUN; then
go build -o ${OUTPUTDIR}bosun-$GOOS-$GOARCH$EXT -ldflags "-X bosun.org/_version.VersionSHA=$GIT_SHA -X bosun.org/_version.OfficialBuild=true -X bosun.org/_version.VersionDate=$TIME" bosun.org/cmd/bosun
go build -o ${OUTPUTDIR}tsdbrelay-$GOOS-$GOARCH$EXT -ldflags "-X bosun.org/_version.VersionSHA=$GIT_SHA -X bosun.org/_version.OfficialBuild=true -X bosun.org/_version.VersionDate=$TIME" bosun.org/cmd/tsdbrelay
fi
go build -o ${OUTPUTDIR}scollector-$GOOS-$GOARCH$EXT -ldflags "-X bosun.org/_version.VersionSHA=$GIT_SHA -X bosun.org/_version.OfficialBuild=true -X bosun.org/_version.VersionDate=$TIME" bosun.org/cmd/scollector
}
BOSUN=true
for GOOS in windows linux darwin; do
for GOARCH in amd64 386; do
build $GOOS $GOARCH
done
done
BOSUN=false
build linux arm 5
build linux arm 6
build linux arm 7
if [ "$GITHUB_ACCESS_TOKEN" = "" ]; then
echo GITHUB_ACCESS_TOKEN not set: not running githubRelease.go
else
GOOS=darwin
GOARCH=amd64
export BUILD_NUMBER=`${OUTPUTDIR}bosun-darwin-amd64 -version | awk '{print $3}'`
go run build/release/githubRelease.go
fi
|
simnv/bosun
|
build/release.sh
|
Shell
|
mit
| 1,399 |
rm -rf tmp/mruby
|
fundamental/mruby-rubyffi-compat
|
.travis_after.sh
|
Shell
|
mit
| 16 |
#!/usr/bin/env bash
cat <<EOF >> ${PREFIX}/.messages.txt
Please run 'download-db.sh path/to/store/databases' to download all required VIBRANT database files.
The path will be stored in the environment variable VIBRANT_DATA_PATH so you don't have to think about it again.
Default location is ${VIBRANT_DATA_PATH}
EOF
|
saketkc/bioconda-recipes
|
recipes/vibrant/post-link.sh
|
Shell
|
mit
| 308 |
#!/bin/bash
SRC_ROOT=$(readlink -f $(dirname $0)/..)
STAGE=0
ROOT=$1
. $SRC_ROOT/lib/config
. $SRC_ROOT/lib/shell_lib
InitVal
[ -z $(GetDev $ROOT) ] && echo Please mount rootfs on $ROOT && exit 1
ChrootPrepare $ROOT $BOOT_DEV
chroot $ROOT df -h
echo chroot $ROOT
chroot $ROOT
ChrootEnd $ROOT
|
alexasahis/OpenLinkstation
|
1_debootstrap/chroot.sh
|
Shell
|
gpl-2.0
| 298 |
#!/usr/bin/env bash
# Copyright (C) 2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# This script searches for :class:`foo` links to non-existent 'foo'
# classes/functions in the output of the Sphinx documentation. When
# broken links are found in .rst files, the putative line numbers are
# printed, otherwise the .html files are printed without line number.
[ -z "${srcdir}" ] && srcdir=$(realpath ..)
# Pattern for :class:`foo` commands to non-existent 'foo' classes/functions.
# They are formatted by Sphinx just like regular links to functions, but are
# not enclosed within <a href="..."></a> tags. Sphinx doesn't use line
# wrapping, so these broken links can be found via text search. The first
# negative lookahead filters out common Python types (for performance reasons).
regex_sphinx_broken_link='<code class=\"xref py py-[a-z]+ docutils literal notranslate\"><span class=\"pre\">(?!(int|float|complex|bool|str|bytes|array|bytearray|memoryview|object|list|tuple|range|slice|dict|set|frozenset|(?:numpy\.|np\.)?(?:nd)?array)<)[^<>]+?</span></code>(?!</a>)'
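# Illustrative match (constructed from the pattern above, not a captured example):
# <code class="xref py py-class docutils literal notranslate"><span class="pre">espressomd.foo.Bar</span></code>
# is flagged when it is not immediately followed by </a>, while common Python
# built-ins such as "int" or "float" are skipped by the negative lookahead.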
if [ ! -f doc/sphinx/html/index.html ]; then
echo "Please run Sphinx first."
exit 1
fi
n_warnings=0
grep -qrP --include='*.html' --exclude-dir=_modules "${regex_sphinx_broken_link}" doc/sphinx/html/
if [ "${?}" = "0" ]; then
rm -f doc_warnings.log~
touch doc_warnings.log~
found="false"
grep -rPno --include='*.html' --exclude-dir=_modules "${regex_sphinx_broken_link}" doc/sphinx/html/ | sort | uniq | while read -r line; do
# extract link target
reference=$(echo "${line}" | sed -r 's|^.+<span class="pre">(.+)</span></code>$|\1|' | sed 's/()$//')
lineno=$(echo "${line}" | cut -d ':' -f 2)
# skip if broken link refers to a standard Python type or to a
# class/function from an imported module other than espressomd
is_standard_type_or_module="false"
grep -Pq '^([a-zA-Z0-9_]+Error|[a-zA-Z0-9_]*Exception|(?!espressomd\.)[a-zA-Z0-9_]+\.[a-zA-Z0-9_\.]+)$' <<< "${reference}"
[ "${?}" = "0" ] && is_standard_type_or_module="true"
# private objects are not documented and cannot be linked
is_private="false"
grep -Pq "(^_|\._)" <<< "${reference}"
[ "${?}" = "0" ] && is_private="true"
# filter out false positives
if [ "${is_standard_type_or_module}" = "true" ] || [ "${is_private}" = "true" ]; then
continue
fi
if [ "${found}" = "false" ]; then
echo "The Sphinx documentation contains broken links:"
fi
found="true"
# locate the .rst file containing the broken link
filepath_html=$(echo "${line}" | cut -d ':' -f 1)
filepath_rst=$(echo "${filepath_html}" | sed 's|/html/|/|')
filepath_rst="${filepath_rst%.html}.rst"
if [ -f "${filepath_rst}" ]; then
# look for the reference
grep -q -F "\`${reference}\`" "${filepath_rst}"
if [ "${?}" = "0" ]; then
grep --color -FnHo -m 1 "\`${reference}\`" "${filepath_rst}" | tee -a doc_warnings.log~
continue
fi
# if not found, check if reference was shortened, for example
# :class:`~espressomd.system.System` outputs a link named `System`
grep -q -P "^[a-zA-Z0-9_]+$" <<< "${reference}"
if [ "${?}" = "0" ]; then
grep -q -P "\`~.+${reference}\`" "${filepath_rst}"
if [ "${?}" = "0" ]; then
grep --color -PnHo -m 1 "\`~.+${reference}\`" "${filepath_rst}" | tee -a doc_warnings.log~
continue
fi
fi
fi
# if not in a .rst file, show the .html file
echo "${filepath_html}:${lineno}:\`${reference}\`" | tee -a doc_warnings.log~
done
# generate log file
n_warnings=$(wc -l < doc_warnings.log~)
echo "The Sphinx documentation contains ${n_warnings} broken links:" > doc_warnings.log
cat doc_warnings.log~ >> doc_warnings.log
rm doc_warnings.log~
fi
# Find malformed reSt roles, appearing as raw text in the HTML output:
# * unparsed roles, e.g. ":cite:`UnknownKey`"
# * broken math formula, e.g. ":math:` leading whitespace`"
# * incorrect syntax, e.g. "obj:float:"
# * incorrect numpydoc syntax, e.g. ":rtype:`float`"
# They are difficult to predict, so we leave them to the user's discretion
grep -qrP --include='*.html' --exclude-dir=_modules '(:py)?:[a-z]+:' doc/sphinx/html/
if [ "${?}" = "0" ]; then
echo "Possibly errors:"
grep -rP --color --include='*.html' --exclude-dir=_modules '(:py)?:[a-z]+:' doc/sphinx/html/
fi
if [ "${CI}" != "" ]; then
"${srcdir}/maintainer/gh_post_docs_warnings.py" sphinx "${n_warnings}" doc_warnings.log || exit 1
fi
if [ "${n_warnings}" = "0" ]; then
echo "Found no broken link requiring fixing."
exit 0
else
exit 1
fi
|
espressomd/espresso
|
maintainer/CI/doc_warnings.sh
|
Shell
|
gpl-3.0
| 5,552 |
#!/bin/bash
if [ ! -d jars ]; then
echo "Run from silver root as './support/profile/run.sh'"
exit 1
fi
mkdir -p build
cd build
java -Xss8M -Xmx3000M -Xrunhprof:heap=sites,cpu=samples -jar ../jars/RunSilver.jar --clean silver:composed:Default
|
charleshofer/silver
|
support/profile/run.sh
|
Shell
|
gpl-3.0
| 249 |
#!/bin/bash
cd sample/csv-provider
npm install
npm install ../../src/common/node-connector
node_modules/gulp/bin/gulp.js coffee
node_modules/gulp/bin/gulp.js --name=csv-provider --url=tcp://config-service:65001
|
starschema/virtdb-apps
|
start-csv-provider.sh
|
Shell
|
lgpl-3.0
| 211 |