| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
| stringlengths 2-1.05M | stringlengths 5-110 | stringlengths 3-922 | stringclasses 1 value | stringclasses 15 values | int64 2-1.05M |
#!/bin/bash
#
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This script executes all of the tests located in this folder via the
# nosetests API. Coverage is provided by coverage.py.
# This script requires that install_pyenv.sh has been run first.
# Pass '-x' to exclude dirs from coverage; this requires nose-exclude to be
# installed.
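#
# Example invocations (assuming the virtualenv from install_pyenv.sh is active):
#   ./runtests.sh        # run all tests with coverage
#   ./runtests.sh -x     # additionally exclude the dirs listed in cov_exclude_dirs.txt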
export IN_UNIT_TESTS='true'
TESTS_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
SPARKTK_DIR=$(dirname "$TESTS_DIR")
PYTHON_DIR=$(dirname "$SPARKTK_DIR")
echo TESTS_DIR=$TESTS_DIR
echo SPARKTK_DIR=$SPARKTK_DIR
echo PYTHON_DIR=$PYTHON_DIR
cd $PYTHON_DIR
export PYTHONPATH=$PYTHONPATH:$PYTHON_DIR
#python -c "import sys; print 'sys.path=' + str(sys.path)"
# Check that the required python libraries are correctly installed by
# importing them through python. A non-zero exit status from the import
# means the module is missing.
if ! python2.7 -c "import sparktk" &> /dev/null; then
    echo "sparktk cannot be found"
    exit 1
fi
if ! python2.7 -c "import nose" &> /dev/null; then
    echo "nose is not installed in your Python virtual environment; please install nose."
    exit 1
fi
if ! python2.7 -c "import coverage" &> /dev/null; then
    echo "coverage.py is not installed in your Python virtual environment; please install coverage."
    exit 1
fi
rm -rf $PYTHON_DIR/cover
if [ "$1" = "-x" ] ; then
EXCLUDE_DIRS_FILE=$TESTS_DIR/cov_exclude_dirs.txt
if [[ ! -f $EXCLUDE_DIRS_FILE ]]; then
echo ERROR: -x option: could not find exclusion file $EXCLUDE_DIRS_FILE
exit 1
fi
echo -x option: excluding files from coverage described in $EXCLUDE_DIRS_FILE
EXCLUDE_OPTION=--exclude-dir-file=$EXCLUDE_DIRS_FILE
fi
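# Run the suite: --cover-erase wipes stale coverage data, --cover-inclusive
# also measures modules never imported by any test, and --with-xunit writes
# a JUnit-style XML report for CI consumption.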
nosetests $TESTS_DIR --with-coverage --cover-package=sparktk --cover-erase --cover-inclusive --cover-html --with-xunit --xunit-file=$PYTHON_DIR/nosetests.xml $EXCLUDE_OPTION
success=$?
COVERAGE_ARCHIVE=$PYTHON_DIR/python-coverage.zip
rm *.log 2> /dev/null
rm -rf $COVERAGE_ARCHIVE
zip -rq $COVERAGE_ARCHIVE .
RESULT_FILE=$PYTHON_DIR/nosetests.xml
COVERAGE_HTML=$PYTHON_DIR/cover/index.html
echo
echo Output File: $RESULT_FILE
echo Coverage Archive: $COVERAGE_ARCHIVE
echo Coverage HTML: file://$COVERAGE_HTML
echo
unset IN_UNIT_TESTS
if [[ $success == 0 ]] ; then
echo "Python Tests Successful"
exit 0
fi
echo "Python Tests Unsuccessful"
exit 1
| ashaarunkumar/spark-tk | python/sparktk/tests/runtests.sh | Shell | apache-2.0 | 2,946 |
#!/bin/bash
# Copyright 2017 The Bootkube-CI Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
set -ex
KS_USER="charter_admin"
KS_PROJECT="ctec"
KS_PASSWORD="password"
KS_USER_DOMAIN="default"
KS_PROJECT_DOMAIN="default"
KS_URL="http://keystone.openstack/v3"
KEYSTONE_CREDS="--os-username ${KS_USER} \
--os-project-name ${KS_PROJECT} \
--os-auth-url ${KS_URL} \
--os-project-domain-name ${KS_PROJECT_DOMAIN} \
--os-user-domain-name ${KS_USER_DOMAIN} \
--os-password ${KS_PASSWORD}"
OS_TOOLKIT=$(kubectl get -n openstack pods -l application=heat,component=engine --no-headers -o name | awk -F '/' '{ print $NF; exit }')
OS_CMD="kubectl exec -n openstack ${OS_TOOLKIT} -- openstack ${KEYSTONE_CREDS} --os-identity-api-version 3 --os-image-api-version 2"
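# Every $OS_CMD call below expands to something like (hypothetical pod name):
#   kubectl exec -n openstack heat-engine-1234 -- openstack --os-username charter_admin ... server list
# i.e. the openstack CLI runs inside the heat-engine pod with the keystone
# credentials baked in.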
OS_PROJECT=$($OS_CMD project show ctec -f value -c id)
OS_SEC_GROUP=$($OS_CMD security group list -f csv | grep $OS_PROJECT | grep "default" | awk -F "," '{ print $1 }' | tr -d '"')
$OS_CMD security group rule create $OS_SEC_GROUP \
--protocol icmp \
--src-ip 0.0.0.0/0
$OS_CMD security group rule create $OS_SEC_GROUP \
--protocol tcp \
--dst-port 22:22 \
--src-ip 0.0.0.0/0
TEST_KEY="$(mktemp).pem"
$OS_CMD keypair create "ctec-test" > $TEST_KEY
chmod 600 $TEST_KEY
VM_FLAVOR_ID=$($OS_CMD flavor show "vnf.small" -f value -c id)
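# Pick the first image whose name starts with "Cirros": swap the csv columns
# so the name comes first, match on the name, then pull the ID column back
# out and strip the quotes.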
VM_IMAGE_ID=$($OS_CMD image list -f csv | awk -F ',' '{ print $2 "," $1 }' | grep "^\"Cirros" | head -1 | awk -F ',' '{ print $2 }' | tr -d '"')
NETWORK_ID=$($OS_CMD network show provider-482 -f value -c id)
$OS_CMD server create --nic net-id=$NETWORK_ID \
--flavor=$VM_FLAVOR_ID \
--image=$VM_IMAGE_ID \
--key-name="ctec-test" \
--security-group=$OS_SEC_GROUP \
"ctec-vm"
VM_ID=$($OS_CMD server list -f value -c ID)
VM_IP=$($OS_CMD server show $VM_ID -f value -c addresses | cut -f2 -d=)
sleep 20
ssh-keyscan $VM_IP >> ~/.ssh/known_hosts
ssh -i $TEST_KEY cirros@$VM_IP ping -q -c 1 -W 2 www.google.com
ssh -i $TEST_KEY cirros@$VM_IP curl http://artscene.textfiles.com/asciiart/unicorn || true
| charter-ctec/charter-nfv-demo | scripts/openstack/boot_vm_newton.sh | Shell | apache-2.0 | 2,529 |
#!/bin/bash
#/*
# Copyright 2009-2013 by The Regents of the University of California
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# you may obtain a copy of the License from
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#*/
export JAVA_HOME=/usr/local/java/vms/java
LOGSDIR=/home/onose/hyracks-asterix/logs
HYRACKS_HOME=/home/onose/src/hyracks
IPADDR=`/sbin/ifconfig eth0 | grep "inet addr" | awk '{print $2}' | cut -f 2 -d ':'`
NODEID=`ypcat hosts | grep asterix | grep -w $IPADDR | awk '{print $2}'`
export JAVA_OPTS="-Xmx10g -Djava.net.preferIPv4Stack=true -Djava.io.tmpdir=/mnt/data/sdd/space/onose/tmp"
echo $HYRACKS_HOME/hyracks-server/target/hyracks-server-0.1.3.1-binary-assembly/bin/hyracksnc -cc-host 10.1.0.1 -cc-port 2222 -data-ip-address $IPADDR -node-id $NODEID
$HYRACKS_HOME/hyracks-server/target/hyracks-server-0.1.3.1-binary-assembly/bin/hyracksnc -cc-host 10.1.0.1 -cc-port 2222 -data-ip-address $IPADDR -node-id $NODEID &> $LOGSDIR/$NODEID.log &
| sjaco002/incubator-asterixdb | asterix-app/scripts/asterix/startnc.sh | Shell | apache-2.0 | 1,383 |
# IBM_PROLOG_BEGIN_TAG
# This is an automatically generated prolog.
#
# $Source: env.bash $
#
# IBM Data Engine for NoSQL - Power Systems Edition User Library Project
#
# Contributors Listed Below - COPYRIGHT 2014,2015
# [+] International Business Machines Corp.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
# IBM_PROLOG_END_TAG
export CUSTOMFLAGS=
export BLOCK_FILEMODE_ENABLED=
export BLOCK_MC_ENABLED=
export TARGET_PLATFORM=
export BLOCK_KERNEL_MC_ENABLED=
#allow a user to specify a custom RC file if needed
#e.g. disable the advanced toolchain with "export USE_ADVANCED_TOOLCHAIN=no"
if [ -e ./customrc ]; then
echo "INFO: Running customrc"
set -x
. ./customrc
set +x
fi
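# A customrc might contain, for example (assumption):
#   export USE_ADVANCED_TOOLCHAIN=no
#   export TARGET_PLATFORM=ppc64le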
## setup git hooks for this session
## adds prologs and Change-IDs for gerrit
export SURELOCKROOT=`pwd`
TOOLSDIR=${SURELOCKROOT}/src/build/tools
if [ -e $TOOLSDIR/setupgithooks.sh ]; then
echo "Setting up gerrit hooks."
$TOOLSDIR/setupgithooks.sh
fi
export MCP_PATH=/opt/mcp/toolchains/fr_SL1_2014-05-12-194021
#configure advanced toolchain for linux
AT71PATH=/opt/at7.1
AT90PATH=/opt/at9.0-2-rc2
ATPATH=/opt/at10.0
if [ -d $MCP_PATH ]; then
echo "INFO: Found MCP: $MCP_PATH ."
echo "INFO: Enabling JAILchain for builds."
export JAIL=ppc64-mcp75-jail
else
echo "INFO: MCP Jail disabled."
fi
if [ -d $ATPATH ]; then
export ADV_TOOLCHAIN_PATH=$ATPATH
else
echo "WARNING: no toolchain was found. Will fall back to system defaults. YMMV."
fi
#don't add MCP path to the $PATH... this isn't absolutely necessary
#export PATH=${MCP_PATH}/opt/mcp/bin:${MCP_PATH}/usr/bin:${PATH}
export PATH=/opt/mcp/bin:${PATH}
export PATH=${PATH}:`pwd`/src/build/tools
#enable advanced toolchain, if no one has an opinion
if [ -z "$USE_ADVANCED_TOOLCHAIN" ]; then
    # Advanced toolchain is enabled by default. If you don't want this, set USE_ADVANCED_TOOLCHAIN=no in your environment.
export USE_ADVANCED_TOOLCHAIN=yes
fi
if [ "$USE_ADVANCED_TOOLCHAIN" = "yes" ]; then
echo "INFO: Enabling Advanced Toolchain: $ADV_TOOLCHAIN_PATH"
export PATH=${ADV_TOOLCHAIN_PATH}/bin:${ADV_TOOLCHAIN_PATH}/sbin:${PATH}
else
echo "INFO: Advanced Toolchain Disabled."
fi
#fix up sandboxes in ODE, if we need to
if [ -n "${SANDBOXROOT}" ]; then
if [ -n "${SANDBOXNAME}" ]; then
export SANDBOXBASE="${SANDBOXROOT}/${SANDBOXNAME}"
fi
fi
#set the default ulimit -c for a developer
ulimit -c unlimited
| odaira/capiflash | env.bash | Shell | apache-2.0 | 2,936 |
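# Create six 4 MiB files with random names, filled with aes-256-ctr keystream
# over /dev/zero - a common trick for producing non-compressible random-looking
# data quickly.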
for x in `seq 0 5`; do
    fname=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 32 | head -n 1)
    openssl enc -aes-256-ctr -pass pass:"$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64)" -nosalt < /dev/zero | dd of=../${fname} bs=1M count=4 iflag=fullblock
done
| akiradeveloper/dmtest | src/test/resources/REPRO_150_script.sh | Shell | apache-2.0 | 272 |
#!/bin/sh
set -e
# We currently get one warning when running Python in dev mode:
# astroid/node_classes.py:90: DeprecationWarning: The 'astroid.node_classes' module is deprecated and will be replaced
# by 'astroid.nodes' in astroid 3.0.0
# Turn off dev mode until astroid gets fixed or there's a way to suppress warnings in third party code
#export PYTHONDEVMODE=1
run () {
header='\033[95m'
endstyle='\033[0m'
echo "${header}$*${endstyle}"
eval "$*"
}
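# e.g. 'run mypy src' prints "mypy src" highlighted via the ANSI codes above,
# then evaluates it; with 'set -e' any non-zero exit aborts the whole script.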
run mypy src
run pylint --rcfile=../../.pylintrc src tests
run python -m flake8 --select=DUO src
run pip-audit --strict --progress-spinner=off -r requirements.txt -r requirements-dev.txt
run safety check --bare --ignore 41002 -r requirements.txt -r requirements-dev.txt # See https://github.com/nedbat/coveragepy/issues/1200
run bandit --quiet --recursive src/
run vulture --min-confidence 0 src/ tests/ .vulture_ignore_list.py
| ICTU/quality-time | components/shared_python/ci/quality.sh | Shell | apache-2.0 | 902 |
## Install dependencies on Ubuntu or OS X (using Homebrew)
norm_option_value()
{
if [ "$1" = on ] || [ "$1" = ON ] || [ "$1" = yes ] || [ "$1" = YES ] || [ "$1" = y ] || [ "$1" = Y ] || [ "$1" = 1 ] || [ "$1" = true ] || [ "$1" = TRUE ]; then
echo ON
elif [ "$1" = off ] || [ "$1" = OFF ] || [ "$1" = no ] || [ "$1" = NO ] || [ "$1" = n ] || [ "$1" = N ] || [ "$1" = 0 ] || [ "$1" = false ] || [ "$1" = FALSE ]; then
echo OFF
elif [ -n "$2" ]; then
echo "$2"
else
echo OFF
fi
}
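# Examples: norm_option_value y       -> ON
#           norm_option_value FALSE   -> OFF
#           norm_option_value "" ON   -> ON   (second arg is the default)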
CXX_STANDARD=11
TRAVIS=`norm_option_value "$TRAVIS" OFF`
TESTING=`norm_option_value "$TESTING" OFF`
WITH_ARPACK=`norm_option_value "$WITH_ARPACK" OFF`
WITH_UMFPACK=`norm_option_value "$WITH_UMFPACK" OFF`
WITH_ITK=`norm_option_value "$WITH_ITK" OFF`
WITH_VTK=`norm_option_value "$WITH_VTK" OFF`
WITH_TBB=`norm_option_value "$WITH_TBB" ON`
WITH_FLANN=`norm_option_value "$WITH_FLANN" ON`
WITH_FLTK=`norm_option_value "$WITH_FLTK" OFF`
WITH_CCACHE=`norm_option_value "$WITH_CCACHE" OFF`
BUILD_DEPS_WITH_CCACHE=`norm_option_value "$BUILD_DEPS_WITH_CCACHE" OFF`
FORCE_REBUILD_DEPS=`norm_option_value "$FORCE_REBUILD_DEPS" OFF`
DEBUG_VTK_BUILD=`norm_option_value "$DEBUG_VTK_BUILD" OFF`
# ------------------------------------------------------------------------------
# Auxiliary variables and functions
if [ $TRAVIS = ON ]; then
os="$TRAVIS_OS_NAME"
else
os=${1:-`uname`}
travis_wait()
{
"$@"
}
fi
run()
{
echo "> $@"
"$@"
[ $? -eq 0 ] || exit 1
}
# ------------------------------------------------------------------------------
# install pre-requisites on Ubuntu
if [ $os = linux ] || [ $os = Linux ]; then
cpu_cores=$(grep -c ^processor /proc/cpuinfo)
if [ -f /etc/lsb-release ]; then
source /etc/lsb-release
else
echo "Script works on Ubuntu only" 1>&2
exit 1
fi
if [ "$DISTRIB_ID" != "Ubuntu" ]; then
echo "Script requires Ubuntu 14.04, 16.04, or 18.04" 1>&2
exit 1
fi
cmake_cmd="$(which cmake)"
if [ -n "$cmake_cmd" ]; then
cmake_version="$("$cmake_cmd" --version | grep 'cmake version' | cut -d' ' -f3)"
echo "Found CMake version $cmake_version"
cmake_version_major="${cmake_version/.*}"
[ $? -eq 0 -a -n "$cmake_version_major" ] || cmake_version_major=0
else
cmake_version_major=0
fi
if [ ${cmake_version_major} -lt 3 ]; then
cmake_version=3.12.4
echo "Installing CMake version $cmake_version"
wget --quiet https://cmake.org/files/v${cmake_version%.*}/cmake-${cmake_version}-Linux-x86_64.tar.gz -O /tmp/cmake.tar.gz
mkdir /opt/cmake-${cmake_version}
tar xf /tmp/cmake.tar.gz -C /opt/cmake-${cmake_version} --strip-components=1 -h
rm -f /tmp/cmake.tar.gz
cmake_cmd="/opt/cmake-${cmake_version}/bin/cmake"
fi
deps=( \
freeglut3-dev \
libboost-math-dev \
libboost-random-dev \
libeigen3-dev \
libnifti-dev \
libpng-dev \
)
[ $TESTING = OFF ] || deps=(${deps[@]} libgtest-dev)
[ $WITH_TBB = OFF ] || deps=(${deps[@]} libtbb-dev)
[ $WITH_FLANN = OFF ] || deps=(${deps[@]} libflann-dev)
[ $WITH_ARPACK = OFF ] || deps=(${deps[@]} libarpack2-dev)
if [ $WITH_UMFPACK = ON ]; then
# see https://bugs.launchpad.net/ubuntu/+source/suitesparse/+bug/1333214
# sudo add-apt-repository -y ppa:bzindovic/suitesparse-bugfix-1319687 || exit 1
deps=(${deps[@]} libsuitesparse-dev)
fi
if [ $WITH_VTK = ON ]; then
if [ -n "$LINUX_VTK_VERSION" ]; then
VTK_VERSION="$LINUX_VTK_VERSION"
fi
if [ "$DISTRIB_CODENAME" = "trusty" ]; then
if [ -z "$VTK_VERSION" ] || [ $VTK_VERSION = '6.0.0' ]; then
deps=(${deps[@]} libvtk6-dev)
VTK_VERSION=''
fi
elif [ "$DISTRIB_CODENAME" = "xenial" ]; then
if [ -z "$VTK_VERSION" ] || [ $VTK_VERSION = '6.2.0' ]; then
deps=(${deps[@]} libvtk6-dev python-vtk6)
VTK_VERSION=''
fi
elif [ "$DISTRIB_CODENAME" = "bionic" ] || [ "$DISTRIB_CODENAME" = "focal" ] || [ "$DISTRIB_CODENAME" = "groovy" ]; then
if [ "$VTK_VERSION" = '6.3.0' ]; then
deps=(${deps[@]} libvtk6-dev)
VTK_VERSION=''
elif [ -z "$VTK_VERSION" ] || [ $VTK_VERSION = '7.1.1' ]; then
deps=(${deps[@]} libvtk7-dev)
VTK_VERSION=''
fi
elif [ -z "$VTK_VERSION" ]; then
deps=(${deps[@]} libvtk7-dev)
fi
if [ $WITH_FLTK = ON ]; then
deps=(${deps[@]} libxi-dev libxmu-dev libxinerama-dev libxcursor-dev libcairo-dev libfltk1.3-dev)
fi
fi
if [ $WITH_ITK = ON ]; then
deps=(${deps[@]} libinsighttoolkit4-dev libfftw3-dev uuid-dev)
fi
sudo apt-get update -qq || exit 1
sudo apt-get install -y --no-install-recommends ${deps[@]} || exit 1
if [ $TESTING = ON ]; then
# libgtest-dev only install source files
mkdir /tmp/gtest-build && cd /tmp/gtest-build
run "$cmake_cmd" /usr/src/gtest
run make -j $cpu_cores
run sudo mv -f libgtest.a libgtest_main.a /usr/lib
cd || exit 1
[ $TRAVIS = ON ] || rm -rf /tmp/gtest-build
fi
fi
# ------------------------------------------------------------------------------
# install pre-requisites on OS X
if [ $os = osx ] || [ $os = Darwin ]; then
cpu_cores=$(sysctl -n hw.ncpu)
brew_install()
{
for dep in $@; do
if $(brew ls $dep &> /dev/null); then
brew unlink $dep && brew link $dep
[ $? -eq 0 ] || exit 1
else
brew install $dep
fi
done
}
brew update > /dev/null || exit 1
if [ $WITH_CCACHE = ON ]; then
brew_install ccache
fi
brew_install eigen flann tbb
if [ $WITH_ARPACK = ON ]; then
brew_install arpack
fi
if [ $WITH_UMFPACK = ON ]; then
brew_install suite-sparse
fi
if [ $WITH_VTK = ON ]; then
VTK_VERSION="$MACOS_VTK_VERSION"
if [ -z "$VTK_VERSION" ]; then
echo "Installing VTK using Homebrew"
brew_install vtk
elif [ "$VTK_VERSION" = "8.2" ] || [ "$VTK_VERSION" == "9.0" ]; then
echo "Installing VTK $VTK_VERSION using Homebrew"
brew_install vtk@$VTK_VERSION
VTK_VERSION="" # skip installation from source code below
fi
if [ $WITH_FLTK = ON ]; then
brew_install fltk
fi
fi
if [ $WITH_ITK = ON ]; then
brew_install itk fftw libuuid
fi
# download, build, and install gtest
if [ $TESTING = ON ]; then
run git clone --depth=1 https://github.com/google/googletest.git /tmp/gtest-source
mkdir /tmp/gtest-build && cd /tmp/gtest-build
[ $? -eq 0 ] || exit 1
run "$cmake_cmd" -DCMAKE_CXX_STANDARD=$CXX_STANDARD -DBUILD_GMOCK=OFF -DBUILD_GTEST=ON ../gtest-source
run make -j $cpu_cores
run sudo make install
cd || exit 1
[ $TRAVIS = ON ] || rm -rf /tmp/gtest-build /tmp/gtest-source
fi
fi
# ------------------------------------------------------------------------------
# Install specific VTK version from source
if [ $WITH_VTK = ON ] && [ -n "$VTK_VERSION" ]; then
vtk_prefix="${VTK_PREFIX:-$HOME/VTK-$VTK_VERSION}"
# build configuration
cmake_args=(
-DCMAKE_INSTALL_PREFIX="$vtk_prefix"
-DCMAKE_BUILD_TYPE=Release
)
# pre-requisites to use system installations
if [ $os = osx ] || [ $os = Darwin ]; then
brew_install hdf5 netcdf jpeg libpng libtiff lz4
if [ ${VTK_VERSION/.*/} -lt 9 ]; then
cmake_args+=(
-DVTK_USE_SYSTEM_HDF5=ON
-DVTK_USE_SYSTEM_EXPAT=ON
-DVTK_USE_SYSTEM_LIBXML2=ON
-DVTK_USE_SYSTEM_ZLIB=ON
-DVTK_USE_SYSTEM_NETCDF=ON
-DVTK_USE_SYSTEM_JPEG=ON
-DVTK_USE_SYSTEM_PNG=ON
-DVTK_USE_SYSTEM_TIFF=ON
-DVTK_USE_SYSTEM_LIBRARIES=ON
)
else
cmake_args+=(
-DVTK_MODULE_USE_EXTERNAL_VTK_hdf5=ON
-DVTK_MODULE_USE_EXTERNAL_VTK_expat=ON
-DVTK_MODULE_USE_EXTERNAL_VTK_libxml2=ON
-DVTK_MODULE_USE_EXTERNAL_VTK_zlib=ON
-DVTK_MODULE_USE_EXTERNAL_VTK_netcdf=ON
-DVTK_MODULE_USE_EXTERNAL_VTK_jpeg=ON
-DVTK_MODULE_USE_EXTERNAL_VTK_png=ON
-DVTK_MODULE_USE_EXTERNAL_VTK_tiff=ON
)
fi
fi
if [ $FORCE_REBUILD_DEPS = OFF ] && [ -d "$vtk_prefix/lib/cmake/vtk-${VTK_VERSION%.*}" ]; then
# use previously cached VTK installation
echo "Using cached VTK $VTK_VERSION installation in $vtk_prefix"
else
run rm -rf "$vtk_prefix"
# custom build instead of Homebrew to take advantage of caching of minimal build
cd /tmp
echo "Downloading VTK $VTK_VERSION..."
run curl -L -o "VTK-${VTK_VERSION}.tar.gz" "https://github.com/Kitware/VTK/archive/v${VTK_VERSION}.tar.gz"
run tar -xzf "VTK-${VTK_VERSION}.tar.gz"
mkdir "VTK-${VTK_VERSION}/Build" && cd "VTK-${VTK_VERSION}/Build"
[ $? -eq 0 ] || exit 1
[ "$DEBUG_VTK_BUILD" != "ON" ] || set -x
echo "Configuring VTK $VTK_VERSION..."
cmake_args+=(
-DBUILD_TESTING=OFF
-DCMAKE_CXX_STANDARD=$CXX_STANDARD
)
if [ ${VTK_VERSION/.*/} -lt 9 ]; then
cmake_args+=(
-DVTK_Group_Rendering=OFF
-DVTK_Group_StandAlone=OFF
-DVTK_WRAP_PYTHON=OFF
-DModule_vtkCommonCore=ON
-DModule_vtkCommonDataModel=ON
-DModule_vtkCommonExecutionModel=ON
-DModule_vtkFiltersCore=ON
-DModule_vtkFiltersHybrid=ON
-DModule_vtkFiltersFlowPaths=ON
-DModule_vtkFiltersGeneral=ON
-DModule_vtkFiltersGeometry=ON
-DModule_vtkFiltersParallel=ON
-DModule_vtkFiltersModeling=ON
-DModule_vtkImagingStencil=ON
-DModule_vtkIOLegacy=ON
-DModule_vtkIOXML=ON
-DModule_vtkIOGeometry=ON
-DModule_vtkIOPLY=ON
-DModule_vtkIOXML=ON
)
else
cmake_args+=(
-DVTK_GROUP_ENABLE_Imaging=YES
-DVTK_GROUP_ENABLE_MPI=DONT_WANT
-DVTK_GROUP_ENABLE_Qt=DONT_WANT
-DVTK_GROUP_ENABLE_Rendering=DONT_WANT
-DVTK_GROUP_ENABLE_StandAlone=DONT_WANT
-DVTK_GROUP_ENABLE_Views=DONT_WANT
-DVTK_GROUP_ENABLE_Web=DONT_WANT
-DVTK_MODULE_ENABLE_VTK_CommonCore=YES
-DVTK_MODULE_ENABLE_VTK_CommonDataModel=YES
-DVTK_MODULE_ENABLE_VTK_CommonExecutionModel=YES
-DVTK_MODULE_ENABLE_VTK_FiltersCore=YES
-DVTK_MODULE_ENABLE_VTK_FiltersHybrid=YES
-DVTK_MODULE_ENABLE_VTK_FiltersFlowPaths=YES
-DVTK_MODULE_ENABLE_VTK_FiltersGeneral=YES
-DVTK_MODULE_ENABLE_VTK_FiltersGeometry=YES
-DVTK_MODULE_ENABLE_VTK_FiltersParallel=YES
-DVTK_MODULE_ENABLE_VTK_FiltersModeling=YES
-DVTK_MODULE_ENABLE_VTK_ImagingStencil=YES
-DVTK_MODULE_ENABLE_VTK_IOLegacy=YES
-DVTK_MODULE_ENABLE_VTK_IOXML=YES
-DVTK_MODULE_ENABLE_VTK_IOGeometry=YES
-DVTK_MODULE_ENABLE_VTK_IOPLY=YES
-DVTK_MODULE_ENABLE_VTK_IOXML=YES
)
fi
if [ $WITH_CCACHE = ON ] && [ $BUILD_DEPS_WITH_CCACHE = ON ]; then
cc_compiler=''
cxx_compiler=''
launcher=`which ccache`
[ -z "$CC" ] || cc_compiler=`which $CC`
[ -z "$CXX" ] || cc_compiler=`which $CXX`
if [ "$cc_compiler" = "${cc_compiler/ccache/}" ]; then
echo "Using $launcher as C compiler launcher"
cmake_args+=(-DCMAKE_C_COMPILER_LAUNCHER="$launcher")
fi
if [ "$cxx_compiler" = "${cxx_compiler/ccache/}" ]; then
echo "Using $launcher as C++ compiler launcher"
cmake_args+=(-DCMAKE_CXX_COMPILER_LAUNCHER="$launcher")
fi
fi
run "$cmake_cmd" "${cmake_args[@]}" ..
echo "Configuring VTK $VTK_VERSION... done"
echo "Building VTK $VTK_VERSION..."
run make -j $cpu_cores
echo "Building VTK $VTK_VERSION... done"
echo "Installing VTK $VTK_VERSION..."
run sudo make install
echo "Installing VTK $VTK_VERSION... done"
cd
[ $TRAVIS = ON ] || rm -rf "/tmp/VTK-${VTK_VERSION}"
fi
mkdir -p "$HOME/.cmake/packages/VTK" || exit 1
echo "$vtk_prefix/lib/cmake/vtk-${VTK_VERSION%.*}" > "$HOME/.cmake/packages/VTK/VTK-$VTK_VERSION"
set +x
fi
| BioMedIA/MIRTK | Scripts/install_depends.sh | Shell | apache-2.0 | 11,825 |
#!/usr/bin/env bash
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
cd $(dirname $DIR)
php composer.phar update eivindarvesen/b3 --prefer-dist
cd -
| eivind88/blablablog | scripts/updateB3.sh | Shell | bsd-3-clause | 160 |
#!/bin/bash
rsync -arz --delete metrics.torproject.org::metrics-recent/relay-descriptors/consensuses in
rsync -arz --delete metrics.torproject.org::metrics-recent/relay-descriptors/server-descriptors in
| gsathya/metrics-tasks | task-6498/download.sh | Shell | bsd-3-clause | 204 |
#!/bin/sh
export P=`pwd`
cd $P/..
./configure
cd $P
| virtdb/virtdb-gtest | gyp/configure.sh | Shell | bsd-3-clause | 52 |
# if using GNU screen, let the zsh tell screen what the title and hardstatus
# of the tab window should be.
if [[ $TERM == "screen" ]]; then
if [[ $_GET_PATH == '' ]]; then
_GET_PATH='echo $PWD | sed "s/^\/Users\//~/;s/^\/home\//~/;s/^~$USER/~/"'
fi
if [[ $_GET_HOST == '' ]]; then
_GET_HOST='echo $HOST | sed "s/\..*//"'
fi
# use the current user as the prefix of the current tab title
TAB_TITLE_PREFIX='"`'$_GET_HOST'`:`'$_GET_PATH' | sed "s:..*/::"`$PROMPT_CHAR"'
# when at the shell prompt, show a truncated version of the current path (with
# standard ~ replacement) as the rest of the title.
TAB_TITLE_PROMPT='$SHELL:t'
# when running a command, show the title of the command as the rest of the
# title (truncate to drop the path to the command)
TAB_TITLE_EXEC='$cmd[1]:t'
# use the current path (with standard ~ replacement) in square brackets as the
# prefix of the tab window hardstatus.
TAB_HARDSTATUS_PREFIX='"[`'$_GET_PATH'`] "'
# when at the shell prompt, use the shell name (truncated to remove the path to
# the shell) as the rest of the title
TAB_HARDSTATUS_PROMPT='$SHELL:t'
# when running a command, show the command name and arguments as the rest of
# the title
TAB_HARDSTATUS_EXEC='$cmd'
# tell GNU screen what the tab window title ($1) and the hardstatus($2) should be
function screen_set()
{
# set the tab window title (%t) for screen
print -nR $'\033k'$1$'\033'\\\
# set hardstatus of tab window (%h) for screen
print -nR $'\033]0;'$2$'\a'
}
# called by zsh before executing a command
function preexec()
{
local -a cmd; cmd=(${(z)1}) # the command string
eval "tab_title=$TAB_TITLE_PREFIX:$TAB_TITLE_EXEC"
eval "tab_hardstatus=$TAB_HARDSTATUS_PREFIX:$TAB_HARDSTATUS_EXEC"
screen_set $tab_title $tab_hardstatus
}
# called by zsh before showing the prompt
function precmd()
{
eval "tab_title=$TAB_TITLE_PREFIX:$TAB_TITLE_PROMPT"
eval "tab_hardstatus=$TAB_HARDSTATUS_PREFIX:$TAB_HARDSTATUS_PROMPT"
screen_set $tab_title $tab_hardstatus
}
fi
| drahnr/oh-my-zsh | plugins/screen/screen.plugin.zsh | Shell | mit | 2,083 |
#!/bin/bash
tar -xzf env.tar.gz
source bin/activate
python resnet.py -b 64 -r $3 -d $2 -e 30 -s 10 -o $1
| dthain/cctools | apps/wq_hypersweep/script.sh | Shell | gpl-2.0 | 106 |
#!/bin/bash
# Set path to phantomjs
export PATH=$PATH:./bin/`uname`/
while test $# -gt 0; do
case "$1" in
-a)
shift
echo "Running all tests..."
casperjs/bin/casperjs test 001_failed_login.js
casperjs/bin/casperjs test 002_successful_login.js
casperjs/bin/casperjs test 003_create_user.js
casperjs/bin/casperjs test 004_create_page.js
casperjs/bin/casperjs test 005_rename_page.js
casperjs/bin/casperjs test 006_inline_editing.js
casperjs/bin/casperjs test 007_add_element_to_page.js
casperjs/bin/casperjs test 008_create_folder.js
casperjs/bin/casperjs test 009_create_and_edit_file.js
casperjs/bin/casperjs test 010_page_visibility.js
echo "Done with all tests."
;;
*)
echo "Running test $1"
casperjs/bin/casperjs test $1
shift
esac
done
| joansmith/structr | structr-ui/src/test/javascript/run_tests.sh | Shell | gpl-3.0 | 804 |
#!/bin/bash
mkdir -p syn_yos
~/yosys/yosys -s sum.yos
| esonghori/TinyGarbled | circuit_synthesis/sum/compile_yos.sh | Shell | gpl-3.0 | 54 |
#!/bin/bash
# Authors:
# Petr Vobornik <[email protected]>
#
# Copyright (C) 2012 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
set -o errexit
#
# This script prepares a working environment to use the Dojo toolkit.
#
# It checks out public git mirrors of the Dojo svns, then applies custom
# patches and makes symbolic links from install/ui/js/dojo and
# install/ui/js/util.
# freeipa/install/ui absolute path - to use when this script is not run from
# the install/ui directory
usage() {
cat <<-__EOF__;
NAME
prepare-dojo.sh - prepare FreeIPA Web UI developmnent enviroment to work
with Dojo Library
SYNOPSIS
path/to/prepare-dojo.sh [--help] [--all] [other options]
DESCRIPTION
prepare-dojo.sh is a shell script which prepares FreeIPA Web UI enviroment
for creating custom Dojo/Dojo or Dojo/Util/Build builds.
OPTIONS
--help print the help message
--clone clone git repository
--checkout checkout git repository
--patches applies custom patches, must be used with --checkout
--links makes symbolic links from src directory to Dojo directory
--dojo work with Dojo
--util work with Util
--all Do --clone --checkout --patches --links --dojo --util
--branch <br> Specify a Dojo branch/tag/hash to checkout, default: 1.16.2
--dir <dir> Specify a clone dir, default: freeipa/../dojo/
__EOF__
}
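# Typical first-time invocation (assumption): ./prepare-dojo.sh --all
# which clones, checks out $BRANCH, patches util, and symlinks both trees.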
if [ "$#" = "0" ] ; then
usage
exit 0
fi
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# relative path for target dir to checkout dojo
DOJO_DIR=$DIR/../../../../dojo
# working version of Dojo toolkit
BRANCH='1.16.2'
YES='YES'
args=`getopt -q -u -l help,checkout,clone,patches,links,dojo,util,all,branch:,dir: a $*`
if test $? != 0
then
usage
exit 1
fi
set -- $args
for i
do
case "$i" in
--help)
shift;
HELP=$YES
;;
--checkout)
shift;
CHECKOUT=$YES
;;
--clone)
shift;
CLONE=$YES
;;
--patches)
shift;
PATCHES=$YES
;;
--links)
shift;
LINKS=$YES
;;
--dojo)
shift;
DOJO=$YES
;;
--util)
shift;
UTIL=$YES
;;
--all | -a)
shift;
CHECKOUT=$YES
CLONE=$YES
PATCHES=$YES
LINKS=$YES
DOJO=$YES
UTIL=$YES
ALL=$YES
;;
--branch)
shift;
BRANCH=$1
shift;
;;
--dir)
shift;
DOJO_DIR=$1
shift;
;;
*)
;;
esac
done
if [[ $HELP = $YES ]] ; then
usage
exit 0
fi
if [ ! -d $DOJO_DIR ] ; then
mkdir $DOJO_DIR
fi
# clone dojo git repositories
pushd $DOJO_DIR
if [[ $DOJO = $YES ]] ; then
if [[ $CLONE = $YES ]] ; then
git clone https://github.com/dojo/dojo.git
fi
pushd dojo
if [[ $CHECKOUT = $YES ]] ; then
git clean -dfx
git checkout master
git fetch --tags
git fetch
git checkout $BRANCH
fi
popd
if [[ $LINKS = $YES ]] ; then
rm -f $DIR/../src/dojo
ln -s $DOJO_DIR/dojo $DIR/../src/dojo
fi
fi
if [[ $UTIL = $YES ]] ; then
if [[ $CLONE = $YES ]] ; then
git clone https://github.com/dojo/util.git
fi
pushd util
if [[ $CHECKOUT = $YES ]] ; then
git clean -dfx
git checkout master
git fetch --tags
git fetch
git checkout $BRANCH
fi
if [[ $PATCHES = $YES ]] ; then
# apply util custom patches
git am $DIR/build/patches/*.patch
fi
popd
if [[ $LINKS = $YES ]] ; then
rm -f $DIR/../src/build
ln -s $DOJO_DIR/util/build $DIR/../src/build
fi
fi
popd # $DOJO_DIR
| encukou/freeipa | install/ui/util/prepare-dojo.sh | Shell | gpl-3.0 | 4,738 |
#!/bin/bash
SHELL=/bin/zsh exec /bin/zsh --login
| mirceal/beekeeper | test/helpers/switch_to_zsh.sh | Shell | apache-2.0 | 49 |
#!/bin/bash
MY_PATH="`dirname \"$0\"`" # relative
MY_PATH="`( cd \"$MY_PATH\" && pwd )`" # absolutized and normalized
if [ -z "$MY_PATH" ] ; then
# error; for some reason, the path is not accessible
# to the script (e.g. permissions re-evaled after suid)
exit 1 # fail
fi
if [ -z "$1" ] ; then
echo please provide virtual environment. eg: /env/bin/activate
exit 1 # fail
fi
PROJDIR=$MY_PATH/../vaultier
PIDFILE="$MY_PATH/../run/vaultier.pid"
SOCKET="$MY_PATH/../run/vaultier.sock"
LOGSDIR="$MY_PATH/../logs"
VENV="$1"
NAME="Vaultier"
NUM_WORKERS=4
DJANGO_SETTINGS_MODULE=vaultier.settings.prod
DJANGO_WSGI_MODULE=vaultier.wsgi
echo "Starting $NAME as `whoami`"
# prepare environment
cd $PROJDIR
source $VENV
export DJANGO_SETTINGS_MODULE=$DJANGO_SETTINGS_MODULE
export PYTHONPATH=$PROJDIR:$PYTHONPATH
# Create the run directory if it doesn't exist
RUNDIR=$(dirname $SOCKET)
test -d $RUNDIR || mkdir -p $RUNDIR
# Create the logs directory if it doesn't exist
test -d $LOGSDIR || mkdir -p $LOGSDIR
# Start your Django Unicorn
# Programs meant to be run under supervisor should not daemonize themselves (do not use --daemon)
exec gunicorn ${DJANGO_WSGI_MODULE}:application \
--name $NAME \
--workers $NUM_WORKERS \
--log-level=debug \
--bind=unix:$SOCKET
| karinepires/vaultier | bin/vaultier_start.sh | Shell | bsd-3-clause | 1,299 |
#!/bin/sh
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
set -e
for cmd in gen-signedexchange gen-certurl; do
if ! command -v $cmd > /dev/null 2>&1; then
echo "$cmd is not installed. Please run:"
echo " go get -u github.com/WICG/webpackage/go/signedexchange/cmd/..."
echo ' export PATH=$PATH:$(go env GOPATH)/bin'
exit 1
fi
done
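# "1b3" selects draft version b3 of the signed exchange wire format (the
# implementation checkpoint Chromium shipped); other drafts are presumably
# selectable via -version.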
sxg_version=1b3
certs_dir=../../../../../../../blink/tools/blinkpy/third_party/wpt/certs
tmpdir=$(mktemp -d)
# Make dummy OCSP data for cbor certificate chains.
echo -n OCSP >$tmpdir/ocsp
# Generate the certificate chain of "127.0.0.1.sxg.pem".
gen-certurl \
-pem $certs_dir/127.0.0.1.sxg.pem \
-ocsp $tmpdir/ocsp \
> 127.0.0.1.sxg.pem.cbor
# Generate the signed exchange file.
gen-signedexchange \
-version $sxg_version \
-uri https://127.0.0.1:8443/loading/sxg/resources/inner-url.html \
-status 200 \
-content sxg-location.html \
-certificate $certs_dir/127.0.0.1.sxg.pem \
-certUrl https://127.0.0.1:8443/loading/sxg/resources/127.0.0.1.sxg.pem.cbor \
-validityUrl https://127.0.0.1:8443/loading/sxg/resources/resource.validity.msg \
-privateKey $certs_dir/127.0.0.1.sxg.key \
-date 2018-04-01T00:00:00Z \
-expire 168h \
-o sxg-location.sxg \
-miRecordSize 100
# Generate the signed exchange file which also reports use counter info.
gen-signedexchange \
-version $sxg_version \
-uri https://127.0.0.1:8443/loading/sxg/resources/inner-url.html \
-status 200 \
-content sxg-usecounter.html \
-certificate $certs_dir/127.0.0.1.sxg.pem \
-certUrl https://127.0.0.1:8443/loading/sxg/resources/127.0.0.1.sxg.pem.cbor \
-validityUrl https://127.0.0.1:8443/loading/sxg/resources/resource.validity.msg \
-privateKey $certs_dir/127.0.0.1.sxg.key \
-date 2018-04-01T00:00:00Z \
-expire 168h \
-o sxg-usecounter.sxg \
-miRecordSize 100
# Generate the signed exchange file whose certificate file is not available.
gen-signedexchange \
-version $sxg_version \
-uri https://127.0.0.1:8443/loading/sxg/resources/inner-url.html \
-status 200 \
-content sxg-location.html \
-certificate $certs_dir/127.0.0.1.sxg.pem \
-certUrl https://127.0.0.1:8443/loading/sxg/resources/not_found_cert.pem.cbor \
-validityUrl https://127.0.0.1:8443/loading/sxg/resources/not_found_cert.validity.msg \
-privateKey $certs_dir/127.0.0.1.sxg.key \
-date 2018-04-01T00:00:00Z \
-expire 168h \
-o sxg-cert-not-found.sxg \
-miRecordSize 100
# Generate the signed exchange file whose validity URL is a different origin
# from the request URL.
gen-signedexchange \
-version $sxg_version \
-uri https://127.0.0.1:8443/loading/sxg/resources/inner-url.html \
-status 200 \
-content sxg-location.html \
-certificate $certs_dir/127.0.0.1.sxg.pem \
-certUrl https://127.0.0.1:8443/loading/sxg/resources/127.0.0.1.sxg.pem.cbor \
-validityUrl https://127.0.0.1:8444/loading/sxg/resources/resource.validity.msg \
-privateKey $certs_dir/127.0.0.1.sxg.key \
-date 2018-04-01T00:00:00Z \
-expire 168h \
-o sxg-invalid-validity-url.sxg \
-miRecordSize 100 \
-ignoreErrors true
# Generate the signed exchange file larger than 10KB.
gen-signedexchange \
-version $sxg_version \
-uri https://127.0.0.1:8443/loading/sxg/resources/inner-url.html \
-status 200 \
-content sxg-larger-than-10k.html \
-certificate $certs_dir/127.0.0.1.sxg.pem \
-certUrl https://127.0.0.1:8443/loading/sxg/resources/127.0.0.1.sxg.pem.cbor \
-validityUrl https://127.0.0.1:8443/loading/sxg/resources/resource.validity.msg \
-privateKey $certs_dir/127.0.0.1.sxg.key \
-date 2018-04-01T00:00:00Z \
-expire 168h \
-o sxg-larger-than-10k.sxg \
-miRecordSize 100
# Generate the signed exchange file of sxg-subresource-script-inner.js.
gen-signedexchange \
-version $sxg_version \
-uri https://127.0.0.1:8443/loading/sxg/resources/sxg-subresource-script.js \
-status 200 \
-content sxg-subresource-script-inner.js \
-certificate $certs_dir/127.0.0.1.sxg.pem \
-certUrl https://127.0.0.1:8443/loading/sxg/resources/127.0.0.1.sxg.pem.cbor \
-validityUrl https://127.0.0.1:8443/loading/sxg/resources/resource.validity.msg \
-privateKey $certs_dir/127.0.0.1.sxg.key \
-date 2030-04-01T00:00:00Z \
-expire 168h \
-o sxg-subresource-script.sxg \
-miRecordSize 100 \
-responseHeader "content-type:text/javascript; charset=utf-8"
# Get the header integrity hash value of sxg-subresource-script.sxg.
header_integrity=$(dump-signedexchange -i sxg-subresource-script.sxg | \
grep -o "header integrity: sha256-.*" | \
grep -o "sha256-.*$")
# Generate the signed exchange file for Origin Trial test.
gen-signedexchange \
-version $sxg_version \
-uri https://127.0.0.1:8443/loading/sxg/resources/sxg-subresource-origin-trial-inner-page.html \
-status 200 \
-content sxg-subresource-origin-trial-page.html \
-certificate $certs_dir/127.0.0.1.sxg.pem \
-certUrl https://127.0.0.1:8443/loading/sxg/resources/127.0.0.1.sxg.pem.cbor \
-validityUrl https://127.0.0.1:8443/loading/sxg/resources/resource.validity.msg \
-privateKey $certs_dir/127.0.0.1.sxg.key \
-date 2030-04-01T00:00:00Z \
-expire 168h \
-o sxg-subresource-origin-trial-page.sxg \
-miRecordSize 100 \
-responseHeader "link:<https://127.0.0.1:8443/loading/sxg/resources/sxg-subresource-script.js>;rel=allowed-alt-sxg;header-integrity=\"$header_integrity\",<https://127.0.0.1:8443/loading/sxg/resources/sxg-subresource-script.js>;rel=preload;as=script"
rm -fr $tmpdir
| nwjs/chromium.src | third_party/blink/web_tests/http/tests/loading/sxg/resources/generate-test-sxgs.sh | Shell | bsd-3-clause | 5,646 |
#!/bin/bash
FN="genomewidesnp6Crlmm_1.0.7.tar.gz"
URLS=(
"https://bioconductor.org/packages/3.10/data/annotation/src/contrib/genomewidesnp6Crlmm_1.0.7.tar.gz"
"https://bioarchive.galaxyproject.org/genomewidesnp6Crlmm_1.0.7.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-genomewidesnp6crlmm/bioconductor-genomewidesnp6crlmm_1.0.7_src_all.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-genomewidesnp6crlmm/bioconductor-genomewidesnp6crlmm_1.0.7_src_all.tar.gz"
)
MD5="13dd5aef3d814524896f2f3013beb78b"
# Use a staging area in the conda dir rather than temp dirs, both to avoid
# permission issues as well as to have things downloaded in a predictable
# manner.
STAGING=$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM
mkdir -p $STAGING
TARBALL=$STAGING/$FN
SUCCESS=0
for URL in ${URLS[@]}; do
curl $URL > $TARBALL
[[ $? == 0 ]] || continue
# Platform-specific md5sum checks.
if [[ $(uname -s) == "Linux" ]]; then
if md5sum -c <<<"$MD5 $TARBALL"; then
SUCCESS=1
break
fi
else if [[ $(uname -s) == "Darwin" ]]; then
if [[ $(md5 $TARBALL | cut -f4 -d " ") == "$MD5" ]]; then
SUCCESS=1
break
fi
fi
fi
done
if [[ $SUCCESS != 1 ]]; then
echo "ERROR: post-link.sh was unable to download any of the following URLs with the md5sum $MD5:"
printf '%s\n' "${URLS[@]}"
exit 1
fi
# Install and clean up
R CMD INSTALL --library=$PREFIX/lib/R/library $TARBALL
rm $TARBALL
rmdir $STAGING
| Luobiny/bioconda-recipes | recipes/bioconductor-genomewidesnp6crlmm/post-link.sh | Shell | mit | 1,475 |
#!/bin/bash -uxe
VAGRANT_USER=${VAGRANT_USER:-vagrant}
VAGRANT_USER_HOME=${VAGRANT_USER_HOME:-/home/${VAGRANT_USER}}
SUDOERS_FILE="/etc/sudoers.d/${VAGRANT_USER}"
# KEY_URL=https://raw.githubusercontent.com/mitchellh/vagrant/master/keys/vagrant.pub
# wget --no-check-certificate -O authorized_keys "${KEY_URL}"
VAGRANT_INSECURE_KEY="ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA6NF8iallvQVp22WDkTkyrtvp9eWW6A8YVr+kz4TjGYe7gHzIw+niNltGEFHzD8+v1I2YJ6oXevct1YeS0o9HZyN1Q9qgCgzUFtdOKLv6IedplqoPkcmF0aYet2PkEDo3MlTBckFXPITAMzF8dJSIFo9D8HfdOV0IAdx4O7PtixWKn5y2hMNG0zQPyUecp4pzC6kivAIhyfHilFR61RGL+GPXQ2MWZWFYbAGjyiYJnAmCP3NOTd0jMZEnDkbUvxhMmBYSdETk1rRgm+R4LOzFUGaHqHDLKLX+FIPKcF96hrucXzcWyLbIbEgE98OHlnVYCzRdK8jlqm8tehUc9c9WhQ== vagrant insecure public key"
prog=${0##*/}
function say () { builtin echo $(date --rfc-3339=s): $prog "$@"; }
function usage ()
{
for info in "$@"; do
say "$info"
done
cat <<EOF
Usage: $0 [-g {groupadd_args}] [-u {useradd_args}] [username]
-g "{groupadd_args}"
-u "{useradd_args}"
EOF
}
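# Example (assumption): ./vagrantize.sh -u "-s /bin/bash" -g "-g 1000"
# creates the vagrant group/user with the extra groupadd/useradd arguments.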
#[[ "$#" -eq 0 ]] && usage "an argument is required" && exit 1
while getopts "g:u:" opt; do
case "$opt" in
g) GROUPADD_ARGS=${OPTARG} ;;
u) USERADD_ARGS=${OPTARG} ;;
h) usage && exit 0 ;;
\?) errors=1 ;;
esac
done
shift $((OPTIND-1))
[[ -n "$errors" ]] && usage "invalid arguments" && exit 3
[[ "$#" -ne 0 ]] && usage "invalid argument: $1" && exit 3
# https://github.com/sequenceiq/docker-pam/blob/master/centos-6.5/Dockerfile
# - so su will not work...
# see also:
# - https://github.com/boxcutter/centos/blob/master/script/vagrant.sh
# - https://github.com/smerrill/docker-vagrant-centos/blob/master/centos-6/provision.sh
# redhat-lsb-core rsync
yum install -y initscripts awk xargs openssh-clients openssh-server rsyslog sudo
# generate ssh keys
service sshd start
service sshd stop
# turn off all services by default
chkconfig --list | awk '!/ssh|syslog/ && /:on/{print $1}' | xargs -I {} chkconfig {} off
# Set up some things to make /sbin/init and udev work (or not start as appropriate)
# https://github.com/dotcloud/docker/issues/1240#issuecomment-21807183
# ALREADY ON: echo "NETWORKING=yes" > /etc/sysconfig/network
# http://gaijin-nippon.blogspot.com/2013/07/audit-on-lxc-host.html
sed -i -e '/pam_loginuid\.so/ d' /etc/pam.d/sshd
sed -i -e 's/^\(UsePam\) yes/\1 no/i' /etc/ssh/sshd_config
# Kill udev. (http://serverfault.com/a/580504/82874)
echo " " > /sbin/start_udev
# No more requiretty for sudo. (Vagrant likes to run Puppet/shell via sudo.)
sed -i 's/.*requiretty$/Defaults !requiretty/' /etc/sudoers
# Let this run as an unmodified Vagrant box
echo 'Configuring settings for vagrant...'
useradd=(-m -g "${VAGRANT_USER}" -G wheel $USERADD_ARGS)
echo "Creating group '${VAGRANT_USER}' with args '${GROUPADD_ARGS}'"
groupadd ${GROUPADD_ARGS} "${VAGRANT_USER}"
echo "Creating user '${VAGRANT_USER}' with args '${useradd[@]}'"
useradd "${useradd[@]}" "${VAGRANT_USER}"
echo "${VAGRANT_USER}:${VAGRANT_USER}" | chpasswd
echo "Creating sudoers file '${SUDOERS_FILE}'"
echo "${VAGRANT_USER} ALL=(ALL) NOPASSWD: ALL" > "${SUDOERS_FILE}"
chmod 0440 "${SUDOERS_FILE}"
echo "Installing vagrant ssh key"
mkdir -pm 700 ${VAGRANT_USER_HOME}/.ssh
echo "${VAGRANT_INSECURE_KEY}" > "${VAGRANT_USER_HOME}/.ssh/authorized_keys"
chmod 0600 "${VAGRANT_USER_HOME}/.ssh/authorized_keys"
chown -R "${VAGRANT_USER}:${VAGRANT_USER}" "${VAGRANT_USER_HOME}/.ssh"
yum clean all
| danchr/vagrant-provision-zimbra | centos6/vagrantize.sh | Shell | mit | 3,456 |
#! /bin/bash
maim scr_$(date +%s).png
pacat -p .icewm/winoptions
| KERNELULTRAS/LegacyIce | antiX-20/configs/.icewm/contrib/screenshot.sh | Shell | gpl-2.0 | 65 |
#!/bin/sh
version=1
EmptyError(){
zenity --error \
--text="Password fields can't be blank!"
exit
}
bob=$(zenity --forms --title="Change Password" \
--text="Enter your current and new password" \
--separator="€%%" \
--add-password="Current Password" \
--add-password="New Password" \
--add-password="Repeat New Password" )
case $? in
0)
oldpassword=""
newpassword1=""
newpassword2=""
oldpassword=$(echo "$bob" | awk -F'€%%' '{print $1}')
newpassword1=$(echo "$bob" | awk -F'€%%' '{print $2}')
newpassword2=$(echo "$bob" | awk -F'€%%' '{print $3}')
if [ "$oldpassword" = "" ]; then
EmptyError
fi
if [ "$newpassword1" = "" ]; then
EmptyError
fi
if [ "$newpassword2" = "" ]; then
EmptyError
fi
if [ "$newpassword1" = "$newpassword2" ]; then
echo -e "$oldpassword\n$newpassword1\n$newpassword1" | passwd
exitstatus=$?
if [ $exitstatus -eq 10 ]; then
zenity --error --text="Your new password was not complex enough or you entered your old password incorrectly."
#clear
else
if [ $exitstatus -eq 0 ]; then
zenity --info --text="Password change complete."
else
zenity --error --text="An unknown error has occured."
fi
fi
else
zenity --error \
--text="Inputted passwords don't match.
No change was made."
exit
fi
;;
1)
echo "No change made"
;;
-1)
echo "An unexpected error has occurred."
;;
esac | PiNet/PiNet | Scripts/changePassword.sh | Shell | gpl-2.0 | 1,454 |
#!/bin/sh
echo -e "BASHISM" \
"something else \n"
#exec ${loclibdir}/tkcon.tcl \
# -eval "source ${loclibdir}/console.tcl" \
# -slave "package require Tk; set argc $#; set argv [list $*]; \
# source ${loclibdir}/xcircuit.tcl"
| Debian/devscripts | test/bashisms/fps.sh | Shell | gpl-2.0 | 253 |
#!/bin/bash
# Copyright (c) 2016, Hitachi, Erlon Cruz <[email protected]>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
set -x
export TEMPEST_USER=${TEMPEST_USER:-tempest}
chmod +w $BASE/new/tempest
cd $BASE/new/tempest
source $BASE/new/devstack/functions
source $BASE/new/devstack/functions-common
source $WORKSPACE/devstack-gate/functions.sh
source $BASE/new/cinder/tools/hooks/utils.sh
export TEMPEST_CONFIG=$BASE/new/tempest/etc/tempest.conf
# Disable bash verbose so we have cleaner output. Also, exit on error must
# be disabled, as we will run several tests that can return errors.
set +x +e
function configure_tempest_backends {
be1=$1
be2=$2
echo "Configuring tempest conf in ${TEMPEST_CONFIG}"
iniset -sudo $TEMPEST_CONFIG 'volume' 'backend_names' ${be1},${be2}
}
BACKENDS='lvm ceph nfs'
RGEX="(.*test_volume_retype_with_migration.*|.*test_volume_migrate_attached.*)"
final_result=0
final_message='Migrations tests finished SUCCESSFULLY!'
declare -A TEST_RESULTS
start_time=`date +%s`
for be1 in ${BACKENDS}; do
for be2 in ${BACKENDS}; do
if [ ${be1} != ${be2} ]; then
configure_tempest_backends ${be1} ${be2}
echo "============================================================"
echo "Testing multibackend features: ${be1} vs ${be2}"
echo "============================================================"
run_tempest "${be1} vs ${be2}" ${RGEX}
result=$?
# If any of the test fail, we keep running but return failure as
# the final result
if [ ${result} -ne 0 ]; then
TEST_RESULTS[${be1},${be2}]="FAILURE"
final_message='Migrations tests FAILED!'
final_result=1
else
TEST_RESULTS[${be1},${be2}]="SUCCESS"
fi
fi
done
done
end_time=`date +%s`
elapsed=$(expr $(expr ${end_time} - ${start_time}) / 60)
# Print the results
num_rows=$(echo $BACKENDS | wc -w)
fmt=" %15s"
echo "============================================================"
echo " ${final_message} In ${elapsed} minutes."
echo "============================================================"
printf "$fmt" ''
for be1 in ${BACKENDS}; do
printf "$fmt" ${be1}
done
echo
for be1 in ${BACKENDS}; do
printf "$fmt" ${be1}
for be2 in ${BACKENDS}; do
if [ ${be1} == ${be2} ]; then
printf "$fmt" '---'
else
printf "$fmt" ${TEST_RESULTS[${be1},${be2}]}
fi
done
echo
done
exit ${final_result}
| Datera/cinder | tools/hooks/run_multi_backend_matrix.sh | Shell | apache-2.0 | 3,121 |
#!/usr/bin/env bash
default='solarized'
dir=`dirname $0`
if [ -d $dir/colors/$1 ]; then
theme=$1
else
echo "Theme $1 not found, using $default"
theme=$default
fi
if [ "$#" -ne 1 ]; then
theme=$default
fi
PROFILE=${1:-Default}
gconftool-2 -s -t string /apps/guake/style/background/color `cat $dir/colors/$theme/base03`
gconftool-2 -s -t string /apps/guake/style/font/color `cat $dir/colors/$theme/base0`
gconftool-2 -s -t string /apps/guake/style/font/palette `cat $dir/colors/$theme/palette`
| SnailB/guake-colors-solarized | set_dark.sh | Shell | mit | 504 |
#!/bin/bash
if [ 0$UID -gt 0 ]; then
echo Run as root.
exit 1
fi
print_help() {
cat <<HELP
usage: configure-proxy.sh [options]
options:
--answer-file=filename
Indicates the location of an answer file to be used for answering
questions asked during the installation process. See the man page
for an example and documentation.
--force-own-ca
Do not use parent CA and force to create your own.
-h, --help
show this help message and exit
--http-password=HTTP_PASSWORD
The password to use for an authenticated proxy.
--http-proxy=HTTP_PROXY
HTTP proxy in host:port format, e.g. squid.redhat.com:3128
--http-username=HTTP_USERNAME
The username for an authenticated proxy.
--non-interactive
For use only with --answer-file. If the --answer-file doesn't
provide a required response, the default answer is used.
--populate-config-channel
Create config channel and save configuration files to that channel.
Configuration channel name is rhn_proxy_config_\${SYSTEM_ID}.
--rhn-password=RHN_PASSWORD
Red Hat Network or Spacewalk password.
--rhn-user=RHN_USER
Red Hat Network or Spacewalk user account.
--ssl-build-dir=SSL_BUILD_DIR
The directory where we build SSL certificate. Default is /root/ssl-build
--ssl-city=SSL_CITY
City to be used in SSL certificate.
--ssl-common=SSL_COMMON
Common name to be used in SSL certificate.
--ssl-country=SSL_COUNTRY
Two letters country code to be used in SSL certificate.
--ssl-email=SSL_EMAIL
Email to be used in SSL certificate.
--ssl-org=SSL_ORG
Organization name to be used in SSL certificate.
--ssl-orgunit=SSL_ORGUNIT
Organization unit name to be used in SSL certificate.
--ssl-password=SSL_PASSWORD
Password to be used for SSL CA certificate.
--ssl-state=SSL_STATE
State to be used in SSL certificate.
--ssl-cname=CNAME_ALIAS
Cname alias of the machine. Can be specified multiple times.
--start-services[=N]
1 or Y to start all services after configuration. This is default.
0 or N to not start services after configuration.
--traceback-email=TRACEBACK_EMAIL
Email to which tracebacks should be sent.
--use-ssl
Let Spacewalk Proxy Server communicate with parent over SSL.
Even if it is disabled client can still use SSL to connect
to Spacewalk Proxy Server.
--version=VERSION
Version of Spacewalk Proxy Server you want to activate.
HELP
exit 1
}
parse_answer_file() {
local FILE="$1"
local ALIAS
if [ ! -r "$FILE" ] ; then
echo "Answer file '$FILE' is not readable."
exit 1
fi
. "$FILE"
for ALIAS in ${SSL_CNAME[@]}; do
SSL_CNAME_PARSED[CNAME_INDEX++]=--set-cname=$ALIAS
done
}
set_value() {
local OPTION="$1"
local VAR="$2"
local ARG="$3"
[[ "$ARG" =~ ^- ]] \
&& echo "$0: option $OPTION requires argument! Use answer file if your argument starts with '-'." \
&& print_help
eval "$(printf "%q=%q" "$VAR" "$ARG")"
}
INTERACTIVE=1
CNAME_INDEX=0
OPTS=$(getopt --longoptions=help,answer-file:,non-interactive,version:,traceback-email:,use-ssl::,force-own-ca,http-proxy:,http-username:,http-password:,ssl-build-dir:,ssl-org:,ssl-orgunit:,ssl-common:,ssl-city:,ssl-state:,ssl-country:,ssl-email:,ssl-password:,ssl-cname:,populate-config-channel::,start-services:: -n ${0##*/} -- h "$@")
if [ $? != 0 ] ; then
print_help
fi
# It is getopt's responsibility to make this safe
eval set -- "$OPTS"
while : ; do
case "$1" in
--help|-h) print_help;;
--answer-file) set_value "$1" ANSWER_FILE "$2";
parse_answer_file "$ANSWER_FILE"; shift;;
--non-interactive) INTERACTIVE=0;;
--version) set_value "$1" VERSION "$2"; shift;;
--traceback-email) set_value "$1" TRACEBACK_EMAIL "$2"; shift;;
--use-ssl) USE_SSL="${2:-1}"; shift;;
--force-own-ca) FORCE_OWN_CA=1;;
--http-proxy) set_value "$1" HTTP_PROXY "$2"; shift;;
--http-username) set_value "$1" HTTP_USERNAME "$2"; shift;;
--http-password) set_value "$1" HTTP_PASSWORD "$2"; shift;;
--ssl-build-dir) set_value "$1" SSL_BUILD_DIR "$2"; shift;;
--ssl-org) set_value "$1" SSL_ORG "$2"; shift;;
--ssl-orgunit) set_value "$1" SSL_ORGUNIT "$2"; shift;;
--ssl-common) set_value "$1" SSL_COMMON "$2"; shift;;
--ssl-city) set_value "$1" SSL_CITY "$2"; shift;;
--ssl-state) set_value "$1" SSL_STATE "$2"; shift;;
--ssl-country) set_value "$1" SSL_COUNTRY "$2"; shift;;
--ssl-email) set_value "$1" SSL_EMAIL "$2"; shift;;
--ssl-password) set_value "$1" SSL_PASSWORD "$2"; shift;;
--ssl-cname) SSL_CNAME_PARSED[CNAME_INDEX++]="--set-cname=$2"; shift;;
--populate-config-channel) POPULATE_CONFIG_CHANNEL="${2:-Y}"; shift;;
--start-services) START_SERVICES="${2:-Y}"; shift;;
--rhn-user) set_value "$1" RHN_USER "$2"; shift;;
--rhn-password) set_value "$1" RHN_PASSWORD "$2"; shift;;
--) shift;
if [ $# -gt 0 ] ; then
echo "Error: Extra arguments found: $@"
print_help
exit 1
fi
break;;
*) echo Error: Invalid option $1; exit 1;;
esac
shift
done
# params dep check
if [[ $INTERACTIVE == 0 \
&& ( -z $POPULATE_CONFIG_CHANNEL || $( yes_no $POPULATE_CONFIG_CHANNEL ) == 1 ) \
&& ( -z $RHN_USER || -z $RHN_PASSWORD ) ]]; then
echo "Error: When --populate-config-channel is set to Yes both --rhn-user and --rhn-password have to be provided."
exit 1
fi
if [[ $INTERACTIVE == 0 && -z $ANSWER_FILE ]]; then
echo "Option --non-interactive is for use only with option --answer-file."
exit 1
fi
ACCUMULATED_ANSWERS=""
generate_answers() {
if [ "$INTERACTIVE" = 1 -a ! -z "$ACCUMULATED_ANSWERS" ]; then
local WRITE_ANSWERS
echo "There were some answers you had to enter manually."
echo "Would you like to have written those into file"
echo -n "formatted as answers file? [Y/n]: "
read WRITE_ANSWERS
WRITE_ANSWERS=$(yes_no ${WRITE_ANSWERS:-Y})
if [ "$WRITE_ANSWERS" = 1 ]; then
local tmp=$(mktemp proxy-answers.txt.XXXXX)
echo "Writing $tmp"
echo "# Answer file generated by ${0##*/} at $(date)$ACCUMULATED_ANSWERS" > $tmp
fi
fi
}
default_or_input() {
local MSG="$1"
local VARIABLE="$2"
local DEFAULT="$3"
local INPUT
local CURRENT_VALUE=${!VARIABLE}
    # The next line uses the less common ${var_b:-word} expansion:
    #   var_a=${var_b:-word}
    # assigns $var_b to var_a when var_b is set and non-empty, otherwise 'word'.
DEFAULT=${CURRENT_VALUE:-$DEFAULT}
local VARIABLE_ISSET=$(set | grep "^$VARIABLE=")
echo -n "$MSG [$DEFAULT]: "
if [ "$INTERACTIVE" = "1" -a -z "$VARIABLE_ISSET" ]; then
read INPUT
ACCUMULATED_ANSWERS+=$(printf "\n%q=%q" "$VARIABLE" "${INPUT:-$DEFAULT}")
elif [ -z "$VARIABLE_ISSET" ]; then
echo "$DEFAULT"
else
DEFAULT=${!VARIABLE}
echo "$DEFAULT"
fi
if [ -z "$INPUT" ]; then
INPUT="$DEFAULT"
fi
eval "$(printf "%q=%q" "$VARIABLE" "$INPUT")"
}
yes_no() {
case "$1" in
Y|y|Y/n|n/Y|1)
echo 1
;;
*)
echo 0
;;
esac
}
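# e.g. USE_SSL=$(yes_no "$USE_SSL") normalizes Y/y/1 style answers (including
# the "Y/n" default prompt string) to 1, anything else to 0.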
config_error() {
if [ $1 -gt 0 ]; then
echo "$2 Installation interrupted."
/usr/bin/rhn-proxy-activate \
--server="$RHN_PARENT" \
--http-proxy="$HTTP_PROXY" \
--http-proxy-username="$HTTP_USERNAME" \
--http-proxy-password="$HTTP_PASSWORD" \
--ca-cert="$CA_CHAIN" \
--deactivate --non-interactive
generate_answers
exit $1
fi
}
# Return 0 if rhnParent is Hosted. Otherwise return 1.
is_hosted() {
[ "$1" = "xmlrpc.rhn.redhat.com" -o \
$( PYTHONPATH='/usr/share/rhn' python -c "from up2date_client import config; cfg = config.initUp2dateConfig(); print '$1' in cfg['hostedWhitelist']" ) = "True" ]
return $?
}
check_ca_conf() {
if [ -f /root/ssl-build/rhn-ca-openssl.cnf ] \
&& awk '/^[[:space:]]*\[[[:space:]]*[_[:alnum:]]*[[:space:]]*]/ {CORRECT_SECTION=0} \
/^[[:space:]]*\[[[:space:]]*CA_default[[:space:]]*]/ {CORRECT_SECTION=1} \
/^[[:space:]]*copy_extensions[[:space:]]*=[[:space:]]*copy/ && CORRECT_SECTION==1 {exit 1}' \
/root/ssl-build/rhn-ca-openssl.cnf > /dev/null \
&& [ ${#SSL_CNAME_PARSED[@]} -gt 0 ]; then
cat <<WARNING
It seems you tried to use the --set-cname option. On inspection we noticed
that the openssl configuration file we use is missing a critically important
option. Without this option, not only will multi host SSL certificates not
work, but the planet Earth will implode in a massive rip in the time/space
continuum. To avoid this failure, we choose to gracefully exit here and
request for you to edit the openssl configuration file
/root/ssl-build/rhn-ca-openssl.cnf
and add this line:
copy_extensions = copy
in
[ CA_default ]
section.
Then re-run this script again.
WARNING
generate_answers
exit 3
fi
}
YUM="yum install"
UPGRADE="yum upgrade"
# add -y for non-interactive installation
if [ "$INTERACTIVE" = "0" ]; then
YUM="$YUM -y"
UPGRADE="$UPGRADE -y"
fi
SYSCONFIG_DIR=/etc/sysconfig/rhn
RHNCONF_DIR=/etc/rhn
HTTPDCONF_DIR=/etc/httpd/conf
HTTPDCONFD_DIR=/etc/httpd/conf.d
HTMLPUB_DIR=/var/www/html/pub
JABBERD_DIR=/etc/jabberd
SQUID_DIR=/etc/squid
SYSTEMID_PATH=`PYTHONPATH='/usr/share/rhn' python -c "from up2date_client import config; cfg = config.initUp2dateConfig(); print cfg['systemIdPath'] "`
if [ ! -r $SYSTEMID_PATH ]; then
echo ERROR: Spacewalk Proxy does not appear to be registered
exit 2
fi
SYSTEM_ID=$(/usr/bin/xsltproc /usr/share/rhn/get_system_id.xslt $SYSTEMID_PATH | cut -d- -f2)
DIR=/usr/share/doc/proxy/conf-template
HOSTNAME=$(hostname)
FORCE_OWN_CA=$(yes_no $FORCE_OWN_CA)
SSL_BUILD_DIR=${SSL_BUILD_DIR:-/root/ssl-build}
if ! [ -d $SSL_BUILD_DIR ] && [ 0$FORCE_OWN_CA -eq 0 ]; then
echo "Error: ssl build directory $SSL_BUILD_DIR does not exist. Please create this directory."
exit 1
fi
UP2DATE_FILE=$SYSCONFIG_DIR/up2date
RHN_PARENT=$(awk -F= '/serverURL=/ {split($2, a, "/")} END {print a[3]}' $UP2DATE_FILE)
echo "Using RHN parent (from $UP2DATE_FILE): $RHN_PARENT"
if [ "$RHN_PARENT" == "rhn.redhat.com" ]; then
RHN_PARENT="xmlrpc.rhn.redhat.com"
cat <<WARNING
*** Warning: plain rhn.redhat.com should not be used as RHN Parent.
*** Using xmlrpc.rhn.redhat.com instead.
WARNING
fi
CA_CHAIN=$(awk -F'[=;]' '/sslCACert=/ {a=$2} END {print a}' $UP2DATE_FILE)
echo "Using CA Chain (from $UP2DATE_FILE): $CA_CHAIN"
if [ 0$FORCE_OWN_CA -eq 0 ] && \
! is_hosted "$RHN_PARENT" && \
[ ! -f /root/ssl-build/RHN-ORG-PRIVATE-SSL-KEY ] && \
! diff $CA_CHAIN /root/ssl-build/RHN-ORG-TRUSTED-SSL-KEY &>/dev/null; then
cat <<CA_KEYS
Please copy your CA key and public certificate from $RHN_PARENT to the
/root/ssl-build directory. You may want to execute this command:
scp 'root@$RHN_PARENT:/root/ssl-build/{RHN-ORG-PRIVATE-SSL-KEY,RHN-ORG-TRUSTED-SSL-CERT,rhn-ca-openssl.cnf}' $SSL_BUILD_DIR
CA_KEYS
exit 1
fi
check_ca_conf
if ! /sbin/runuser nobody -s /bin/sh --command="[ -r $CA_CHAIN ]" ; then
echo Error: File $CA_CHAIN is not readable by nobody user.
exit 1
fi
default_or_input "HTTP Proxy" HTTP_PROXY ''
if [ "$HTTP_PROXY" != "" ]; then
default_or_input "HTTP username" HTTP_USERNAME ''
if [ "$HTTP_USERNAME" != "" ]; then
default_or_input "HTTP password" HTTP_PASSWORD ''
fi
fi
VERSION_FROM_PARENT=$(rhn-proxy-activate --server=$RHN_PARENT \
--http-proxy="$HTTP_PROXY" \
--http-proxy-username="$HTTP_USERNAME" \
--http-proxy-password="$HTTP_PASSWORD" \
--ca-cert="$CA_CHAIN" \
--list-available-versions 2>/dev/null|sort|tail -n1)
VERSION_FROM_RPM=$(rpm -q --queryformat %{version} spacewalk-proxy-installer|cut -d. -f1-2)
default_or_input "Proxy version to activate" VERSION ${VERSION_FROM_PARENT:-$VERSION_FROM_RPM}
default_or_input "Traceback email" TRACEBACK_EMAIL ''
default_or_input "Use SSL" USE_SSL 'Y/n'
USE_SSL=$(yes_no $USE_SSL)
cat <<SSLCERT
Regardless of whether you enabled SSL for the connection to the Spacewalk Parent
Server, you will be prompted to generate an SSL certificate.
This SSL certificate will allow client systems to connect to this Spacewalk Proxy
securely. Refer to the Spacewalk Proxy Installation Guide for more information.
SSLCERT
default_or_input "Organization" SSL_ORG ''
default_or_input "Organization Unit" SSL_ORGUNIT "$HOSTNAME"
default_or_input "Common Name" SSL_COMMON "$HOSTNAME"
default_or_input "City" SSL_CITY ''
default_or_input "State" SSL_STATE ''
default_or_input "Country code" SSL_COUNTRY ''
default_or_input "Email" SSL_EMAIL "$TRACEBACK_EMAIL"
if [ ${#SSL_CNAME_PARSED[@]} -eq 0 ]; then
VARIABLE_ISSET=$(set | grep "^SSL_CNAME=")
if [ -z "$VARIABLE_ISSET" ]; then
default_or_input "Cname aliases (separated by space)" SSL_CNAME_ASK ''
CNAME=($SSL_CNAME_ASK)
for ALIAS in ${CNAME[@]}; do
SSL_CNAME_PARSED[CNAME_INDEX++]=--set-cname=$ALIAS
done
check_ca_conf
fi
fi
/usr/bin/rhn-proxy-activate --server="$RHN_PARENT" \
--http-proxy="$HTTP_PROXY" \
--http-proxy-username="$HTTP_USERNAME" \
--http-proxy-password="$HTTP_PASSWORD" \
--ca-cert="$CA_CHAIN" \
--version="$VERSION" \
--non-interactive
config_error $? "Proxy activation failed!"
rpm -q rhn-apache >/dev/null
if [ $? -eq 0 ]; then
echo "Package rhn-apache present - assuming upgrade:"
echo "Force removal of /etc/httpd/conf/httpd.conf - backed up to /etc/httpd/conf/httpd.conf.rpmsave"
mv /etc/httpd/conf/httpd.conf /etc/httpd/conf/httpd.conf.rpmsave
fi
if [ -x /usr/sbin/rhn-proxy ]; then
/usr/sbin/rhn-proxy stop
fi
$YUM spacewalk-proxy-management
# check if package install successfully
rpm -q spacewalk-proxy-management >/dev/null
if [ $? -ne 0 ]; then
config_error 2 "Installation of package spacewalk-proxy-management failed."
fi
$UPGRADE
ln -sf /etc/pki/spacewalk/jabberd/server.pem /etc/jabberd/server.pem
if [ "$VERSION" = '5.3' -o "$VERSION" = '5.2' -o "$VERSION" = '5.1' -o "$VERSION" = '5.0' ]; then
sed -e "s/\${session.hostname}/$HOSTNAME/g" </usr/share/rhn/installer/jabberd/c2s.xml >/etc/jabberd/c2s.xml
sed -e "s/\${session.hostname}/$HOSTNAME/g" </usr/share/rhn/installer/jabberd/sm.xml >/etc/jabberd/sm.xml
else
/usr/bin/spacewalk-setup-jabberd --macros "hostname:$HOSTNAME"
fi
# size of squid disk cache will be 60% of free space on /var/spool/squid
# df -P give free space in kB
# * 60 / 100 is 60% of that space
# / 1024 is to get value in MB
SQUID_SIZE=$(df -P /var/spool/squid | awk '{a=$4} END {printf("%d", a * 60 / 100 / 1024)}')
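# Worked example (hypothetical numbers): if df -P reports 10485760 kB free,
# the cache size becomes 10485760 * 60 / 100 / 1024 = 6144 MB.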
SQUID_REWRITE="s|cache_dir ufs /var/spool/squid 15000 16 256|cache_dir ufs /var/spool/squid $SQUID_SIZE 16 256|g;"
SQUID_VER_MAJOR=$(squid -v | awk -F'[ .]' '/Version/ {print $4}')
if [ $SQUID_VER_MAJOR -ge 3 ] ; then
# squid 3.X has acl 'all' built-in
SQUID_REWRITE="$SQUID_REWRITE s/^acl all.*//;"
# squid 3.2 and later need none instead of -1 for range_offset_limit
SQUID_VER_MINOR=$(squid -v | awk -F'[ .]' '/Version/ {print $5}')
if [[ $SQUID_VER_MAJOR -ge 4 || ( $SQUID_VER_MAJOR -eq 3 && $SQUID_VER_MINOR -ge 2 ) ]] ; then
SQUID_REWRITE="$SQUID_REWRITE s/^range_offset_limit.*/range_offset_limit none/;"
fi
fi
sed "$SQUID_REWRITE" < $DIR/squid.conf > $SQUID_DIR/squid.conf
sed -e "s|\${session.ca_chain:/usr/share/rhn/RHNS-CA-CERT}|$CA_CHAIN|g" \
-e "s/\${session.http_proxy}/$HTTP_PROXY/g" \
-e "s/\${session.http_proxy_username}/$HTTP_USERNAME/g" \
-e "s/\${session.http_proxy_password}/$HTTP_PASSWORD/g" \
-e "s/\${session.rhn_parent}/$RHN_PARENT/g" \
-e "s/\${session.traceback_mail}/$TRACEBACK_EMAIL/g" \
-e "s/\${session.use_ssl:0}/$USE_SSL/g" \
< $DIR/rhn.conf > $RHNCONF_DIR/rhn.conf
# systemid need to be readable by apache/proxy
for file in $SYSTEMID_PATH $UP2DATE_FILE; do
chown root:apache $file
chmod 0640 $file
done
#Setup the cobbler stuff, needed to use koan through a proxy
PROTO="http";
if [ $USE_SSL -eq 1 ]; then
PROTO="https"
fi
sed -e "s/\$PROTO/$PROTO/g" \
-e "s/\$RHN_PARENT/$RHN_PARENT/g" < $DIR/cobbler-proxy.conf > $HTTPDCONFD_DIR/cobbler-proxy.conf
# lets do SSL stuff
SSL_BUILD_DIR=${SSL_BUILD_DIR:-"/root/ssl-build"}
if [ -n "$SSL_PASSWORD" ] ; then
# use SSL_PASSWORD if already set
RHN_SSL_TOOL_PASSWORD_OPTION="--password"
RHN_SSL_TOOL_PASSWORD="$SSL_PASSWORD"
elif [ "$INTERACTIVE" = "0" ] ; then
# non-interactive mode but no SSL_PASSWORD :(
config_error 4 "Please define SSL_PASSWORD."
fi
if [ ! -f $SSL_BUILD_DIR/RHN-ORG-PRIVATE-SSL-KEY ]; then
echo "Generating CA key and public certificate:"
/usr/bin/rhn-ssl-tool --gen-ca -q \
--dir="$SSL_BUILD_DIR" \
--set-common-name="$SSL_COMMON" \
--set-country="$SSL_COUNTRY" \
--set-city="$SSL_CITY" \
--set-state="$SSL_STATE" \
--set-org="$SSL_ORG" \
--set-org-unit="$SSL_ORGUNIT" \
--set-email="$SSL_EMAIL" \
$RHN_SSL_TOOL_PASSWORD_OPTION $RHN_SSL_TOOL_PASSWORD
config_error $? "CA certificate generation failed!"
else
echo "Using CA key at $SSL_BUILD_DIR/RHN-ORG-PRIVATE-SSL-KEY."
fi
RPM_CA=$(grep noarch $SSL_BUILD_DIR/latest.txt 2>/dev/null)
if [ ! -f $SSL_BUILD_DIR/$RPM_CA ]; then
echo "Generating distributable RPM for CA public certificate:"
/usr/bin/rhn-ssl-tool --gen-ca -q --rpm-only --dir="$SSL_BUILD_DIR"
RPM_CA=$(grep noarch $SSL_BUILD_DIR/latest.txt)
fi
if [ ! -f $HTMLPUB_DIR/$RPM_CA ] || [ ! -f $HTMLPUB_DIR/RHN-ORG-TRUSTED-SSL-CERT ] || \
! diff $HTMLPUB_DIR/RHN-ORG-TRUSTED-SSL-CERT $SSL_BUILD_DIR/RHN-ORG-TRUSTED-SSL-CERT &>/dev/null; then
echo "Copying CA public certificate to $HTMLPUB_DIR for distribution to clients:"
cp $SSL_BUILD_DIR/RHN-ORG-TRUSTED-SSL-CERT $SSL_BUILD_DIR/$RPM_CA $HTMLPUB_DIR/
fi
echo "Generating SSL key and public certificate:"
/usr/bin/rhn-ssl-tool --gen-server -q --no-rpm \
--set-hostname "$HOSTNAME" \
--dir="$SSL_BUILD_DIR" \
--set-country="$SSL_COUNTRY" \
--set-city="$SSL_CITY" \
--set-state="$SSL_STATE" \
--set-org="$SSL_ORG" \
--set-org-unit="$SSL_ORGUNIT" \
--set-email="$SSL_EMAIL" \
${SSL_CNAME_PARSED[@]} \
$RHN_SSL_TOOL_PASSWORD_OPTION $RHN_SSL_TOOL_PASSWORD
config_error $? "SSL key generation failed!"
echo "Installing SSL certificate for Apache and Jabberd:"
rpm -Uv $(/usr/bin/rhn-ssl-tool --gen-server --rpm-only --dir="$SSL_BUILD_DIR" 2>/dev/null |grep noarch.rpm)
if [ -e $HTTPDCONFD_DIR/ssl.conf ]; then
mv $HTTPDCONFD_DIR/ssl.conf $HTTPDCONFD_DIR/ssl.conf.bak
fi
sed -e "s|^SSLCertificateFile /etc/pki/tls/certs/localhost.crt$|SSLCertificateFile $HTTPDCONF_DIR/ssl.crt/server.crt|g" \
-e "s|^SSLCertificateKeyFile /etc/pki/tls/private/localhost.key$|SSLCertificateKeyFile $HTTPDCONF_DIR/ssl.key/server.key|g" \
-e "s|</VirtualHost>|RewriteEngine on\nRewriteOptions inherit\nSSLProxyEngine on\n</VirtualHost>|" \
< $HTTPDCONFD_DIR/ssl.conf.bak > $HTTPDCONFD_DIR/ssl.conf
CHANNEL_LABEL="rhn_proxy_config_$SYSTEM_ID"
default_or_input "Create and populate configuration channel $CHANNEL_LABEL?" POPULATE_CONFIG_CHANNEL 'Y/n'
POPULATE_CONFIG_CHANNEL=$(yes_no $POPULATE_CONFIG_CHANNEL)
if [ "$POPULATE_CONFIG_CHANNEL" = "1" ]; then
RHNCFG_STATUS=1
default_or_input "RHN username:" RHN_USER ''
while [ $RHNCFG_STATUS != 0 ] ; do
CONFIG_CHANNELS=$(rhncfg-manager list-channels ${RHN_USER:+--username="${RHN_USER}"} ${RHN_PASSWORD:+--password="${RHN_PASSWORD}"} --server-name="$RHN_PARENT")
RHNCFG_STATUS=$?
# In case of incorrect username/password, we want to re-ask user
unset RHN_USER
unset RHN_PASSWORD
done
if ! grep -q -E "^ +$CHANNEL_LABEL$" <<<"$CONFIG_CHANNELS" ; then
rhncfg-manager create-channel --server-name "$RHN_PARENT" "$CHANNEL_LABEL"
fi
rhncfg-manager update --server-name "$RHN_PARENT" \
--channel="$CHANNEL_LABEL" \
$HTTPDCONFD_DIR/ssl.conf \
$RHNCONF_DIR/rhn.conf \
$SQUID_DIR/squid.conf \
$HTTPDCONFD_DIR/cobbler-proxy.conf \
$HTTPDCONF_DIR/httpd.conf \
$JABBERD_DIR/c2s.xml \
$JABBERD_DIR/sm.xml
fi
echo "Enabling Spacewalk Proxy."
for service in squid httpd jabberd; do
if [ -x /usr/bin/systemctl ] ; then
/usr/bin/systemctl enable $service
else
/sbin/chkconfig --add $service
/sbin/chkconfig --level 345 $service on
fi
done
# default is 1
START_SERVICES=$(yes_no ${START_SERVICES:-1})
if [ "$START_SERVICES" = "1" ]; then
/usr/sbin/rhn-proxy restart
else
echo Skipping start of services.
echo Use "/usr/sbin/rhn-proxy start" to manually start the proxy.
fi
generate_answers
| xkollar/spacewalk | proxy/installer/configure-proxy.sh | Shell | gpl-2.0 | 21,506 |
#!/bin/sh
test_description='test dumb fetching over http via static file'
. ./test-lib.sh
if test -n "$NO_CURL"; then
skip_all='skipping test, git built without http support'
test_done
fi
. "$TEST_DIRECTORY"/lib-httpd.sh
LIB_HTTPD_PORT=${LIB_HTTPD_PORT-'5550'}
start_httpd
test_expect_success 'setup repository' '
echo content >file &&
git add file &&
git commit -m one
'
test_expect_success 'create http-accessible bare repository' '
mkdir "$HTTPD_DOCUMENT_ROOT_PATH/repo.git" &&
(cd "$HTTPD_DOCUMENT_ROOT_PATH/repo.git" &&
git --bare init &&
echo "exec git update-server-info" >hooks/post-update &&
chmod +x hooks/post-update
) &&
git remote add public "$HTTPD_DOCUMENT_ROOT_PATH/repo.git" &&
git push public master:master
'
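# Note: dumb HTTP clients cannot invoke git-upload-pack; they rely on the
# static info/refs and objects/info/packs files that "git update-server-info"
# (run by the post-update hook installed above) regenerates after each push.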
test_expect_success 'clone http repository' '
git clone $HTTPD_URL/dumb/repo.git clone &&
test_cmp file clone/file
'
test_expect_success 'fetch changes via http' '
echo content >>file &&
git commit -a -m two &&
git push public &&
(cd clone && git pull) &&
test_cmp file clone/file
'
test_expect_success 'http remote detects correct HEAD' '
git push public master:other &&
(cd clone &&
git remote set-head origin -d &&
git remote set-head origin -a &&
git symbolic-ref refs/remotes/origin/HEAD > output &&
echo refs/remotes/origin/master > expect &&
test_cmp expect output
)
'
test_expect_success 'fetch packed objects' '
cp -R "$HTTPD_DOCUMENT_ROOT_PATH"/repo.git "$HTTPD_DOCUMENT_ROOT_PATH"/repo_pack.git &&
(cd "$HTTPD_DOCUMENT_ROOT_PATH"/repo_pack.git &&
git --bare repack &&
git --bare prune-packed
) &&
git clone $HTTPD_URL/dumb/repo_pack.git
'
test_expect_success 'fetch notices corrupt pack' '
cp -R "$HTTPD_DOCUMENT_ROOT_PATH"/repo_pack.git "$HTTPD_DOCUMENT_ROOT_PATH"/repo_bad1.git &&
(cd "$HTTPD_DOCUMENT_ROOT_PATH"/repo_bad1.git &&
p=`ls objects/pack/pack-*.pack` &&
chmod u+w $p &&
printf %0256d 0 | dd of=$p bs=256 count=1 seek=1 conv=notrunc
) &&
mkdir repo_bad1.git &&
(cd repo_bad1.git &&
git --bare init &&
test_must_fail git --bare fetch $HTTPD_URL/dumb/repo_bad1.git &&
test 0 = `ls objects/pack/pack-*.pack | wc -l`
)
'
test_expect_success 'fetch notices corrupt idx' '
cp -R "$HTTPD_DOCUMENT_ROOT_PATH"/repo_pack.git "$HTTPD_DOCUMENT_ROOT_PATH"/repo_bad2.git &&
(cd "$HTTPD_DOCUMENT_ROOT_PATH"/repo_bad2.git &&
p=`ls objects/pack/pack-*.idx` &&
chmod u+w $p &&
printf %0256d 0 | dd of=$p bs=256 count=1 seek=1 conv=notrunc
) &&
mkdir repo_bad2.git &&
(cd repo_bad2.git &&
git --bare init &&
test_must_fail git --bare fetch $HTTPD_URL/dumb/repo_bad2.git &&
test 0 = `ls objects/pack | wc -l`
)
'
test_expect_success 'did not use upload-pack service' '
grep '/git-upload-pack' <"$HTTPD_ROOT_PATH"/access.log >act
: >exp
test_cmp exp act
'
stop_httpd
test_done
| vidarh/Git | t/t5550-http-fetch.sh | Shell | gpl-2.0 | 2,799 |
#! /bin/sh
# Copyright (C) 2011-2015 Free Software Foundation, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Check that automake really automatically distributes all the files
# it advertises to do.
# Related to automake bug#7819.
# Keep this test in sync with sister test 'autodist-subdir.sh'.
am_create_testdir=empty
. test-init.sh
cat > configure.ac <<END
AC_INIT([$me], [1.0])
AC_CONFIG_AUX_DIR([.])
AM_INIT_AUTOMAKE
AC_CONFIG_FILES([Makefile])
AC_OUTPUT
END
$ACLOCAL
$AUTOCONF
# The automake manual states that the list of automatically-distributed
# files should be given by 'automake --help'.
list=$($AUTOMAKE --help \
| sed -n '/^Files.*automatically distributed.*if found.*always/,/^ *$/p' \
| sed 1d)
# Normalize whitespace, just in case.
list=$(echo $list)
test -n "$list"
cat > Makefile.am <<'END'
include distfiles.am
check-local:
## For debugging.
@echo DIST_COMMON:
@for f in $(DIST_COMMON); do echo " $$f"; done
@echo DISTDIR:
@ls -l $(distdir) | sed 's/^/ /'
## Now the checks.
@for f in $(autodist_list); do \
echo "file: $$f"; \
test -f $(distdir)/$$f \
|| { echo $$f: distdir fail >&2; exit 1; }; \
## Some filenames might contain dots, but this won't cause spurious
## failures, and "spurious successes" are so unlikely that they're
## not worth worrying about.
echo ' ' $(DIST_COMMON) ' ' | grep "[ /]$$f " >/dev/null \
|| { echo $$f: distcom fail >&2; exit 1; }; \
done
END
: First try listing the automatically-distributed files in proper
: targets in Makefile.am
echo "MAINTAINERCLEANFILES = $list" > distfiles.am
for f in $list; do echo "$f :; touch $f"; done >> distfiles.am
cat distfiles.am # For debugging.
$AUTOMAKE -a
./configure
$MAKE distdir
autodist_list="$list" $MAKE check
$MAKE maintainer-clean
test ! -e README # Sanity check.
rm -rf $me-1.0 # Remove $(distdir).
: Now try creating the automatically-distributed files before
: running automake.
: > distfiles.am
for f in $list; do
echo dummy > $f
done
ls -l # For debugging.
$AUTOMAKE
./configure
$MAKE distdir
autodist_list="$list" $MAKE check
:
| darrengarvey/automake | t/autodist.sh | Shell | gpl-2.0 | 2,701 |
#!/bin/sh
if [ -f rixc ]
then
echo Clearing: rixc -- preventing unexpected test results
rm rixc
fi
if [ -f lex.yy.c ]
then
echo Clearing: lex.yy.c -- flex output
rm lex.yy.c
fi
if [ -f rix.tab.c ]
then
echo Clearing: rix.tab.c -- bison output
rm rix.tab.c
fi
if [ -f rix.tab.h ]
then
echo Clearing: rix.tab.h -- bison output
rm rix.tab.h
fi
if [ -f out.c ]
then
rm out.c
fi
if [ -f out.h ]
then
rm out.h
fi
| berong91/ride | ride-gui/ride_parser/clr.sh | Shell | gpl-3.0 | 451 |
#!/bin/bash
#PBS -l nodes=1:ppn=20
#PBS -l walltime=48:00:00
#PBS -N session1_default
#PBS -A course
#PBS -q GpuQ
export THEANO_FLAGS=device=cpu,floatX=float32
cd $PBS_O_WORKDIR
python ./rescore_with_lm.py -n -b 0.5 \
${HOME}/models/model_session0.npz \
${HOME}/models/model_session0.npz.pkl \
${HOME}/data/wiki.tok.txt.gz.pkl \
${HOME}/data/europarl-v7.fr-en.en.tok.pkl \
./newstest2011.trans.en.tok \
./newstest2011.trans.en.tok.rescored
| BladeSun/NliWithKnowledge | session3/score_nbest.sh | Shell | bsd-3-clause | 452 |
#!/bin/bash
## (GEN002780: CAT II) The SA will configure the auditing system to audit
## use of privileged commands (unsuccessful and successful)
echo '==================================================='
echo ' Patching GEN002780: Audit use of privileged command'
echo '==================================================='
# THIS CODE HAS BEEN REPLACED BY audit.rules IN config
| jpschaaf/hardening-script-el6 | scripts/gen002780.sh | Shell | gpl-2.0 | 380 |
#!/bin/sh
# This file is part of BOINC.
# http://boinc.berkeley.edu
# Copyright (C) 2008 University of California
#
# BOINC is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation,
# either version 3 of the License, or (at your option) any later version.
#
# BOINC is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with BOINC. If not, see <http://www.gnu.org/licenses/>.
#
#
# Script to build Macintosh example_app using Makefile
#
# by Charlie Fenton 2/16/10
# Updated 10/11/10 for XCode 3.2 and OS 10.6
# Updated 7/12/12 for Xcode 4.3 and later which are not at a fixed address
# Updated 8/3/12 for TrueType fonts
# Updated 11/8/12 to add slide_show
# Updated 4/14/15 to fix build instructions
#
## This script requires OS 10.6 or later
#
## If you drag-install Xcode 4.3 or later, you must have opened Xcode
## and clicked the Install button on the dialog which appears to
## complete the Xcode installation before running this script.
#
## First, build the BOINC libraries using boinc/mac_build/BuildMacBOINC.sh
## This file assumes the locations of the needed libraries are those
## resulting from following the instructions found in the file:
## boinc/mac_build/HowToBuildBOINC_XCode.rtf
##
## In Terminal, CD to the example_app directory.
## cd [path]/example_app/
## then run this script:
## sh [path]/MakeMacExample.sh
##
GCCPATH=`xcrun -find gcc`
if [ $? -ne 0 ]; then
echo "ERROR: can't find gcc compiler"
exit 1
fi
GPPPATH=`xcrun -find g++`
if [ $? -ne 0 ]; then
echo "ERROR: can't find g++ compiler"
exit 1
fi
MAKEPATH=`xcrun -find make`
if [ $? -ne 0 ]; then
echo "ERROR: can't find make tool"
exit 1
fi
TOOLSPATH1=${MAKEPATH%/make}
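# e.g. if MAKEPATH is /usr/bin/make, the ${MAKEPATH%/make} suffix strip above
# yields TOOLSPATH1=/usr/bin (illustrative path; actual Xcode locations vary).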
ARPATH=`xcrun -find ar`
if [ $? -ne 0 ]; then
echo "ERROR: can't find ar tool"
exit 1
fi
TOOLSPATH2=${ARPATH%/ar}
export PATH="${TOOLSPATH1}":"${TOOLSPATH2}":/usr/local/bin:$PATH
SDKPATH=`xcodebuild -version -sdk macosx Path`
rm -fR i386 x86_64
echo
echo "***************************************************"
echo "******* Building 32-bit Intel Application *********"
echo "***************************************************"
echo
export CC="${GCCPATH}";export CXX="${GPPPATH}"
export LDFLAGS="-Wl,-syslibroot,${SDKPATH},-arch,i386"
export VARIANTFLAGS="-isysroot ${SDKPATH} -arch i386 -DMAC_OS_X_VERSION_MAX_ALLOWED=1040 -DMAC_OS_X_VERSION_MIN_REQUIRED=1040 -fvisibility=hidden -fvisibility-inlines-hidden"
export SDKROOT="${SDKPATH}"
export MACOSX_DEPLOYMENT_TARGET=10.4
make -f Makefile_mac clean
make -f Makefile_mac all
if [ $? -ne 0 ]; then exit 1; fi
mkdir i386
mv uc2 i386/
mv uc2_graphics i386/
mv slide_show i386/
echo
echo "***************************************************"
echo "******* Building 64-bit Intel Application *********"
echo "***************************************************"
echo
export CC="${GCCPATH}";export CXX="${GPPPATH}"
export LDFLAGS="-Wl,-syslibroot,${SDKPATH},-arch,x86_64"
export VARIANTFLAGS="-isysroot ${SDKPATH} -arch x86_64 -DMAC_OS_X_VERSION_MAX_ALLOWED=1050 -DMAC_OS_X_VERSION_MIN_REQUIRED=1050 -fvisibility=hidden -fvisibility-inlines-hidden"
export SDKROOT="${SDKPATH}"
export MACOSX_DEPLOYMENT_TARGET=10.5
make -f Makefile_mac clean
make -f Makefile_mac all
if [ $? -ne 0 ]; then exit 1; fi
mkdir x86_64
mv uc2 x86_64/
mv uc2_graphics x86_64/
mv slide_show x86_64/
rm -f uc2.o
rm -f ttfont.o
rm -f uc2_graphics.o
rm -f slide_show.o
echo
echo "***************************************************"
echo "**************** Build Succeeded! *****************"
echo "***************************************************"
echo
export CC="";export CXX=""
export LDFLAGS=""
export CPPFLAGS=""
export CFLAGS=""
export SDKROOT=""
exit 0
| Simek/boinc | samples/example_app/MakeMacExample.sh | Shell | gpl-3.0 | 4,081 |
#!/bin/bash
#
# The Alluxio Open Foundation licenses this work under the Apache License, version 2.0
# (the "License"). You may not use this work except in compliance with the License, which is
# available at www.apache.org/licenses/LICENSE-2.0
#
# This software is distributed on an "AS IS" basis, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied, as more fully set forth in the License.
#
# See the NOTICE file distributed with this work for information regarding copyright ownership.
#
# Running this script will create edited versions of the files in the .generated directory
set -e
readonly ALLUXIO_DOWNLOAD_URL=${1}
readonly DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
readonly SCRIPT="alluxio-emr.sh"
mkdir -p ${DIR}/.generated
cp ${DIR}/alluxio-emr.* ${DIR}/.generated
# replace ALLUXIO_DOWNLOAD_URL in emr bootstrap script
if [[ -n ${ALLUXIO_DOWNLOAD_URL} ]]; then
if [[ -z $(grep "readonly ALLUXIO_DOWNLOAD_URL=" ${DIR}/${SCRIPT}) ]]; then
echo "ERROR: unable to replace 'readonly ALLUXIO_DOWNLOAD_URL=' - pattern could not be found"
exit 1
fi
perl -p -e "s|^readonly ALLUXIO_DOWNLOAD_URL.*\$|readonly ALLUXIO_DOWNLOAD_URL=\"${ALLUXIO_DOWNLOAD_URL}\"|" ${DIR}/${SCRIPT} > ${DIR}/.generated/${SCRIPT}
fi
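# Example invocation (hypothetical URL -- substitute a real Alluxio tarball):
#   ./build.sh https://example.com/downloads/alluxio-bin.tar.gz
# This rewrites the ALLUXIO_DOWNLOAD_URL constant inside .generated/alluxio-emr.sh.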
| Alluxio/alluxio | integration/emr/build.sh | Shell | apache-2.0 | 1,292 |
# Generated from ltmain.m4sh.
# ltmain.sh (GNU libtool) 2.2.4
# Written by Gordon Matzigkeit <[email protected]>, 1996
# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
# This is free software; see the source for copying conditions. There is NO
# warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# GNU Libtool is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# As a special exception to the GNU General Public License,
# if you distribute this file as part of a program or library that
# is built using GNU Libtool, you may include this file under the
# same distribution terms that you use for the rest of that program.
#
# GNU Libtool is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Libtool; see the file COPYING. If not, a copy
# can be downloaded from http://www.gnu.org/licenses/gpl.html,
# or obtained by writing to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
# Usage: $progname [OPTION]... [MODE-ARG]...
#
# Provide generalized library-building support services.
#
# --config show all configuration variables
# --debug enable verbose shell tracing
# -n, --dry-run display commands without modifying any files
# --features display basic configuration information and exit
# --mode=MODE use operation mode MODE
# --preserve-dup-deps don't remove duplicate dependency libraries
# --quiet, --silent don't print informational messages
# --tag=TAG use configuration variables from tag TAG
# -v, --verbose print informational messages (default)
# --version print version information
# -h, --help print short or long help message
#
# MODE must be one of the following:
#
# clean remove files from the build directory
# compile compile a source file into a libtool object
# execute automatically set library path, then run a program
# finish complete the installation of libtool libraries
# install install libraries or executables
# link create a library or an executable
# uninstall remove libraries from an installed directory
#
# MODE-ARGS vary depending on the MODE.
# Try `$progname --help --mode=MODE' for a more detailed description of MODE.
#
# When reporting a bug, please describe a test case to reproduce it and
# include the following information:
#
# host-triplet: $host
# shell: $SHELL
# compiler: $LTCC
# compiler flags: $LTCFLAGS
# linker: $LD (gnu? $with_gnu_ld)
# $progname: (GNU libtool) 2.2.4
# automake: $automake_version
# autoconf: $autoconf_version
#
# Report bugs to <[email protected]>.
PROGRAM=ltmain.sh
PACKAGE=libtool
VERSION=2.2.4
TIMESTAMP=""
package_revision=1.2976
# Be Bourne compatible
if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then
emulate sh
NULLCMD=:
# Zsh 3.x and 4.x performs word splitting on ${1+"$@"}, which
# is contrary to our usage. Disable this feature.
alias -g '${1+"$@"}'='"$@"'
setopt NO_GLOB_SUBST
else
case `(set -o) 2>/dev/null` in *posix*) set -o posix;; esac
fi
BIN_SH=xpg4; export BIN_SH # for Tru64
DUALCASE=1; export DUALCASE # for MKS sh
# NLS nuisances: We save the old values to restore during execute mode.
# Only set LANG and LC_ALL to C if already set.
# These must not be set unconditionally because not all systems understand
# e.g. LANG=C (notably SCO).
lt_user_locale=
lt_safe_locale=
for lt_var in LANG LANGUAGE LC_ALL LC_CTYPE LC_COLLATE LC_MESSAGES
do
eval "if test \"\${$lt_var+set}\" = set; then
save_$lt_var=\$$lt_var
$lt_var=C
export $lt_var
lt_user_locale=\"$lt_var=\\\$save_\$lt_var; \$lt_user_locale\"
lt_safe_locale=\"$lt_var=C; \$lt_safe_locale\"
fi"
done
$lt_unset CDPATH
: ${CP="cp -f"}
: ${ECHO="echo"}
: ${EGREP="/usr/bin/grep -E"}
: ${FGREP="/usr/bin/grep -F"}
: ${GREP="/usr/bin/grep"}
: ${LN_S="ln -s"}
: ${MAKE="make"}
: ${MKDIR="mkdir"}
: ${MV="mv -f"}
: ${RM="rm -f"}
: ${SED="/opt/local/bin/gsed"}
: ${SHELL="${CONFIG_SHELL-/bin/sh}"}
: ${Xsed="$SED -e 1s/^X//"}
# Global variables:
EXIT_SUCCESS=0
EXIT_FAILURE=1
EXIT_MISMATCH=63 # $? = 63 is used to indicate version mismatch to missing.
EXIT_SKIP=77 # $? = 77 is used to indicate a skipped test to automake.
exit_status=$EXIT_SUCCESS
# Make sure IFS has a sensible default
lt_nl='
'
IFS=" $lt_nl"
dirname="s,/[^/]*$,,"
basename="s,^.*/,,"
# func_dirname_and_basename file append nondir_replacement
# perform func_basename and func_dirname in a single function
# call:
# dirname: Compute the dirname of FILE. If nonempty,
# add APPEND to the result, otherwise set result
# to NONDIR_REPLACEMENT.
# value returned in "$func_dirname_result"
# basename: Compute filename of FILE.
# value returned in "$func_basename_result"
# Implementation must be kept synchronized with func_dirname
# and func_basename. For efficiency, we do not delegate to
# those functions but instead duplicate the functionality here.
func_dirname_and_basename ()
{
# Extract subdirectory from the argument.
func_dirname_result=`$ECHO "X${1}" | $Xsed -e "$dirname"`
if test "X$func_dirname_result" = "X${1}"; then
func_dirname_result="${3}"
else
func_dirname_result="$func_dirname_result${2}"
fi
func_basename_result=`$ECHO "X${1}" | $Xsed -e "$basename"`
}
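# Illustration (not executed): func_dirname_and_basename "/usr/lib/libfoo.la" "/" "."
# leaves func_dirname_result=/usr/lib/ and func_basename_result=libfoo.la; for a
# bare "libfoo.la" the dirname falls back to the NONDIR_REPLACEMENT, ".".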
# Generated shell functions inserted here.
# Work around backward compatibility issue on IRIX 6.5. On IRIX 6.4+, sh
# is ksh but when the shell is invoked as "sh" and the current value of
# the _XPG environment variable is not equal to 1 (one), the special
# positional parameter $0, within a function call, is the name of the
# function.
progpath="$0"
# The name of this program:
# In the unlikely event $progname began with a '-', it would play havoc with
# func_echo (imagine progname=-n), so we prepend ./ in that case:
func_dirname_and_basename "$progpath"
progname=$func_basename_result
case $progname in
-*) progname=./$progname ;;
esac
# Make sure we have an absolute path for reexecution:
case $progpath in
[\\/]*|[A-Za-z]:\\*) ;;
*[\\/]*)
progdir=$func_dirname_result
progdir=`cd "$progdir" && pwd`
progpath="$progdir/$progname"
;;
*)
save_IFS="$IFS"
IFS=:
for progdir in $PATH; do
IFS="$save_IFS"
test -x "$progdir/$progname" && break
done
IFS="$save_IFS"
test -n "$progdir" || progdir=`pwd`
progpath="$progdir/$progname"
;;
esac
# Sed substitution that helps us do robust quoting. It backslashifies
# metacharacters that are still active within double-quoted strings.
Xsed="${SED}"' -e 1s/^X//'
sed_quote_subst='s/\([`"$\\]\)/\\\1/g'
# Same as above, but do not quote variable references.
double_quote_subst='s/\(["`\\]\)/\\\1/g'
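# Illustration (not executed): passing the string  echo "$HOME"  through
# $sed_quote_subst yields  echo \"\$HOME\" , safe to re-eval inside double
# quotes; $double_quote_subst leaves the $ alone so the reference stays live.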
# Re-`\' parameter expansions in output of double_quote_subst that were
# `\'-ed in input to the same. If an odd number of `\' preceded a '$'
# in input to double_quote_subst, that '$' was protected from expansion.
# Since each input `\' is now two `\'s, look for any number of runs of
# four `\'s followed by two `\'s and then a '$'; `\'-escape that '$'.
bs='\\'
bs2='\\\\'
bs4='\\\\\\\\'
dollar='\$'
sed_double_backslash="\
s/$bs4/&\\
/g
s/^$bs2$dollar/$bs&/
s/\\([^$bs]\\)$bs2$dollar/\\1$bs2$bs$dollar/g
s/\n//g"
# Standard options:
opt_dry_run=false
opt_help=false
opt_quiet=false
opt_verbose=false
opt_warning=:
# func_echo arg...
# Echo program name prefixed message, along with the current mode
# name if it has been set yet.
func_echo ()
{
$ECHO "$progname${mode+: }$mode: $*"
}
# func_verbose arg...
# Echo program name prefixed message in verbose mode only.
func_verbose ()
{
$opt_verbose && func_echo ${1+"$@"}
# A bug in bash halts the script if the last line of a function
# fails when set -e is in force, so we need another command to
# work around that:
:
}
# func_error arg...
# Echo program name prefixed message to standard error.
func_error ()
{
$ECHO "$progname${mode+: }$mode: "${1+"$@"} 1>&2
}
# func_warning arg...
# Echo program name prefixed warning message to standard error.
func_warning ()
{
$opt_warning && $ECHO "$progname${mode+: }$mode: warning: "${1+"$@"} 1>&2
# bash bug again:
:
}
# func_fatal_error arg...
# Echo program name prefixed message to standard error, and exit.
func_fatal_error ()
{
func_error ${1+"$@"}
exit $EXIT_FAILURE
}
# func_fatal_help arg...
# Echo program name prefixed message to standard error, followed by
# a help hint, and exit.
func_fatal_help ()
{
func_error ${1+"$@"}
func_fatal_error "$help"
}
help="Try \`$progname --help' for more information." ## default
# func_grep expression filename
# Check whether EXPRESSION matches any line of FILENAME, without output.
func_grep ()
{
$GREP "$1" "$2" >/dev/null 2>&1
}
# func_mkdir_p directory-path
# Make sure the entire path to DIRECTORY-PATH is available.
func_mkdir_p ()
{
my_directory_path="$1"
my_dir_list=
if test -n "$my_directory_path" && test "$opt_dry_run" != ":"; then
# Protect directory names starting with `-'
case $my_directory_path in
-*) my_directory_path="./$my_directory_path" ;;
esac
# While some portion of DIR does not yet exist...
while test ! -d "$my_directory_path"; do
# ...make a list in topmost first order. Use a colon delimited
# list incase some portion of path contains whitespace.
my_dir_list="$my_directory_path:$my_dir_list"
# If the last portion added has no slash in it, the list is done
case $my_directory_path in */*) ;; *) break ;; esac
# ...otherwise throw away the child directory and loop
my_directory_path=`$ECHO "X$my_directory_path" | $Xsed -e "$dirname"`
done
my_dir_list=`$ECHO "X$my_dir_list" | $Xsed -e 's,:*$,,'`
save_mkdir_p_IFS="$IFS"; IFS=':'
for my_dir in $my_dir_list; do
IFS="$save_mkdir_p_IFS"
# mkdir can fail with a `File exists' error if two processes
# try to create one of the directories concurrently. Don't
# stop in that case!
$MKDIR "$my_dir" 2>/dev/null || :
done
IFS="$save_mkdir_p_IFS"
# Bail out if we (or some other process) failed to create a directory.
test -d "$my_directory_path" || \
func_fatal_error "Failed to create \`$1'"
fi
}
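# Illustration (not executed): func_mkdir_p "tmp/a b/c" creates tmp, "tmp/a b"
# and "tmp/a b/c" in turn; the colon-delimited list is what lets the embedded
# whitespace in "a b" survive the word splitting in the loop above.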
# func_mktempdir [string]
# Make a temporary directory that won't clash with other running
# libtool processes, and avoids race conditions if possible. If
# given, STRING is the basename for that directory.
func_mktempdir ()
{
my_template="${TMPDIR-/tmp}/${1-$progname}"
if test "$opt_dry_run" = ":"; then
# Return a directory name, but don't create it in dry-run mode
my_tmpdir="${my_template}-$$"
else
# If mktemp works, use that first and foremost
my_tmpdir=`mktemp -d "${my_template}-XXXXXXXX" 2>/dev/null`
if test ! -d "$my_tmpdir"; then
# Failing that, at least try and use $RANDOM to avoid a race
my_tmpdir="${my_template}-${RANDOM-0}$$"
save_mktempdir_umask=`umask`
umask 0077
$MKDIR "$my_tmpdir"
umask $save_mktempdir_umask
fi
# If we're not in dry-run mode, bomb out on failure
test -d "$my_tmpdir" || \
func_fatal_error "cannot create temporary directory \`$my_tmpdir'"
fi
$ECHO "X$my_tmpdir" | $Xsed
}
# func_quote_for_eval arg
# Aesthetically quote ARG to be evaled later.
# This function returns two values: FUNC_QUOTE_FOR_EVAL_RESULT
# is double-quoted, suitable for a subsequent eval, whereas
# FUNC_QUOTE_FOR_EVAL_UNQUOTED_RESULT has merely all characters
# which are still active within double quotes backslashified.
func_quote_for_eval ()
{
case $1 in
*[\\\`\"\$]*)
func_quote_for_eval_unquoted_result=`$ECHO "X$1" | $Xsed -e "$sed_quote_subst"` ;;
*)
func_quote_for_eval_unquoted_result="$1" ;;
esac
case $func_quote_for_eval_unquoted_result in
# Double-quote args containing shell metacharacters to delay
# word splitting, command substitution and variable
# expansion for a subsequent eval.
# Many Bourne shells cannot handle close brackets correctly
# in scan sets, so we specify it separately.
*[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"")
func_quote_for_eval_result="\"$func_quote_for_eval_unquoted_result\""
;;
*)
func_quote_for_eval_result="$func_quote_for_eval_unquoted_result"
esac
}
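# Illustration (not executed): func_quote_for_eval 'foo bar' sets
# func_quote_for_eval_unquoted_result to  foo bar  and, because of the space,
# func_quote_for_eval_result to  "foo bar"  (double quotes included).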
# func_quote_for_expand arg
# Aesthetically quote ARG to be evaled later; same as above,
# but do not quote variable references.
func_quote_for_expand ()
{
case $1 in
*[\\\`\"]*)
my_arg=`$ECHO "X$1" | $Xsed \
-e "$double_quote_subst" -e "$sed_double_backslash"` ;;
*)
my_arg="$1" ;;
esac
case $my_arg in
# Double-quote args containing shell metacharacters to delay
# word splitting and command substitution for a subsequent eval.
# Many Bourne shells cannot handle close brackets correctly
# in scan sets, so we specify it separately.
*[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"")
my_arg="\"$my_arg\""
;;
esac
func_quote_for_expand_result="$my_arg"
}
# func_show_eval cmd [fail_exp]
# Unless opt_silent is true, then output CMD. Then, if opt_dryrun is
# not true, evaluate CMD. If the evaluation of CMD fails, and FAIL_EXP
# is given, then evaluate it.
func_show_eval ()
{
my_cmd="$1"
my_fail_exp="${2-:}"
${opt_silent-false} || {
func_quote_for_expand "$my_cmd"
eval "func_echo $func_quote_for_expand_result"
}
if ${opt_dry_run-false}; then :; else
eval "$my_cmd"
my_status=$?
if test "$my_status" -eq 0; then :; else
eval "(exit $my_status); $my_fail_exp"
fi
fi
}
# func_show_eval_locale cmd [fail_exp]
# Unless opt_silent is true, then output CMD. Then, if opt_dryrun is
# not true, evaluate CMD. If the evaluation of CMD fails, and FAIL_EXP
# is given, then evaluate it. Use the saved locale for evaluation.
func_show_eval_locale ()
{
my_cmd="$1"
my_fail_exp="${2-:}"
${opt_silent-false} || {
func_quote_for_expand "$my_cmd"
eval "func_echo $func_quote_for_expand_result"
}
if ${opt_dry_run-false}; then :; else
eval "$lt_user_locale
$my_cmd"
my_status=$?
eval "$lt_safe_locale"
if test "$my_status" -eq 0; then :; else
eval "(exit $my_status); $my_fail_exp"
fi
fi
}
# func_version
# Echo version message to standard output and exit.
func_version ()
{
$SED -n '/^# '$PROGRAM' (GNU /,/# warranty; / {
s/^# //
s/^# *$//
s/\((C)\)[ 0-9,-]*\( [1-9][0-9]*\)/\1\2/
p
}' < "$progpath"
exit $?
}
# func_usage
# Echo short help message to standard output and exit.
func_usage ()
{
$SED -n '/^# Usage:/,/# -h/ {
s/^# //
s/^# *$//
s/\$progname/'$progname'/
p
}' < "$progpath"
$ECHO
$ECHO "run \`$progname --help | more' for full usage"
exit $?
}
# func_help
# Echo long help message to standard output and exit.
func_help ()
{
$SED -n '/^# Usage:/,/# Report bugs to/ {
s/^# //
s/^# *$//
s*\$progname*'$progname'*
s*\$host*'"$host"'*
s*\$SHELL*'"$SHELL"'*
s*\$LTCC*'"$LTCC"'*
s*\$LTCFLAGS*'"$LTCFLAGS"'*
s*\$LD*'"$LD"'*
s/\$with_gnu_ld/'"$with_gnu_ld"'/
s/\$automake_version/'"`(automake --version) 2>/dev/null |$SED 1q`"'/
s/\$autoconf_version/'"`(autoconf --version) 2>/dev/null |$SED 1q`"'/
p
}' < "$progpath"
exit $?
}
# func_missing_arg argname
# Echo program name prefixed message to standard error and set global
# exit_cmd.
func_missing_arg ()
{
func_error "missing argument for $1"
exit_cmd=exit
}
exit_cmd=:
# Check that we have a working $ECHO.
if test "X$1" = X--no-reexec; then
# Discard the --no-reexec flag, and continue.
shift
elif test "X$1" = X--fallback-echo; then
# Avoid inline document here, it may be left over
:
elif test "X`{ $ECHO '\t'; } 2>/dev/null`" = 'X\t'; then
# Yippee, $ECHO works!
:
else
# Restart under the correct shell, and then maybe $ECHO will work.
exec $SHELL "$progpath" --no-reexec ${1+"$@"}
fi
# Same for EGREP, and just to be sure, do LTCC as well
if test "x$EGREP" = x ; then
EGREP=egrep
fi
if test "x$LTCC" = x ; then
LTCC=${CC-gcc}
fi
if test "X$1" = X--fallback-echo; then
# used as fallback echo
shift
cat <<EOF
$*
EOF
exit $EXIT_SUCCESS
fi
magic="%%%MAGIC variable%%%"
magic_exe="%%%MAGIC EXE variable%%%"
# Global variables.
# $mode is unset
nonopt=
execute_dlfiles=
preserve_args=
lo2o="s/\\.lo\$/.${objext}/"
o2lo="s/\\.${objext}\$/.lo/"
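# Illustration (not executed): with objext=o, $lo2o maps foo.lo to foo.o and
# $o2lo maps it back; the pair is used wherever a .lo libtool object must be
# matched with its underlying real object file.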
extracted_archives=
extracted_serial=0
opt_dry_run=false
opt_duplicate_deps=false
opt_silent=false
opt_debug=:
# If this variable is set in any of the actions, the command in it
# will be execed at the end. This prevents here-documents from being
# left over by shells.
exec_cmd=
# func_fatal_configuration arg...
# Echo program name prefixed message to standard error, followed by
# a configuration failure hint, and exit.
func_fatal_configuration ()
{
func_error ${1+"$@"}
func_error "See the $PACKAGE documentation for more information."
func_fatal_error "Fatal configuration error."
}
# func_config
# Display the configuration for all the tags in this script.
func_config ()
{
re_begincf='^# ### BEGIN LIBTOOL'
re_endcf='^# ### END LIBTOOL'
# Default configuration.
$SED "1,/$re_begincf CONFIG/d;/$re_endcf CONFIG/,\$d" < "$progpath"
# Now print the configurations for the tags.
for tagname in $taglist; do
$SED -n "/$re_begincf TAG CONFIG: $tagname\$/,/$re_endcf TAG CONFIG: $tagname\$/p" < "$progpath"
done
exit $?
}
# func_features
# Display the features supported by this script.
func_features ()
{
$ECHO "host: $host"
if test "$build_libtool_libs" = yes; then
$ECHO "enable shared libraries"
else
$ECHO "disable shared libraries"
fi
if test "$build_old_libs" = yes; then
$ECHO "enable static libraries"
else
$ECHO "disable static libraries"
fi
exit $?
}
# func_enable_tag tagname
# Verify that TAGNAME is valid, and either flag an error and exit, or
# enable the TAGNAME tag. We also add TAGNAME to the global $taglist
# variable here.
func_enable_tag ()
{
# Global variable:
tagname="$1"
re_begincf="^# ### BEGIN LIBTOOL TAG CONFIG: $tagname\$"
re_endcf="^# ### END LIBTOOL TAG CONFIG: $tagname\$"
sed_extractcf="/$re_begincf/,/$re_endcf/p"
# Validate tagname.
case $tagname in
*[!-_A-Za-z0-9,/]*)
func_fatal_error "invalid tag name: $tagname"
;;
esac
# Don't test for the "default" C tag, as we know it's
# there but not specially marked.
case $tagname in
CC) ;;
*)
if $GREP "$re_begincf" "$progpath" >/dev/null 2>&1; then
taglist="$taglist $tagname"
# Evaluate the configuration. Be careful to quote the path
# and the sed script, to avoid splitting on whitespace, but
# also don't use non-portable quotes within backquotes within
# quotes; we have to do it in 2 steps:
extractedcf=`$SED -n -e "$sed_extractcf" < "$progpath"`
eval "$extractedcf"
else
func_error "ignoring unknown tag $tagname"
fi
;;
esac
}
# Parse options once, thoroughly. This comes as soon as possible in
# the script to make things like `libtool --version' happen quickly.
{
# Shorthand for --mode=foo, only valid as the first argument
case $1 in
clean|clea|cle|cl)
shift; set dummy --mode clean ${1+"$@"}; shift
;;
compile|compil|compi|comp|com|co|c)
shift; set dummy --mode compile ${1+"$@"}; shift
;;
execute|execut|execu|exec|exe|ex|e)
shift; set dummy --mode execute ${1+"$@"}; shift
;;
finish|finis|fini|fin|fi|f)
shift; set dummy --mode finish ${1+"$@"}; shift
;;
install|instal|insta|inst|ins|in|i)
shift; set dummy --mode install ${1+"$@"}; shift
;;
link|lin|li|l)
shift; set dummy --mode link ${1+"$@"}; shift
;;
uninstall|uninstal|uninsta|uninst|unins|unin|uni|un|u)
shift; set dummy --mode uninstall ${1+"$@"}; shift
;;
esac
# Parse non-mode specific arguments:
while test "$#" -gt 0; do
opt="$1"
shift
case $opt in
--config) func_config ;;
--debug) preserve_args="$preserve_args $opt"
func_echo "enabling shell trace mode"
opt_debug='set -x'
$opt_debug
;;
-dlopen) test "$#" -eq 0 && func_missing_arg "$opt" && break
execute_dlfiles="$execute_dlfiles $1"
shift
;;
--dry-run | -n) opt_dry_run=: ;;
--features) func_features ;;
--finish) mode="finish" ;;
--mode) test "$#" -eq 0 && func_missing_arg "$opt" && break
case $1 in
# Valid mode arguments:
clean) ;;
compile) ;;
execute) ;;
finish) ;;
install) ;;
link) ;;
relink) ;;
uninstall) ;;
# Catch anything else as an error
*) func_error "invalid argument for $opt"
exit_cmd=exit
break
;;
esac
mode="$1"
shift
;;
--preserve-dup-deps)
opt_duplicate_deps=: ;;
--quiet|--silent) preserve_args="$preserve_args $opt"
opt_silent=:
;;
--verbose| -v) preserve_args="$preserve_args $opt"
opt_silent=false
;;
--tag) test "$#" -eq 0 && func_missing_arg "$opt" && break
preserve_args="$preserve_args $opt $1"
func_enable_tag "$1" # tagname is set here
shift
;;
# Separate optargs to long options:
-dlopen=*|--mode=*|--tag=*)
func_opt_split "$opt"
set dummy "$func_opt_split_opt" "$func_opt_split_arg" ${1+"$@"}
shift
;;
-\?|-h) func_usage ;;
--help) opt_help=: ;;
--version) func_version ;;
-*) func_fatal_help "unrecognized option \`$opt'" ;;
*) nonopt="$opt"
break
;;
esac
done
case $host in
*cygwin* | *mingw* | *pw32*)
# don't eliminate duplications in $postdeps and $predeps
opt_duplicate_compiler_generated_deps=:
;;
*)
opt_duplicate_compiler_generated_deps=$opt_duplicate_deps
;;
esac
# Having warned about all mis-specified options, bail out if
# anything was wrong.
$exit_cmd $EXIT_FAILURE
}
# func_check_version_match
# Ensure that we are using m4 macros, and libtool script from the same
# release of libtool.
func_check_version_match ()
{
if test "$package_revision" != "$macro_revision"; then
if test "$VERSION" != "$macro_version"; then
if test -z "$macro_version"; then
cat >&2 <<_LT_EOF
$progname: Version mismatch error. This is $PACKAGE $VERSION, but the
$progname: definition of this LT_INIT comes from an older release.
$progname: You should recreate aclocal.m4 with macros from $PACKAGE $VERSION
$progname: and run autoconf again.
_LT_EOF
else
cat >&2 <<_LT_EOF
$progname: Version mismatch error. This is $PACKAGE $VERSION, but the
$progname: definition of this LT_INIT comes from $PACKAGE $macro_version.
$progname: You should recreate aclocal.m4 with macros from $PACKAGE $VERSION
$progname: and run autoconf again.
_LT_EOF
fi
else
cat >&2 <<_LT_EOF
$progname: Version mismatch error. This is $PACKAGE $VERSION, revision $package_revision,
$progname: but the definition of this LT_INIT comes from revision $macro_revision.
$progname: You should recreate aclocal.m4 with macros from revision $package_revision
$progname: of $PACKAGE $VERSION and run autoconf again.
_LT_EOF
fi
exit $EXIT_MISMATCH
fi
}
## ----------- ##
## Main. ##
## ----------- ##
$opt_help || {
# Sanity checks first:
func_check_version_match
if test "$build_libtool_libs" != yes && test "$build_old_libs" != yes; then
func_fatal_configuration "not configured to build any kind of library"
fi
test -z "$mode" && func_fatal_error "error: you must specify a MODE."
# Darwin sucks
eval std_shrext=\"$shrext_cmds\"
# Only execute mode is allowed to have -dlopen flags.
if test -n "$execute_dlfiles" && test "$mode" != execute; then
func_error "unrecognized option \`-dlopen'"
$ECHO "$help" 1>&2
exit $EXIT_FAILURE
fi
# Change the help message to a mode-specific one.
generic_help="$help"
help="Try \`$progname --help --mode=$mode' for more information."
}
# func_lalib_p file
# True iff FILE is a libtool `.la' library or `.lo' object file.
# This function is only a basic sanity check; it will hardly flush out
# determined imposters.
func_lalib_p ()
{
$SED -e 4q "$1" 2>/dev/null \
| $GREP "^# Generated by .*$PACKAGE" > /dev/null 2>&1
}
# func_lalib_unsafe_p file
# True iff FILE is a libtool `.la' library or `.lo' object file.
# This function implements the same check as func_lalib_p without
# resorting to external programs. To this end, it redirects stdin and
# closes it afterwards, without saving the original file descriptor.
# As a safety measure, use it only where a negative result would be
# fatal anyway. Works if `file' does not exist.
func_lalib_unsafe_p ()
{
lalib_p=no
if test -r "$1" && exec 5<&0 <"$1"; then
for lalib_p_l in 1 2 3 4
do
read lalib_p_line
case "$lalib_p_line" in
\#\ Generated\ by\ *$PACKAGE* ) lalib_p=yes; break;;
esac
done
exec 0<&5 5<&-
fi
test "$lalib_p" = yes
}
# func_ltwrapper_script_p file
# True iff FILE is a libtool wrapper script
# This function is only a basic sanity check; it will hardly flush out
# determined imposters.
func_ltwrapper_script_p ()
{
func_lalib_p "$1"
}
# func_ltwrapper_executable_p file
# True iff FILE is a libtool wrapper executable
# This function is only a basic sanity check; it will hardly flush out
# determined imposters.
func_ltwrapper_executable_p ()
{
func_ltwrapper_exec_suffix=
case $1 in
*.exe) ;;
*) func_ltwrapper_exec_suffix=.exe ;;
esac
$GREP "$magic_exe" "$1$func_ltwrapper_exec_suffix" >/dev/null 2>&1
}
# func_ltwrapper_scriptname file
# Assumes file is an ltwrapper_executable
# uses $file to determine the appropriate filename for a
# temporary ltwrapper_script.
func_ltwrapper_scriptname ()
{
func_ltwrapper_scriptname_result=""
if func_ltwrapper_executable_p "$1"; then
func_dirname_and_basename "$1" "" "."
func_stripname '' '.exe' "$func_basename_result"
func_ltwrapper_scriptname_result="$func_dirname_result/$objdir/${func_stripname_result}_ltshwrapper"
fi
}
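# Illustration (not executed): for a wrapper executable ./foo.exe and the
# common objdir of .libs (a configure-time value, so treat it as an
# assumption), the computed name is ./.libs/foo_ltshwrapper.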
# func_ltwrapper_p file
# True iff FILE is a libtool wrapper script or wrapper executable
# This function is only a basic sanity check; it will hardly flush out
# determined imposters.
func_ltwrapper_p ()
{
func_ltwrapper_script_p "$1" || func_ltwrapper_executable_p "$1"
}
# func_execute_cmds commands fail_cmd
# Execute tilde-delimited COMMANDS.
# If FAIL_CMD is given, eval that upon failure.
# FAIL_CMD may read-access the current command in variable CMD!
func_execute_cmds ()
{
$opt_debug
save_ifs=$IFS; IFS='~'
for cmd in $1; do
IFS=$save_ifs
eval cmd=\"$cmd\"
func_show_eval "$cmd" "${2-:}"
done
IFS=$save_ifs
}
# func_source file
# Source FILE, adding directory component if necessary.
# Note that it is not necessary on cygwin/mingw to append a dot to
# FILE even if both FILE and FILE.exe exist: automatic-append-.exe
# behavior happens only for exec(3), not for open(2)! Also, sourcing
# `FILE.' does not work on cygwin managed mounts.
func_source ()
{
$opt_debug
case $1 in
*/* | *\\*) . "$1" ;;
*) . "./$1" ;;
esac
}
# func_infer_tag arg
# Infer tagged configuration to use if any are available and
# if one wasn't chosen via the "--tag" command line option.
# Only attempt this if the compiler in the base compile
# command doesn't match the default compiler.
# arg is usually of the form 'gcc ...'
func_infer_tag ()
{
$opt_debug
if test -n "$available_tags" && test -z "$tagname"; then
CC_quoted=
for arg in $CC; do
func_quote_for_eval "$arg"
CC_quoted="$CC_quoted $func_quote_for_eval_result"
done
case $@ in
# Blanks in the command may have been stripped by the calling shell,
# but not from the CC environment variable when configure was run.
" $CC "* | "$CC "* | " `$ECHO $CC` "* | "`$ECHO $CC` "* | " $CC_quoted"* | "$CC_quoted "* | " `$ECHO $CC_quoted` "* | "`$ECHO $CC_quoted` "*) ;;
# Blanks at the start of $base_compile will cause this to fail
# if we don't check for them as well.
*)
for z in $available_tags; do
if $GREP "^# ### BEGIN LIBTOOL TAG CONFIG: $z$" < "$progpath" > /dev/null; then
# Evaluate the configuration.
eval "`${SED} -n -e '/^# ### BEGIN LIBTOOL TAG CONFIG: '$z'$/,/^# ### END LIBTOOL TAG CONFIG: '$z'$/p' < $progpath`"
CC_quoted=
for arg in $CC; do
# Double-quote args containing other shell metacharacters.
func_quote_for_eval "$arg"
CC_quoted="$CC_quoted $func_quote_for_eval_result"
done
case "$@ " in
" $CC "* | "$CC "* | " `$ECHO $CC` "* | "`$ECHO $CC` "* | " $CC_quoted"* | "$CC_quoted "* | " `$ECHO $CC_quoted` "* | "`$ECHO $CC_quoted` "*)
# The compiler in the base compile command matches
# the one in the tagged configuration.
# Assume this is the tagged configuration we want.
tagname=$z
break
;;
esac
fi
done
# If $tagname still isn't set, then no tagged configuration
# was found and let the user know that the "--tag" command
# line option must be used.
if test -z "$tagname"; then
func_echo "unable to infer tagged configuration"
func_fatal_error "specify a tag with \`--tag'"
# else
# func_verbose "using $tagname tagged configuration"
fi
;;
esac
fi
}
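# Typical use (illustrative command line): when a C++ compile is driven through
# a C-configured libtool, something like
#   libtool --tag=CXX --mode=compile g++ -c foo.cpp
# names the tag explicitly instead of relying on the inference above.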
# func_write_libtool_object output_name pic_name nonpic_name
# Create a libtool object file (analogous to a ".la" file),
# but don't create it if we're doing a dry run.
func_write_libtool_object ()
{
write_libobj=${1}
if test "$build_libtool_libs" = yes; then
write_lobj=\'${2}\'
else
write_lobj=none
fi
if test "$build_old_libs" = yes; then
write_oldobj=\'${3}\'
else
write_oldobj=none
fi
$opt_dry_run || {
cat >${write_libobj}T <<EOF
# $write_libobj - a libtool object file
# Generated by $PROGRAM (GNU $PACKAGE$TIMESTAMP) $VERSION
#
# Please DO NOT delete this file!
# It is necessary for linking the library.
# Name of the PIC object.
pic_object=$write_lobj
# Name of the non-PIC object
non_pic_object=$write_oldobj
EOF
$MV "${write_libobj}T" "${write_libobj}"
}
}
# func_mode_compile arg...
func_mode_compile ()
{
$opt_debug
# Get the compilation command and the source file.
base_compile=
srcfile="$nonopt" # always keep a non-empty value in "srcfile"
suppress_opt=yes
suppress_output=
arg_mode=normal
libobj=
later=
pie_flag=
for arg
do
case $arg_mode in
arg )
# do not "continue". Instead, add this to base_compile
lastarg="$arg"
arg_mode=normal
;;
target )
libobj="$arg"
arg_mode=normal
continue
;;
normal )
# Accept any command-line options.
case $arg in
-o)
test -n "$libobj" && \
func_fatal_error "you cannot specify \`-o' more than once"
arg_mode=target
continue
;;
-pie | -fpie | -fPIE)
pie_flag="$pie_flag $arg"
continue
;;
-shared | -static | -prefer-pic | -prefer-non-pic)
later="$later $arg"
continue
;;
-no-suppress)
suppress_opt=no
continue
;;
-Xcompiler)
arg_mode=arg # the next one goes into the "base_compile" arg list
continue # The current "srcfile" will either be retained or
;; # replaced later. I would guess that would be a bug.
-Wc,*)
func_stripname '-Wc,' '' "$arg"
args=$func_stripname_result
lastarg=
save_ifs="$IFS"; IFS=','
for arg in $args; do
IFS="$save_ifs"
func_quote_for_eval "$arg"
lastarg="$lastarg $func_quote_for_eval_result"
done
IFS="$save_ifs"
func_stripname ' ' '' "$lastarg"
lastarg=$func_stripname_result
# Add the arguments to base_compile.
base_compile="$base_compile $lastarg"
continue
;;
*)
# Accept the current argument as the source file.
# The previous "srcfile" becomes the current argument.
#
lastarg="$srcfile"
srcfile="$arg"
;;
esac # case $arg
;;
esac # case $arg_mode
# Aesthetically quote the previous argument.
func_quote_for_eval "$lastarg"
base_compile="$base_compile $func_quote_for_eval_result"
done # for arg
case $arg_mode in
arg)
func_fatal_error "you must specify an argument for -Xcompile"
;;
target)
func_fatal_error "you must specify a target with \`-o'"
;;
*)
# Get the name of the library object.
test -z "$libobj" && {
func_basename "$srcfile"
libobj="$func_basename_result"
}
;;
esac
# Recognize several different file suffixes.
# If the user specifies -o file.o, it is replaced with file.lo
case $libobj in
*.[cCFSifmso] | \
*.ada | *.adb | *.ads | *.asm | \
*.c++ | *.cc | *.ii | *.class | *.cpp | *.cxx | \
*.[fF][09]? | *.for | *.java | *.obj | *.sx)
func_xform "$libobj"
libobj=$func_xform_result
;;
esac
case $libobj in
*.lo) func_lo2o "$libobj"; obj=$func_lo2o_result ;;
*)
func_fatal_error "cannot determine name of library object from \`$libobj'"
;;
esac
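  # Illustration (not executed): a libobj of hello.cpp is rewritten by
  # func_xform to hello.lo, and func_lo2o then derives the real object
  # name hello.o from it (assuming objext=o).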
func_infer_tag $base_compile
for arg in $later; do
case $arg in
-shared)
test "$build_libtool_libs" != yes && \
func_fatal_configuration "can not build a shared library"
build_old_libs=no
continue
;;
-static)
build_libtool_libs=no
build_old_libs=yes
continue
;;
-prefer-pic)
pic_mode=yes
continue
;;
-prefer-non-pic)
pic_mode=no
continue
;;
esac
done
func_quote_for_eval "$libobj"
test "X$libobj" != "X$func_quote_for_eval_result" \
&& $ECHO "X$libobj" | $GREP '[]~#^*{};<>?"'"'"' &()|`$[]' \
&& func_warning "libobj name \`$libobj' may not contain shell special characters."
func_dirname_and_basename "$obj" "/" ""
objname="$func_basename_result"
xdir="$func_dirname_result"
lobj=${xdir}$objdir/$objname
test -z "$base_compile" && \
func_fatal_help "you must specify a compilation command"
# Delete any leftover library objects.
if test "$build_old_libs" = yes; then
removelist="$obj $lobj $libobj ${libobj}T"
else
removelist="$lobj $libobj ${libobj}T"
fi
# On Cygwin there's no "real" PIC flag so we must build both object types
case $host_os in
cygwin* | mingw* | pw32* | os2*)
pic_mode=default
;;
esac
if test "$pic_mode" = no && test "$deplibs_check_method" != pass_all; then
# non-PIC code in shared libraries is not supported
pic_mode=default
fi
# Calculate the filename of the output object if compiler does
# not support -o with -c
if test "$compiler_c_o" = no; then
output_obj=`$ECHO "X$srcfile" | $Xsed -e 's%^.*/%%' -e 's%\.[^.]*$%%'`.${objext}
lockfile="$output_obj.lock"
else
output_obj=
need_locks=no
lockfile=
fi
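  # Illustration (not executed): for srcfile src/main.c and objext=o, the
  # fallback output_obj above is main.o and the lock file is main.o.lock;
  # compilers that do support -c together with -o skip the lock entirely.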
# Lock this critical section if it is needed
# We use this script file to make the link; it avoids creating a new file
if test "$need_locks" = yes; then
until $opt_dry_run || ln "$progpath" "$lockfile" 2>/dev/null; do
func_echo "Waiting for $lockfile to be removed"
sleep 2
done
elif test "$need_locks" = warn; then
if test -f "$lockfile"; then
$ECHO "\
*** ERROR, $lockfile exists and contains:
`cat $lockfile 2>/dev/null`
This indicates that another process is trying to use the same
temporary object file, and libtool could not work around it because
your compiler does not support \`-c' and \`-o' together. If you
repeat this compilation, it may succeed, by chance, but you had better
avoid parallel builds (make -j) in this platform, or get a better
compiler."
$opt_dry_run || $RM $removelist
exit $EXIT_FAILURE
fi
removelist="$removelist $output_obj"
$ECHO "$srcfile" > "$lockfile"
fi
$opt_dry_run || $RM $removelist
removelist="$removelist $lockfile"
trap '$opt_dry_run || $RM $removelist; exit $EXIT_FAILURE' 1 2 15
if test -n "$fix_srcfile_path"; then
eval srcfile=\"$fix_srcfile_path\"
fi
func_quote_for_eval "$srcfile"
qsrcfile=$func_quote_for_eval_result
# Only build a PIC object if we are building libtool libraries.
if test "$build_libtool_libs" = yes; then
# Without this assignment, base_compile gets emptied.
fbsd_hideous_sh_bug=$base_compile
if test "$pic_mode" != no; then
command="$base_compile $qsrcfile $pic_flag"
else
# Don't build PIC code
command="$base_compile $qsrcfile"
fi
func_mkdir_p "$xdir$objdir"
if test -z "$output_obj"; then
# Place PIC objects in $objdir
command="$command -o $lobj"
fi
func_show_eval_locale "$command" \
'test -n "$output_obj" && $RM $removelist; exit $EXIT_FAILURE'
if test "$need_locks" = warn &&
test "X`cat $lockfile 2>/dev/null`" != "X$srcfile"; then
$ECHO "\
*** ERROR, $lockfile contains:
`cat $lockfile 2>/dev/null`
but it should contain:
$srcfile
This indicates that another process is trying to use the same
temporary object file, and libtool could not work around it because
your compiler does not support \`-c' and \`-o' together. If you
repeat this compilation, it may succeed, by chance, but you had better
avoid parallel builds (make -j) in this platform, or get a better
compiler."
$opt_dry_run || $RM $removelist
exit $EXIT_FAILURE
fi
# Just move the object if needed, then go on to compile the next one
if test -n "$output_obj" && test "X$output_obj" != "X$lobj"; then
func_show_eval '$MV "$output_obj" "$lobj"' \
'error=$?; $opt_dry_run || $RM $removelist; exit $error'
fi
# Allow error messages only from the first compilation.
if test "$suppress_opt" = yes; then
suppress_output=' >/dev/null 2>&1'
fi
fi
# Only build a position-dependent object if we build old libraries.
if test "$build_old_libs" = yes; then
if test "$pic_mode" != yes; then
# Don't build PIC code
command="$base_compile $qsrcfile$pie_flag"
else
command="$base_compile $qsrcfile $pic_flag"
fi
if test "$compiler_c_o" = yes; then
command="$command -o $obj"
fi
# Suppress compiler output if we already did a PIC compilation.
command="$command$suppress_output"
func_show_eval_locale "$command" \
'$opt_dry_run || $RM $removelist; exit $EXIT_FAILURE'
if test "$need_locks" = warn &&
test "X`cat $lockfile 2>/dev/null`" != "X$srcfile"; then
$ECHO "\
*** ERROR, $lockfile contains:
`cat $lockfile 2>/dev/null`
but it should contain:
$srcfile
This indicates that another process is trying to use the same
temporary object file, and libtool could not work around it because
your compiler does not support \`-c' and \`-o' together. If you
repeat this compilation, it may succeed, by chance, but you had better
avoid parallel builds (make -j) on this platform, or get a better
compiler."
$opt_dry_run || $RM $removelist
exit $EXIT_FAILURE
fi
# Just move the object if needed
if test -n "$output_obj" && test "X$output_obj" != "X$obj"; then
func_show_eval '$MV "$output_obj" "$obj"' \
'error=$?; $opt_dry_run || $RM $removelist; exit $error'
fi
fi
$opt_dry_run || {
func_write_libtool_object "$libobj" "$objdir/$objname" "$objname"
# Unlock the critical section if it was locked
if test "$need_locks" != no; then
removelist=$lockfile
$RM "$lockfile"
fi
}
exit $EXIT_SUCCESS
}
$opt_help || {
test "$mode" = compile && func_mode_compile ${1+"$@"}
}
func_mode_help ()
{
# We need to display help for each of the modes.
case $mode in
"")
# Generic help is extracted from the usage comments
# at the start of this file.
func_help
;;
clean)
$ECHO \
"Usage: $progname [OPTION]... --mode=clean RM [RM-OPTION]... FILE...
Remove files from the build directory.
RM is the name of the program to use to delete files associated with each FILE
(typically \`/bin/rm'). RM-OPTIONS are options (such as \`-f') to be passed
to RM.
If FILE is a libtool library, object or program, all the files associated
with it are deleted. Otherwise, only FILE itself is deleted using RM."
;;
compile)
$ECHO \
"Usage: $progname [OPTION]... --mode=compile COMPILE-COMMAND... SOURCEFILE
Compile a source file into a libtool library object.
This mode accepts the following additional options:
-o OUTPUT-FILE set the output file name to OUTPUT-FILE
-no-suppress do not suppress compiler output for multiple passes
  -prefer-pic       try to build PIC objects only
  -prefer-non-pic   try to build non-PIC objects only
-shared do not build a \`.o' file suitable for static linking
-static only build a \`.o' file suitable for static linking
COMPILE-COMMAND is a command to be used in creating a \`standard' object file
from the given SOURCEFILE.
The output file name is determined by removing the directory component from
SOURCEFILE, then substituting the C source code suffix \`.c' with the
library object suffix, \`.lo'."
;;
execute)
$ECHO \
"Usage: $progname [OPTION]... --mode=execute COMMAND [ARGS]...
Automatically set library path, then run a program.
This mode accepts the following additional options:
-dlopen FILE add the directory containing FILE to the library path
This mode sets the library path environment variable according to \`-dlopen'
flags.
If any of the ARGS are libtool executable wrappers, then they are translated
into their corresponding uninstalled binary, and any of their required library
directories are added to the library path.
Then, COMMAND is executed, with ARGS as arguments."
;;
finish)
$ECHO \
"Usage: $progname [OPTION]... --mode=finish [LIBDIR]...
Complete the installation of libtool libraries.
Each LIBDIR is a directory that contains libtool libraries.
The commands that this mode executes may require superuser privileges. Use
the \`--dry-run' option if you just want to see what would be executed."
;;
install)
$ECHO \
"Usage: $progname [OPTION]... --mode=install INSTALL-COMMAND...
Install executables or libraries.
INSTALL-COMMAND is the installation command. The first component should be
either the \`install' or \`cp' program.
The following components of INSTALL-COMMAND are treated specially:
-inst-prefix PREFIX-DIR Use PREFIX-DIR as a staging area for installation
The rest of the components are interpreted as arguments to that command (only
BSD-compatible install options are recognized)."
;;
link)
$ECHO \
"Usage: $progname [OPTION]... --mode=link LINK-COMMAND...
Link object files or libraries together to form another library, or to
create an executable program.
LINK-COMMAND is a command using the C compiler that you would use to create
a program from several object files.
The following components of LINK-COMMAND are treated specially:
-all-static do not do any dynamic linking at all
-avoid-version do not add a version suffix if possible
-dlopen FILE \`-dlpreopen' FILE if it cannot be dlopened at runtime
-dlpreopen FILE link in FILE and add its symbols to lt_preloaded_symbols
-export-dynamic allow symbols from OUTPUT-FILE to be resolved with dlsym(3)
-export-symbols SYMFILE
try to export only the symbols listed in SYMFILE
-export-symbols-regex REGEX
try to export only the symbols matching REGEX
-LLIBDIR search LIBDIR for required installed libraries
-lNAME OUTPUT-FILE requires the installed library libNAME
  -module           build a library that can be dlopened
-no-fast-install disable the fast-install mode
-no-install link a not-installable executable
-no-undefined declare that a library does not refer to external symbols
-o OUTPUT-FILE create OUTPUT-FILE from the specified objects
-objectlist FILE Use a list of object files found in FILE to specify objects
-precious-files-regex REGEX
don't remove output files matching REGEX
-release RELEASE specify package release information
-rpath LIBDIR the created library will eventually be installed in LIBDIR
-R[ ]LIBDIR add LIBDIR to the runtime path of programs and libraries
-shared only do dynamic linking of libtool libraries
-shrext SUFFIX override the standard shared library file extension
-static do not do any dynamic linking of uninstalled libtool libraries
-static-libtool-libs
do not do any dynamic linking of libtool libraries
-version-info CURRENT[:REVISION[:AGE]]
specify library version info [each variable defaults to 0]
-weak LIBNAME declare that the target provides the LIBNAME interface
All other options (arguments beginning with \`-') are ignored.
Every other argument is treated as a filename. Files ending in \`.la' are
treated as uninstalled libtool libraries, other files are standard or library
object files.
If the OUTPUT-FILE ends in \`.la', then a libtool library is created,
only library objects (\`.lo' files) may be specified, and \`-rpath' is
required, except when creating a convenience library.
If OUTPUT-FILE ends in \`.a' or \`.lib', then a standard library is created
using \`ar' and \`ranlib', or on Windows using \`lib'.
If OUTPUT-FILE ends in \`.lo' or \`.${objext}', then a reloadable object file
is created, otherwise an executable program is created."
;;
uninstall)
$ECHO \
"Usage: $progname [OPTION]... --mode=uninstall RM [RM-OPTION]... FILE...
Remove libraries from an installation directory.
RM is the name of the program to use to delete files associated with each FILE
(typically \`/bin/rm'). RM-OPTIONS are options (such as \`-f') to be passed
to RM.
If FILE is a libtool library, all the files associated with it are deleted.
Otherwise, only FILE itself is deleted using RM."
;;
*)
func_fatal_help "invalid operation mode \`$mode'"
;;
esac
$ECHO
$ECHO "Try \`$progname --help' for more information about other modes."
exit $?
}
# Now that we've collected a possible --mode arg, show help if necessary
$opt_help && func_mode_help
# func_mode_execute arg...
func_mode_execute ()
{
$opt_debug
# The first argument is the command name.
cmd="$nonopt"
test -z "$cmd" && \
func_fatal_help "you must specify a COMMAND"
# Handle -dlopen flags immediately.
for file in $execute_dlfiles; do
test -f "$file" \
|| func_fatal_help "\`$file' is not a file"
dir=
case $file in
*.la)
# Check to see that this really is a libtool archive.
func_lalib_unsafe_p "$file" \
|| func_fatal_help "\`$lib' is not a valid libtool archive"
# Read the libtool library.
dlname=
library_names=
func_source "$file"
# Skip this library if it cannot be dlopened.
if test -z "$dlname"; then
# Warn if it was a shared library.
test -n "$library_names" && \
func_warning "\`$file' was not linked with \`-export-dynamic'"
continue
fi
func_dirname "$file" "" "."
dir="$func_dirname_result"
if test -f "$dir/$objdir/$dlname"; then
dir="$dir/$objdir"
else
if test ! -f "$dir/$dlname"; then
func_fatal_error "cannot find \`$dlname' in \`$dir' or \`$dir/$objdir'"
fi
fi
;;
*.lo)
# Just add the directory containing the .lo file.
func_dirname "$file" "" "."
dir="$func_dirname_result"
;;
*)
func_warning "\`-dlopen' is ignored for non-libtool libraries and objects"
continue
;;
esac
# Get the absolute pathname.
absdir=`cd "$dir" && pwd`
test -n "$absdir" && dir="$absdir"
# Now add the directory to shlibpath_var.
if eval "test -z \"\$$shlibpath_var\""; then
eval "$shlibpath_var=\"\$dir\""
else
eval "$shlibpath_var=\"\$dir:\$$shlibpath_var\""
fi
done
# This variable tells wrapper scripts just to set shlibpath_var
# rather than running their programs.
libtool_execute_magic="$magic"
# Check if any of the arguments is a wrapper script.
args=
for file
do
case $file in
-*) ;;
*)
# Do a test to see if this is really a libtool program.
if func_ltwrapper_script_p "$file"; then
func_source "$file"
# Transform arg to wrapped name.
file="$progdir/$program"
elif func_ltwrapper_executable_p "$file"; then
func_ltwrapper_scriptname "$file"
func_source "$func_ltwrapper_scriptname_result"
# Transform arg to wrapped name.
file="$progdir/$program"
fi
;;
esac
# Quote arguments (to preserve shell metacharacters).
func_quote_for_eval "$file"
args="$args $func_quote_for_eval_result"
done
if test "X$opt_dry_run" = Xfalse; then
if test -n "$shlibpath_var"; then
# Export the shlibpath_var.
eval "export $shlibpath_var"
fi
# Restore saved environment variables
for lt_var in LANG LANGUAGE LC_ALL LC_CTYPE LC_COLLATE LC_MESSAGES
do
eval "if test \"\${save_$lt_var+set}\" = set; then
$lt_var=\$save_$lt_var; export $lt_var
else
$lt_unset $lt_var
fi"
done
# Now prepare to actually exec the command.
exec_cmd="\$cmd$args"
else
# Display what would be done.
if test -n "$shlibpath_var"; then
eval "\$ECHO \"\$shlibpath_var=\$$shlibpath_var\""
$ECHO "export $shlibpath_var"
fi
$ECHO "$cmd$args"
exit $EXIT_SUCCESS
fi
}
test "$mode" = execute && func_mode_execute ${1+"$@"}
# func_mode_finish arg...
func_mode_finish ()
{
$opt_debug
libdirs="$nonopt"
admincmds=
if test -n "$finish_cmds$finish_eval" && test -n "$libdirs"; then
for dir
do
libdirs="$libdirs $dir"
done
for libdir in $libdirs; do
if test -n "$finish_cmds"; then
# Do each command in the finish commands.
func_execute_cmds "$finish_cmds" 'admincmds="$admincmds
'"$cmd"'"'
fi
if test -n "$finish_eval"; then
# Do the single finish_eval.
eval cmds=\"$finish_eval\"
$opt_dry_run || eval "$cmds" || admincmds="$admincmds
$cmds"
fi
done
fi
# Exit here if they wanted silent mode.
$opt_silent && exit $EXIT_SUCCESS
$ECHO "X----------------------------------------------------------------------" | $Xsed
$ECHO "Libraries have been installed in:"
for libdir in $libdirs; do
$ECHO " $libdir"
done
$ECHO
$ECHO "If you ever happen to want to link against installed libraries"
$ECHO "in a given directory, LIBDIR, you must either use libtool, and"
$ECHO "specify the full pathname of the library, or use the \`-LLIBDIR'"
$ECHO "flag during linking and do at least one of the following:"
if test -n "$shlibpath_var"; then
$ECHO " - add LIBDIR to the \`$shlibpath_var' environment variable"
$ECHO " during execution"
fi
if test -n "$runpath_var"; then
$ECHO " - add LIBDIR to the \`$runpath_var' environment variable"
$ECHO " during linking"
fi
if test -n "$hardcode_libdir_flag_spec"; then
libdir=LIBDIR
eval flag=\"$hardcode_libdir_flag_spec\"
$ECHO " - use the \`$flag' linker flag"
fi
if test -n "$admincmds"; then
$ECHO " - have your system administrator run these commands:$admincmds"
fi
if test -f /etc/ld.so.conf; then
$ECHO " - have your system administrator add LIBDIR to \`/etc/ld.so.conf'"
fi
$ECHO
$ECHO "See any operating system documentation about shared libraries for"
case $host in
solaris2.[6789]|solaris2.1[0-9])
$ECHO "more information, such as the ld(1), crle(1) and ld.so(8) manual"
$ECHO "pages."
;;
*)
$ECHO "more information, such as the ld(1) and ld.so(8) manual pages."
;;
esac
$ECHO "X----------------------------------------------------------------------" | $Xsed
exit $EXIT_SUCCESS
}
test "$mode" = finish && func_mode_finish ${1+"$@"}
# func_mode_install arg...
func_mode_install ()
{
$opt_debug
# There may be an optional sh(1) argument at the beginning of
# install_prog (especially on Windows NT).
if test "$nonopt" = "$SHELL" || test "$nonopt" = /bin/sh ||
# Allow the use of GNU shtool's install command.
$ECHO "X$nonopt" | $GREP shtool >/dev/null; then
# Aesthetically quote it.
func_quote_for_eval "$nonopt"
install_prog="$func_quote_for_eval_result "
arg=$1
shift
else
install_prog=
arg=$nonopt
fi
# The real first argument should be the name of the installation program.
# Aesthetically quote it.
func_quote_for_eval "$arg"
install_prog="$install_prog$func_quote_for_eval_result"
# We need to accept at least all the BSD install flags.
dest=
files=
opts=
prev=
install_type=
isdir=no
stripme=
for arg
do
if test -n "$dest"; then
files="$files $dest"
dest=$arg
continue
fi
case $arg in
-d) isdir=yes ;;
-f)
case " $install_prog " in
*[\\\ /]cp\ *) ;;
*) prev=$arg ;;
esac
;;
-g | -m | -o)
prev=$arg
;;
-s)
stripme=" -s"
continue
;;
-*)
;;
*)
# If the previous option needed an argument, then skip it.
if test -n "$prev"; then
prev=
else
dest=$arg
continue
fi
;;
esac
# Aesthetically quote the argument.
func_quote_for_eval "$arg"
install_prog="$install_prog $func_quote_for_eval_result"
done
test -z "$install_prog" && \
func_fatal_help "you must specify an install program"
test -n "$prev" && \
func_fatal_help "the \`$prev' option requires an argument"
if test -z "$files"; then
if test -z "$dest"; then
func_fatal_help "no file or destination specified"
else
func_fatal_help "you must specify a destination"
fi
fi
# Strip any trailing slash from the destination.
func_stripname '' '/' "$dest"
dest=$func_stripname_result
# Check to see that the destination is a directory.
test -d "$dest" && isdir=yes
if test "$isdir" = yes; then
destdir="$dest"
destname=
else
func_dirname_and_basename "$dest" "" "."
destdir="$func_dirname_result"
destname="$func_basename_result"
# Not a directory, so check to see that there is only one file specified.
set dummy $files; shift
test "$#" -gt 1 && \
func_fatal_help "\`$dest' is not a directory"
fi
case $destdir in
[\\/]* | [A-Za-z]:[\\/]*) ;;
*)
for file in $files; do
case $file in
*.lo) ;;
*)
func_fatal_help "\`$destdir' must be an absolute directory name"
;;
esac
done
;;
esac
# This variable tells wrapper scripts just to set variables rather
# than running their programs.
libtool_install_magic="$magic"
staticlibs=
future_libdirs=
current_libdirs=
for file in $files; do
# Do each installation.
case $file in
*.$libext)
# Do the static libraries later.
staticlibs="$staticlibs $file"
;;
*.la)
# Check to see that this really is a libtool archive.
func_lalib_unsafe_p "$file" \
|| func_fatal_help "\`$file' is not a valid libtool archive"
library_names=
old_library=
relink_command=
func_source "$file"
# Add the libdir to current_libdirs if it is the destination.
if test "X$destdir" = "X$libdir"; then
case "$current_libdirs " in
*" $libdir "*) ;;
*) current_libdirs="$current_libdirs $libdir" ;;
esac
else
# Note the libdir as a future libdir.
case "$future_libdirs " in
*" $libdir "*) ;;
*) future_libdirs="$future_libdirs $libdir" ;;
esac
fi
func_dirname "$file" "/" ""
dir="$func_dirname_result"
dir="$dir$objdir"
if test -n "$relink_command"; then
# Determine the prefix the user has applied to our future dir.
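	  # (e.g. with destdir `/stage/usr/lib' and libdir `/usr/lib',
	  # inst_prefix_dir becomes `/stage')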
inst_prefix_dir=`$ECHO "X$destdir" | $Xsed -e "s%$libdir\$%%"`
# Don't allow the user to place us outside of our expected
	  # location because this prevents finding dependent libraries that
# are installed to the same prefix.
# At present, this check doesn't affect windows .dll's that
# are installed into $libdir/../bin (currently, that works fine)
# but it's something to keep an eye on.
test "$inst_prefix_dir" = "$destdir" && \
func_fatal_error "error: cannot install \`$file' to a directory not ending in $libdir"
if test -n "$inst_prefix_dir"; then
# Stick the inst_prefix_dir data into the link command.
relink_command=`$ECHO "X$relink_command" | $Xsed -e "s%@inst_prefix_dir@%-inst-prefix-dir $inst_prefix_dir%"`
else
relink_command=`$ECHO "X$relink_command" | $Xsed -e "s%@inst_prefix_dir@%%"`
fi
func_warning "relinking \`$file'"
func_show_eval "$relink_command" \
'func_fatal_error "error: relink \`$file'\'' with the above command before installing it"'
fi
	# Determine the names of the shared library.
set dummy $library_names; shift
if test -n "$1"; then
realname="$1"
shift
srcname="$realname"
test -n "$relink_command" && srcname="$realname"T
# Install the shared library and build the symlinks.
func_show_eval "$install_prog $dir/$srcname $destdir/$realname" \
'exit $?'
tstripme="$stripme"
case $host_os in
cygwin* | mingw* | pw32*)
case $realname in
*.dll.a)
tstripme=""
;;
esac
;;
esac
if test -n "$tstripme" && test -n "$striplib"; then
func_show_eval "$striplib $destdir/$realname" 'exit $?'
fi
if test "$#" -gt 0; then
# Delete the old symlinks, and create new ones.
# Try `ln -sf' first, because the `ln' binary might depend on
# the symlink we replace! Solaris /bin/ln does not understand -f,
# so we also need to try rm && ln -s.
for linkname
do
test "$linkname" != "$realname" \
&& func_show_eval "(cd $destdir && { $LN_S -f $realname $linkname || { $RM $linkname && $LN_S $realname $linkname; }; })"
done
fi
# Do each command in the postinstall commands.
lib="$destdir/$realname"
func_execute_cmds "$postinstall_cmds" 'exit $?'
fi
# Install the pseudo-library for information purposes.
func_basename "$file"
name="$func_basename_result"
instname="$dir/$name"i
func_show_eval "$install_prog $instname $destdir/$name" 'exit $?'
# Maybe install the static library, too.
test -n "$old_library" && staticlibs="$staticlibs $dir/$old_library"
;;
*.lo)
# Install (i.e. copy) a libtool object.
# Figure out destination file name, if it wasn't already specified.
if test -n "$destname"; then
destfile="$destdir/$destname"
else
func_basename "$file"
destfile="$func_basename_result"
destfile="$destdir/$destfile"
fi
# Deduce the name of the destination old-style object file.
case $destfile in
*.lo)
func_lo2o "$destfile"
staticdest=$func_lo2o_result
;;
*.$objext)
staticdest="$destfile"
destfile=
;;
*)
func_fatal_help "cannot copy a libtool object to \`$destfile'"
;;
esac
# Install the libtool object if requested.
test -n "$destfile" && \
func_show_eval "$install_prog $file $destfile" 'exit $?'
# Install the old object if enabled.
if test "$build_old_libs" = yes; then
# Deduce the name of the old-style object file.
func_lo2o "$file"
staticobj=$func_lo2o_result
func_show_eval "$install_prog \$staticobj \$staticdest" 'exit $?'
fi
exit $EXIT_SUCCESS
;;
*)
# Figure out destination file name, if it wasn't already specified.
if test -n "$destname"; then
destfile="$destdir/$destname"
else
func_basename "$file"
destfile="$func_basename_result"
destfile="$destdir/$destfile"
fi
# If the file is missing, and there is a .exe on the end, strip it
# because it is most likely a libtool script we actually want to
# install
stripped_ext=""
case $file in
*.exe)
if test ! -f "$file"; then
func_stripname '' '.exe' "$file"
file=$func_stripname_result
stripped_ext=".exe"
fi
;;
esac
# Do a test to see if this is really a libtool program.
case $host in
*cygwin*|*mingw*)
if func_ltwrapper_executable_p "$file"; then
func_ltwrapper_scriptname "$file"
wrapper=$func_ltwrapper_scriptname_result
else
func_stripname '' '.exe' "$file"
wrapper=$func_stripname_result
fi
;;
*)
wrapper=$file
;;
esac
if func_ltwrapper_script_p "$wrapper"; then
notinst_deplibs=
relink_command=
func_source "$wrapper"
# Check the variables that should have been set.
test -z "$generated_by_libtool_version" && \
func_fatal_error "invalid libtool wrapper script \`$wrapper'"
finalize=yes
for lib in $notinst_deplibs; do
# Check to see that each library is installed.
libdir=
if test -f "$lib"; then
func_source "$lib"
fi
libfile="$libdir/"`$ECHO "X$lib" | $Xsed -e 's%^.*/%%g'` ### testsuite: skip nested quoting test
if test -n "$libdir" && test ! -f "$libfile"; then
func_warning "\`$lib' has not been installed in \`$libdir'"
finalize=no
fi
done
relink_command=
func_source "$wrapper"
outputname=
if test "$fast_install" = no && test -n "$relink_command"; then
$opt_dry_run || {
if test "$finalize" = yes; then
tmpdir=`func_mktempdir`
func_basename "$file$stripped_ext"
file="$func_basename_result"
outputname="$tmpdir/$file"
# Replace the output file specification.
relink_command=`$ECHO "X$relink_command" | $Xsed -e 's%@OUTPUT@%'"$outputname"'%g'`
$opt_silent || {
func_quote_for_expand "$relink_command"
eval "func_echo $func_quote_for_expand_result"
}
if eval "$relink_command"; then :
else
func_error "error: relink \`$file' with the above command before installing it"
$opt_dry_run || ${RM}r "$tmpdir"
continue
fi
file="$outputname"
else
func_warning "cannot relink \`$file'"
fi
}
else
# Install the binary that we compiled earlier.
file=`$ECHO "X$file$stripped_ext" | $Xsed -e "s%\([^/]*\)$%$objdir/\1%"`
fi
fi
# remove .exe since cygwin /usr/bin/install will append another
# one anyway
case $install_prog,$host in
*/usr/bin/install*,*cygwin*)
case $file:$destfile in
*.exe:*.exe)
# this is ok
;;
*.exe:*)
destfile=$destfile.exe
;;
*:*.exe)
func_stripname '' '.exe' "$destfile"
destfile=$func_stripname_result
;;
esac
;;
esac
func_show_eval "$install_prog\$stripme \$file \$destfile" 'exit $?'
$opt_dry_run || if test -n "$outputname"; then
${RM}r "$tmpdir"
fi
;;
esac
done
for file in $staticlibs; do
func_basename "$file"
name="$func_basename_result"
# Set up the ranlib parameters.
oldlib="$destdir/$name"
func_show_eval "$install_prog \$file \$oldlib" 'exit $?'
if test -n "$stripme" && test -n "$old_striplib"; then
func_show_eval "$old_striplib $oldlib" 'exit $?'
fi
# Do each command in the postinstall commands.
func_execute_cmds "$old_postinstall_cmds" 'exit $?'
done
test -n "$future_libdirs" && \
func_warning "remember to run \`$progname --finish$future_libdirs'"
if test -n "$current_libdirs"; then
# Maybe just do a dry run.
$opt_dry_run && current_libdirs=" -n$current_libdirs"
exec_cmd='$SHELL $progpath $preserve_args --finish$current_libdirs'
else
exit $EXIT_SUCCESS
fi
}
test "$mode" = install && func_mode_install ${1+"$@"}
# func_generate_dlsyms outputname originator pic_p
# Extract symbols from dlprefiles and create ${outputname}S.o with
# a dlpreopen symbol table.
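# (illustration: a call such as `func_generate_dlsyms myprog myprog no',
# with `myprog' being a hypothetical output name, would emit myprogS.c
# defining the lt_myprog_LTX_preloaded_symbols table)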
func_generate_dlsyms ()
{
$opt_debug
my_outputname="$1"
my_originator="$2"
my_pic_p="${3-no}"
my_prefix=`$ECHO "$my_originator" | sed 's%[^a-zA-Z0-9]%_%g'`
my_dlsyms=
if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then
if test -n "$NM" && test -n "$global_symbol_pipe"; then
my_dlsyms="${my_outputname}S.c"
else
func_error "not configured to extract global symbols from dlpreopened files"
fi
fi
if test -n "$my_dlsyms"; then
case $my_dlsyms in
"") ;;
*.c)
# Discover the nlist of each of the dlfiles.
nlist="$output_objdir/${my_outputname}.nm"
func_show_eval "$RM $nlist ${nlist}S ${nlist}T"
# Parse the name list into a source file.
func_verbose "creating $output_objdir/$my_dlsyms"
$opt_dry_run || $ECHO > "$output_objdir/$my_dlsyms" "\
/* $my_dlsyms - symbol resolution table for \`$my_outputname' dlsym emulation. */
/* Generated by $PROGRAM (GNU $PACKAGE$TIMESTAMP) $VERSION */
#ifdef __cplusplus
extern \"C\" {
#endif
/* External symbol declarations for the compiler. */\
"
if test "$dlself" = yes; then
func_verbose "generating symbol list for \`$output'"
$opt_dry_run || echo ': @PROGRAM@ ' > "$nlist"
# Add our own program objects to the symbol list.
progfiles=`$ECHO "X$objs$old_deplibs" | $SP2NL | $Xsed -e "$lo2o" | $NL2SP`
for progfile in $progfiles; do
func_verbose "extracting global C symbols from \`$progfile'"
$opt_dry_run || eval "$NM $progfile | $global_symbol_pipe >> '$nlist'"
done
if test -n "$exclude_expsyms"; then
$opt_dry_run || {
eval '$EGREP -v " ($exclude_expsyms)$" "$nlist" > "$nlist"T'
eval '$MV "$nlist"T "$nlist"'
}
fi
if test -n "$export_symbols_regex"; then
$opt_dry_run || {
eval '$EGREP -e "$export_symbols_regex" "$nlist" > "$nlist"T'
eval '$MV "$nlist"T "$nlist"'
}
fi
# Prepare the list of exported symbols
if test -z "$export_symbols"; then
export_symbols="$output_objdir/$outputname.exp"
$opt_dry_run || {
$RM $export_symbols
eval "${SED} -n -e '/^: @PROGRAM@ $/d' -e 's/^.* \(.*\)$/\1/p' "'< "$nlist" > "$export_symbols"'
case $host in
*cygwin* | *mingw* )
eval "echo EXPORTS "'> "$output_objdir/$outputname.def"'
eval 'cat "$export_symbols" >> "$output_objdir/$outputname.def"'
;;
esac
}
else
$opt_dry_run || {
eval "${SED} -e 's/\([].[*^$]\)/\\\\\1/g' -e 's/^/ /' -e 's/$/$/'"' < "$export_symbols" > "$output_objdir/$outputname.exp"'
eval '$GREP -f "$output_objdir/$outputname.exp" < "$nlist" > "$nlist"T'
eval '$MV "$nlist"T "$nlist"'
case $host in
	      *cygwin* | *mingw* )
eval "echo EXPORTS "'> "$output_objdir/$outputname.def"'
eval 'cat "$nlist" >> "$output_objdir/$outputname.def"'
;;
esac
}
fi
fi
for dlprefile in $dlprefiles; do
func_verbose "extracting global C symbols from \`$dlprefile'"
func_basename "$dlprefile"
name="$func_basename_result"
$opt_dry_run || {
eval '$ECHO ": $name " >> "$nlist"'
eval "$NM $dlprefile 2>/dev/null | $global_symbol_pipe >> '$nlist'"
}
done
$opt_dry_run || {
# Make sure we have at least an empty file.
test -f "$nlist" || : > "$nlist"
if test -n "$exclude_expsyms"; then
$EGREP -v " ($exclude_expsyms)$" "$nlist" > "$nlist"T
$MV "$nlist"T "$nlist"
fi
# Try sorting and uniquifying the output.
if $GREP -v "^: " < "$nlist" |
if sort -k 3 </dev/null >/dev/null 2>&1; then
sort -k 3
else
sort +2
fi |
uniq > "$nlist"S; then
:
else
$GREP -v "^: " < "$nlist" > "$nlist"S
fi
if test -f "$nlist"S; then
eval "$global_symbol_to_cdecl"' < "$nlist"S >> "$output_objdir/$my_dlsyms"'
else
$ECHO '/* NONE */' >> "$output_objdir/$my_dlsyms"
fi
$ECHO >> "$output_objdir/$my_dlsyms" "\
/* The mapping between symbol names and symbols. */
typedef struct {
const char *name;
void *address;
} lt_dlsymlist;
"
case $host in
*cygwin* | *mingw* )
$ECHO >> "$output_objdir/$my_dlsyms" "\
/* DATA imports from DLLs on WIN32 can't be const, because
runtime relocations are performed -- see ld's documentation
on pseudo-relocs. */"
lt_dlsym_const= ;;
*osf5*)
echo >> "$output_objdir/$my_dlsyms" "\
/* This system does not cope well with relocations in const data */"
lt_dlsym_const= ;;
*)
lt_dlsym_const=const ;;
esac
$ECHO >> "$output_objdir/$my_dlsyms" "\
extern $lt_dlsym_const lt_dlsymlist
lt_${my_prefix}_LTX_preloaded_symbols[];
$lt_dlsym_const lt_dlsymlist
lt_${my_prefix}_LTX_preloaded_symbols[] =
{\
{ \"$my_originator\", (void *) 0 },"
case $need_lib_prefix in
no)
eval "$global_symbol_to_c_name_address" < "$nlist" >> "$output_objdir/$my_dlsyms"
;;
*)
eval "$global_symbol_to_c_name_address_lib_prefix" < "$nlist" >> "$output_objdir/$my_dlsyms"
;;
esac
$ECHO >> "$output_objdir/$my_dlsyms" "\
{0, (void *) 0}
};
/* This works around a problem in the FreeBSD linker */
#ifdef FREEBSD_WORKAROUND
static const void *lt_preloaded_setup() {
return lt_${my_prefix}_LTX_preloaded_symbols;
}
#endif
#ifdef __cplusplus
}
#endif\
"
} # !$opt_dry_run
pic_flag_for_symtable=
case "$compile_command " in
*" -static "*) ;;
*)
case $host in
# compiling the symbol table file with pic_flag works around
# a FreeBSD bug that causes programs to crash when -lm is
# linked before any other PIC object. But we must not use
# pic_flag when linking with -static. The problem exists in
# FreeBSD 2.2.6 and is fixed in FreeBSD 3.1.
*-*-freebsd2*|*-*-freebsd3.0*|*-*-freebsdelf3.0*)
pic_flag_for_symtable=" $pic_flag -DFREEBSD_WORKAROUND" ;;
*-*-hpux*)
pic_flag_for_symtable=" $pic_flag" ;;
*)
if test "X$my_pic_p" != Xno; then
pic_flag_for_symtable=" $pic_flag"
fi
;;
esac
;;
esac
symtab_cflags=
for arg in $LTCFLAGS; do
case $arg in
-pie | -fpie | -fPIE) ;;
*) symtab_cflags="$symtab_cflags $arg" ;;
esac
done
# Now compile the dynamic symbol file.
func_show_eval '(cd $output_objdir && $LTCC$symtab_cflags -c$no_builtin_flag$pic_flag_for_symtable "$my_dlsyms")' 'exit $?'
# Clean up the generated files.
func_show_eval '$RM "$output_objdir/$my_dlsyms" "$nlist" "${nlist}S" "${nlist}T"'
# Transform the symbol file into the correct name.
symfileobj="$output_objdir/${my_outputname}S.$objext"
case $host in
*cygwin* | *mingw* )
if test -f "$output_objdir/$my_outputname.def"; then
compile_command=`$ECHO "X$compile_command" | $Xsed -e "s%@SYMFILE@%$output_objdir/$my_outputname.def $symfileobj%"`
finalize_command=`$ECHO "X$finalize_command" | $Xsed -e "s%@SYMFILE@%$output_objdir/$my_outputname.def $symfileobj%"`
else
compile_command=`$ECHO "X$compile_command" | $Xsed -e "s%@SYMFILE@%$symfileobj%"`
finalize_command=`$ECHO "X$finalize_command" | $Xsed -e "s%@SYMFILE@%$symfileobj%"`
fi
;;
*)
compile_command=`$ECHO "X$compile_command" | $Xsed -e "s%@SYMFILE@%$symfileobj%"`
finalize_command=`$ECHO "X$finalize_command" | $Xsed -e "s%@SYMFILE@%$symfileobj%"`
;;
esac
;;
*)
func_fatal_error "unknown suffix for \`$my_dlsyms'"
;;
esac
else
# We keep going just in case the user didn't refer to
# lt_preloaded_symbols. The linker will fail if global_symbol_pipe
# really was required.
# Nullify the symbol file.
compile_command=`$ECHO "X$compile_command" | $Xsed -e "s% @SYMFILE@%%"`
finalize_command=`$ECHO "X$finalize_command" | $Xsed -e "s% @SYMFILE@%%"`
fi
}
# func_win32_libid arg
# return the library type of file 'arg'
#
# Need a lot of goo to handle *both* DLLs and import libs
# Has to be a shell function in order to 'eat' the argument
# that is supplied when $file_magic_command is called.
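# (illustration, with a hypothetical import library:
#   func_win32_libid /usr/lib/libfoo.dll.a   => prints `x86 archive import')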
func_win32_libid ()
{
$opt_debug
win32_libid_type="unknown"
win32_fileres=`file -L $1 2>/dev/null`
case $win32_fileres in
*ar\ archive\ import\ library*) # definitely import
win32_libid_type="x86 archive import"
;;
*ar\ archive*) # could be an import, or static
if eval $OBJDUMP -f $1 | $SED -e '10q' 2>/dev/null |
$EGREP 'file format pe-i386(.*architecture: i386)?' >/dev/null ; then
win32_nmres=`eval $NM -f posix -A $1 |
$SED -n -e '
1,100{
/ I /{
s,.*,import,
p
q
}
}'`
case $win32_nmres in
import*) win32_libid_type="x86 archive import";;
*) win32_libid_type="x86 archive static";;
esac
fi
;;
*DLL*)
win32_libid_type="x86 DLL"
;;
*executable*) # but shell scripts are "executable" too...
case $win32_fileres in
*MS\ Windows\ PE\ Intel*)
win32_libid_type="x86 DLL"
;;
esac
;;
esac
$ECHO "$win32_libid_type"
}
# func_extract_an_archive dir oldlib
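# (illustration: `func_extract_an_archive "$gentop/libfoo" /path/to/libfoo.a',
# with a hypothetical libfoo.a, unpacks the archive members into $gentop/libfoo)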
func_extract_an_archive ()
{
$opt_debug
f_ex_an_ar_dir="$1"; shift
f_ex_an_ar_oldlib="$1"
func_show_eval "(cd \$f_ex_an_ar_dir && $AR x \"\$f_ex_an_ar_oldlib\")" 'exit $?'
if ($AR t "$f_ex_an_ar_oldlib" | sort | sort -uc >/dev/null 2>&1); then
:
else
func_fatal_error "object name conflicts in archive: $f_ex_an_ar_dir/$f_ex_an_ar_oldlib"
fi
}
# func_extract_archives gentop oldlib ...
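# (illustration: `func_extract_archives "$gentop" liba.a libb.a', with both
# archive names hypothetical, leaves the list of extracted objects in
# $func_extract_archives_result)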
func_extract_archives ()
{
$opt_debug
my_gentop="$1"; shift
my_oldlibs=${1+"$@"}
my_oldobjs=""
my_xlib=""
my_xabs=""
my_xdir=""
for my_xlib in $my_oldlibs; do
# Extract the objects.
case $my_xlib in
[\\/]* | [A-Za-z]:[\\/]*) my_xabs="$my_xlib" ;;
*) my_xabs=`pwd`"/$my_xlib" ;;
esac
func_basename "$my_xlib"
my_xlib="$func_basename_result"
my_xlib_u=$my_xlib
while :; do
case " $extracted_archives " in
*" $my_xlib_u "*)
func_arith $extracted_serial + 1
extracted_serial=$func_arith_result
my_xlib_u=lt$extracted_serial-$my_xlib ;;
*) break ;;
esac
done
extracted_archives="$extracted_archives $my_xlib_u"
my_xdir="$my_gentop/$my_xlib_u"
func_mkdir_p "$my_xdir"
case $host in
*-darwin*)
func_verbose "Extracting $my_xabs"
# Do not bother doing anything if just a dry run
$opt_dry_run || {
darwin_orig_dir=`pwd`
cd $my_xdir || exit $?
darwin_archive=$my_xabs
darwin_curdir=`pwd`
darwin_base_archive=`basename "$darwin_archive"`
darwin_arches=`$LIPO -info "$darwin_archive" 2>/dev/null | $GREP Architectures 2>/dev/null || true`
if test -n "$darwin_arches"; then
darwin_arches=`$ECHO "$darwin_arches" | $SED -e 's/.*are://'`
darwin_arch=
func_verbose "$darwin_base_archive has multiple architectures $darwin_arches"
for darwin_arch in $darwin_arches ; do
func_mkdir_p "unfat-$$/${darwin_base_archive}-${darwin_arch}"
$LIPO -thin $darwin_arch -output "unfat-$$/${darwin_base_archive}-${darwin_arch}/${darwin_base_archive}" "${darwin_archive}"
cd "unfat-$$/${darwin_base_archive}-${darwin_arch}"
func_extract_an_archive "`pwd`" "${darwin_base_archive}"
cd "$darwin_curdir"
$RM "unfat-$$/${darwin_base_archive}-${darwin_arch}/${darwin_base_archive}"
done # $darwin_arches
## Okay now we've a bunch of thin objects, gotta fatten them up :)
darwin_filelist=`find unfat-$$ -type f -name \*.o -print -o -name \*.lo -print | $SED -e "$basename" | sort -u`
darwin_file=
darwin_files=
for darwin_file in $darwin_filelist; do
darwin_files=`find unfat-$$ -name $darwin_file -print | $NL2SP`
$LIPO -create -output "$darwin_file" $darwin_files
done # $darwin_filelist
$RM -rf unfat-$$
cd "$darwin_orig_dir"
else
cd $darwin_orig_dir
func_extract_an_archive "$my_xdir" "$my_xabs"
fi # $darwin_arches
} # !$opt_dry_run
;;
*)
func_extract_an_archive "$my_xdir" "$my_xabs"
;;
esac
my_oldobjs="$my_oldobjs "`find $my_xdir -name \*.$objext -print -o -name \*.lo -print | $NL2SP`
done
func_extract_archives_result="$my_oldobjs"
}
# func_emit_wrapper arg
#
# emit a libtool wrapper script on stdout
# don't directly open a file because we may want to
# incorporate the script contents within a cygwin/mingw
# wrapper executable. Must ONLY be called from within
# func_mode_link because it depends on a number of variables
# set therein.
#
# arg is the value that the WRAPPER_SCRIPT_BELONGS_IN_OBJDIR
# variable will take. If 'yes', then the emitted script
# will assume that the directory in which it is stored is
# the '.lib' directory. This is a cygwin/mingw-specific
# behavior.
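# (illustrative use from link mode: `func_emit_wrapper no > "$output"'
# writes the wrapper script for a build-tree executable)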
func_emit_wrapper ()
{
func_emit_wrapper_arg1=no
if test -n "$1" ; then
func_emit_wrapper_arg1=$1
fi
$ECHO "\
#! $SHELL
# $output - temporary wrapper script for $objdir/$outputname
# Generated by $PROGRAM (GNU $PACKAGE$TIMESTAMP) $VERSION
#
# The $output program cannot be directly executed until all the libtool
# libraries that it depends on are installed.
#
# This wrapper script should never be moved out of the build directory.
# If it is, it will not operate correctly.
# Sed substitution that helps us do robust quoting. It backslashifies
# metacharacters that are still active within double-quoted strings.
Xsed='${SED} -e 1s/^X//'
sed_quote_subst='$sed_quote_subst'
# Be Bourne compatible
if test -n \"\${ZSH_VERSION+set}\" && (emulate sh) >/dev/null 2>&1; then
emulate sh
NULLCMD=:
# Zsh 3.x and 4.x perform word splitting on \${1+\"\$@\"}, which
# is contrary to our usage. Disable this feature.
alias -g '\${1+\"\$@\"}'='\"\$@\"'
setopt NO_GLOB_SUBST
else
case \`(set -o) 2>/dev/null\` in *posix*) set -o posix;; esac
fi
BIN_SH=xpg4; export BIN_SH # for Tru64
DUALCASE=1; export DUALCASE # for MKS sh
# The HP-UX ksh and POSIX shell print the target directory to stdout
# if CDPATH is set.
(unset CDPATH) >/dev/null 2>&1 && unset CDPATH
relink_command=\"$relink_command\"
# This environment variable determines our operation mode.
if test \"\$libtool_install_magic\" = \"$magic\"; then
# install mode needs the following variables:
generated_by_libtool_version='$macro_version'
notinst_deplibs='$notinst_deplibs'
else
# When we are sourced in execute mode, \$file and \$ECHO are already set.
if test \"\$libtool_execute_magic\" != \"$magic\"; then
ECHO=\"$qecho\"
file=\"\$0\"
# Make sure echo works.
if test \"X\$1\" = X--no-reexec; then
# Discard the --no-reexec flag, and continue.
shift
elif test \"X\`{ \$ECHO '\t'; } 2>/dev/null\`\" = 'X\t'; then
# Yippee, \$ECHO works!
:
else
# Restart under the correct shell, and then maybe \$ECHO will work.
exec $SHELL \"\$0\" --no-reexec \${1+\"\$@\"}
fi
fi\
"
$ECHO "\
# Find the directory that this script lives in.
thisdir=\`\$ECHO \"X\$file\" | \$Xsed -e 's%/[^/]*$%%'\`
test \"x\$thisdir\" = \"x\$file\" && thisdir=.
# Follow symbolic links until we get to the real thisdir.
file=\`ls -ld \"\$file\" | ${SED} -n 's/.*-> //p'\`
while test -n \"\$file\"; do
destdir=\`\$ECHO \"X\$file\" | \$Xsed -e 's%/[^/]*\$%%'\`
# If there was a directory component, then change thisdir.
if test \"x\$destdir\" != \"x\$file\"; then
case \"\$destdir\" in
[\\\\/]* | [A-Za-z]:[\\\\/]*) thisdir=\"\$destdir\" ;;
*) thisdir=\"\$thisdir/\$destdir\" ;;
esac
fi
file=\`\$ECHO \"X\$file\" | \$Xsed -e 's%^.*/%%'\`
file=\`ls -ld \"\$thisdir/\$file\" | ${SED} -n 's/.*-> //p'\`
done
# Usually 'no', except on cygwin/mingw when embedded into
# the cwrapper.
WRAPPER_SCRIPT_BELONGS_IN_OBJDIR=$func_emit_wrapper_arg1
if test \"\$WRAPPER_SCRIPT_BELONGS_IN_OBJDIR\" = \"yes\"; then
# special case for '.'
if test \"\$thisdir\" = \".\"; then
thisdir=\`pwd\`
fi
# remove .libs from thisdir
case \"\$thisdir\" in
*[\\\\/]$objdir ) thisdir=\`\$ECHO \"X\$thisdir\" | \$Xsed -e 's%[\\\\/][^\\\\/]*$%%'\` ;;
$objdir ) thisdir=. ;;
esac
fi
# Try to get the absolute directory name.
absdir=\`cd \"\$thisdir\" && pwd\`
test -n \"\$absdir\" && thisdir=\"\$absdir\"
"
if test "$fast_install" = yes; then
$ECHO "\
program=lt-'$outputname'$exeext
progdir=\"\$thisdir/$objdir\"
if test ! -f \"\$progdir/\$program\" ||
{ file=\`ls -1dt \"\$progdir/\$program\" \"\$progdir/../\$program\" 2>/dev/null | ${SED} 1q\`; \\
test \"X\$file\" != \"X\$progdir/\$program\"; }; then
file=\"\$\$-\$program\"
if test ! -d \"\$progdir\"; then
$MKDIR \"\$progdir\"
else
$RM \"\$progdir/\$file\"
fi"
$ECHO "\
# relink executable if necessary
if test -n \"\$relink_command\"; then
if relink_command_output=\`eval \$relink_command 2>&1\`; then :
else
$ECHO \"\$relink_command_output\" >&2
$RM \"\$progdir/\$file\"
exit 1
fi
fi
$MV \"\$progdir/\$file\" \"\$progdir/\$program\" 2>/dev/null ||
{ $RM \"\$progdir/\$program\";
$MV \"\$progdir/\$file\" \"\$progdir/\$program\"; }
$RM \"\$progdir/\$file\"
fi"
else
$ECHO "\
program='$outputname'
progdir=\"\$thisdir/$objdir\"
"
fi
$ECHO "\
if test -f \"\$progdir/\$program\"; then"
# Export our shlibpath_var if we have one.
if test "$shlibpath_overrides_runpath" = yes && test -n "$shlibpath_var" && test -n "$temp_rpath"; then
$ECHO "\
# Add our own library path to $shlibpath_var
$shlibpath_var=\"$temp_rpath\$$shlibpath_var\"
# Some systems cannot cope with colon-terminated $shlibpath_var
# The second colon is a workaround for a bug in BeOS R4 sed
$shlibpath_var=\`\$ECHO \"X\$$shlibpath_var\" | \$Xsed -e 's/::*\$//'\`
export $shlibpath_var
"
fi
# fixup the dll searchpath if we need to.
if test -n "$dllsearchpath"; then
$ECHO "\
# Add the dll search path components to the executable PATH
PATH=$dllsearchpath:\$PATH
"
fi
$ECHO "\
if test \"\$libtool_execute_magic\" != \"$magic\"; then
# Run the actual program with our arguments.
"
case $host in
# Backslashes separate directories on plain windows
  *-*-mingw* | *-*-os2*)
$ECHO "\
exec \"\$progdir\\\\\$program\" \${1+\"\$@\"}
"
;;
*)
$ECHO "\
exec \"\$progdir/\$program\" \${1+\"\$@\"}
"
;;
esac
$ECHO "\
\$ECHO \"\$0: cannot exec \$program \$*\" 1>&2
exit 1
fi
else
# The program doesn't exist.
\$ECHO \"\$0: error: \\\`\$progdir/\$program' does not exist\" 1>&2
\$ECHO \"This script is just a wrapper for \$program.\" 1>&2
$ECHO \"See the $PACKAGE documentation for more information.\" 1>&2
exit 1
fi
fi\
"
}
# end: func_emit_wrapper
# func_emit_cwrapperexe_src
# emit the source code for a wrapper executable on stdout
# Must ONLY be called from within func_mode_link because
# it depends on a number of variables set therein.
func_emit_cwrapperexe_src ()
{
cat <<EOF
/* $cwrappersource - temporary wrapper executable for $objdir/$outputname
Generated by $PROGRAM (GNU $PACKAGE$TIMESTAMP) $VERSION
The $output program cannot be directly executed until all the libtool
libraries that it depends on are installed.
This wrapper executable should never be moved out of the build directory.
If it is, it will not operate correctly.
Currently, it simply execs the wrapper *script* "$SHELL $output",
   but could eventually absorb all of the script's functionality and
exec $objdir/$outputname directly.
*/
EOF
cat <<"EOF"
#include <stdio.h>
#include <stdlib.h>
#ifdef _MSC_VER
# include <direct.h>
# include <process.h>
# include <io.h>
# define setmode _setmode
#else
# include <unistd.h>
# include <stdint.h>
# ifdef __CYGWIN__
# include <io.h>
# endif
#endif
#include <malloc.h>
#include <stdarg.h>
#include <assert.h>
#include <string.h>
#include <ctype.h>
#include <errno.h>
#include <fcntl.h>
#include <sys/stat.h>
#if defined(PATH_MAX)
# define LT_PATHMAX PATH_MAX
#elif defined(MAXPATHLEN)
# define LT_PATHMAX MAXPATHLEN
#else
# define LT_PATHMAX 1024
#endif
#ifndef S_IXOTH
# define S_IXOTH 0
#endif
#ifndef S_IXGRP
# define S_IXGRP 0
#endif
#ifdef _MSC_VER
# define S_IXUSR _S_IEXEC
# define stat _stat
# ifndef _INTPTR_T_DEFINED
# define intptr_t int
# endif
#endif
#ifndef DIR_SEPARATOR
# define DIR_SEPARATOR '/'
# define PATH_SEPARATOR ':'
#endif
#if defined (_WIN32) || defined (__MSDOS__) || defined (__DJGPP__) || \
defined (__OS2__)
# define HAVE_DOS_BASED_FILE_SYSTEM
# define FOPEN_WB "wb"
# ifndef DIR_SEPARATOR_2
# define DIR_SEPARATOR_2 '\\'
# endif
# ifndef PATH_SEPARATOR_2
# define PATH_SEPARATOR_2 ';'
# endif
#endif
#ifndef DIR_SEPARATOR_2
# define IS_DIR_SEPARATOR(ch) ((ch) == DIR_SEPARATOR)
#else /* DIR_SEPARATOR_2 */
# define IS_DIR_SEPARATOR(ch) \
(((ch) == DIR_SEPARATOR) || ((ch) == DIR_SEPARATOR_2))
#endif /* DIR_SEPARATOR_2 */
#ifndef PATH_SEPARATOR_2
# define IS_PATH_SEPARATOR(ch) ((ch) == PATH_SEPARATOR)
#else /* PATH_SEPARATOR_2 */
# define IS_PATH_SEPARATOR(ch) ((ch) == PATH_SEPARATOR_2)
#endif /* PATH_SEPARATOR_2 */
#ifdef __CYGWIN__
# define FOPEN_WB "wb"
#endif
#ifndef FOPEN_WB
# define FOPEN_WB "w"
#endif
#ifndef _O_BINARY
# define _O_BINARY 0
#endif
#define XMALLOC(type, num) ((type *) xmalloc ((num) * sizeof(type)))
#define XFREE(stale) do { \
if (stale) { free ((void *) stale); stale = 0; } \
} while (0)
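/* (illustration: XMALLOC (char, n + 1) allocates n + 1 chars and aborts
   via lt_fatal on failure; XFREE also nulls the stale pointer) */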
#undef LTWRAPPER_DEBUGPRINTF
#if defined DEBUGWRAPPER
# define LTWRAPPER_DEBUGPRINTF(args) ltwrapper_debugprintf args
static void
ltwrapper_debugprintf (const char *fmt, ...)
{
va_list args;
va_start (args, fmt);
(void) vfprintf (stderr, fmt, args);
va_end (args);
}
#else
# define LTWRAPPER_DEBUGPRINTF(args)
#endif
const char *program_name = NULL;
void *xmalloc (size_t num);
char *xstrdup (const char *string);
const char *base_name (const char *name);
char *find_executable (const char *wrapper);
char *chase_symlinks (const char *pathspec);
int make_executable (const char *path);
int check_executable (const char *path);
char *strendzap (char *str, const char *pat);
void lt_fatal (const char *message, ...);
static const char *script_text =
EOF
func_emit_wrapper yes |
$SED -e 's/\([\\"]\)/\\\1/g' \
-e 's/^/ "/' -e 's/$/\\n"/'
echo ";"
cat <<EOF
const char * MAGIC_EXE = "$magic_exe";
int
main (int argc, char *argv[])
{
char **newargz;
char *tmp_pathspec;
char *actual_cwrapper_path;
char *shwrapper_name;
intptr_t rval = 127;
FILE *shwrapper;
const char *dumpscript_opt = "--lt-dump-script";
int i;
program_name = (char *) xstrdup (base_name (argv[0]));
LTWRAPPER_DEBUGPRINTF (("(main) argv[0] : %s\n", argv[0]));
LTWRAPPER_DEBUGPRINTF (("(main) program_name : %s\n", program_name));
/* very simple arg parsing; don't want to rely on getopt */
for (i = 1; i < argc; i++)
{
if (strcmp (argv[i], dumpscript_opt) == 0)
{
EOF
case "$host" in
*mingw* | *cygwin* )
# make stdout use "unix" line endings
echo " setmode(1,_O_BINARY);"
;;
esac
cat <<EOF
printf ("%s", script_text);
return 0;
}
}
newargz = XMALLOC (char *, argc + 2);
EOF
if test -n "$TARGETSHELL" ; then
# no path translation at all
lt_newargv0=$TARGETSHELL
else
case "$host" in
*mingw* )
# awkward: cmd appends spaces to result
lt_sed_strip_trailing_spaces="s/[ ]*\$//"
lt_newargv0=`( cmd //c echo $SHELL | $SED -e "$lt_sed_strip_trailing_spaces" ) 2>/dev/null || echo $SHELL`
case $lt_newargv0 in
*.exe | *.EXE) ;;
*) lt_newargv0=$lt_newargv0.exe ;;
esac
;;
* ) lt_newargv0=$SHELL ;;
esac
fi
cat <<EOF
newargz[0] = (char *) xstrdup ("$lt_newargv0");
EOF
cat <<"EOF"
tmp_pathspec = find_executable (argv[0]);
if (tmp_pathspec == NULL)
lt_fatal ("Couldn't find %s", argv[0]);
LTWRAPPER_DEBUGPRINTF (("(main) found exe (before symlink chase) at : %s\n",
tmp_pathspec));
actual_cwrapper_path = chase_symlinks (tmp_pathspec);
LTWRAPPER_DEBUGPRINTF (("(main) found exe (after symlink chase) at : %s\n",
actual_cwrapper_path));
XFREE (tmp_pathspec);
shwrapper_name = (char *) xstrdup (base_name (actual_cwrapper_path));
strendzap (actual_cwrapper_path, shwrapper_name);
/* shwrapper_name transforms */
strendzap (shwrapper_name, ".exe");
tmp_pathspec = XMALLOC (char, (strlen (shwrapper_name) +
strlen ("_ltshwrapperTMP") + 1));
strcpy (tmp_pathspec, shwrapper_name);
strcat (tmp_pathspec, "_ltshwrapperTMP");
XFREE (shwrapper_name);
shwrapper_name = tmp_pathspec;
tmp_pathspec = 0;
LTWRAPPER_DEBUGPRINTF (("(main) libtool shell wrapper name: %s\n",
shwrapper_name));
EOF
cat <<EOF
newargz[1] =
XMALLOC (char, (strlen (actual_cwrapper_path) +
strlen ("$objdir") + 1 + strlen (shwrapper_name) + 1));
strcpy (newargz[1], actual_cwrapper_path);
strcat (newargz[1], "$objdir");
strcat (newargz[1], "/");
strcat (newargz[1], shwrapper_name);
EOF
case $host_os in
mingw*)
cat <<"EOF"
{
char* p;
while ((p = strchr (newargz[1], '\\')) != NULL)
{
*p = '/';
}
}
EOF
;;
esac
cat <<"EOF"
XFREE (shwrapper_name);
XFREE (actual_cwrapper_path);
/* always write in binary mode */
if ((shwrapper = fopen (newargz[1], FOPEN_WB)) == 0)
{
lt_fatal ("Could not open %s for writing", newargz[1]);
}
fprintf (shwrapper, "%s", script_text);
fclose (shwrapper);
make_executable (newargz[1]);
for (i = 1; i < argc; i++)
newargz[i + 1] = xstrdup (argv[i]);
newargz[argc + 1] = NULL;
for (i = 0; i < argc + 1; i++)
{
LTWRAPPER_DEBUGPRINTF (("(main) newargz[%d] : %s\n", i, newargz[i]));
}
EOF
case $host_os in
mingw*)
cat <<EOF
/* execv doesn't actually work on mingw as expected on unix */
rval = _spawnv (_P_WAIT, "$lt_newargv0", (const char * const *) newargz);
if (rval == -1)
{
/* failed to start process */
LTWRAPPER_DEBUGPRINTF (("(main) failed to launch target \"$lt_newargv0\": errno = %d\n", errno));
return 127;
}
return rval;
}
EOF
;;
*)
cat <<EOF
execv ("$lt_newargv0", newargz);
return rval; /* =127, but avoids unused variable warning */
}
EOF
;;
esac
cat <<"EOF"
void *
xmalloc (size_t num)
{
void *p = (void *) malloc (num);
if (!p)
lt_fatal ("Memory exhausted");
return p;
}
char *
xstrdup (const char *string)
{
return string ? strcpy ((char *) xmalloc (strlen (string) + 1),
string) : NULL;
}
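/* Return the last path component of name,
   e.g. base_name ("a/b/c") yields "c".  */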
const char *
base_name (const char *name)
{
const char *base;
#if defined (HAVE_DOS_BASED_FILE_SYSTEM)
/* Skip over the disk name in MSDOS pathnames. */
if (isalpha ((unsigned char) name[0]) && name[1] == ':')
name += 2;
#endif
for (base = name; *name; name++)
if (IS_DIR_SEPARATOR (*name))
base = name + 1;
return base;
}
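/* Return 1 if path names an existing file with any execute bit set,
   0 otherwise.  */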
int
check_executable (const char *path)
{
struct stat st;
LTWRAPPER_DEBUGPRINTF (("(check_executable) : %s\n",
path ? (*path ? path : "EMPTY!") : "NULL!"));
if ((!path) || (!*path))
return 0;
if ((stat (path, &st) >= 0)
&& (st.st_mode & (S_IXUSR | S_IXGRP | S_IXOTH)))
return 1;
else
return 0;
}
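/* Add the user, group and other execute bits to path's mode;
   returns the chmod result (0 on success).  */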
int
make_executable (const char *path)
{
int rval = 0;
struct stat st;
LTWRAPPER_DEBUGPRINTF (("(make_executable) : %s\n",
path ? (*path ? path : "EMPTY!") : "NULL!"));
if ((!path) || (!*path))
return 0;
if (stat (path, &st) >= 0)
{
rval = chmod (path, st.st_mode | S_IXOTH | S_IXGRP | S_IXUSR);
}
return rval;
}
/* Searches for the full path of the wrapper. Returns
   newly allocated full path name if found, NULL otherwise.
Does not chase symlinks, even on platforms that support them.
*/
char *
find_executable (const char *wrapper)
{
int has_slash = 0;
const char *p;
const char *p_next;
/* static buffer for getcwd */
char tmp[LT_PATHMAX + 1];
int tmp_len;
char *concat_name;
LTWRAPPER_DEBUGPRINTF (("(find_executable) : %s\n",
wrapper ? (*wrapper ? wrapper : "EMPTY!") : "NULL!"));
if ((wrapper == NULL) || (*wrapper == '\0'))
return NULL;
/* Absolute path? */
#if defined (HAVE_DOS_BASED_FILE_SYSTEM)
if (isalpha ((unsigned char) wrapper[0]) && wrapper[1] == ':')
{
concat_name = xstrdup (wrapper);
if (check_executable (concat_name))
return concat_name;
XFREE (concat_name);
}
else
{
#endif
if (IS_DIR_SEPARATOR (wrapper[0]))
{
concat_name = xstrdup (wrapper);
if (check_executable (concat_name))
return concat_name;
XFREE (concat_name);
}
#if defined (HAVE_DOS_BASED_FILE_SYSTEM)
}
#endif
for (p = wrapper; *p; p++)
if (*p == '/')
{
has_slash = 1;
break;
}
if (!has_slash)
{
/* no slashes; search PATH */
const char *path = getenv ("PATH");
if (path != NULL)
{
for (p = path; *p; p = p_next)
{
const char *q;
size_t p_len;
for (q = p; *q; q++)
if (IS_PATH_SEPARATOR (*q))
break;
p_len = q - p;
p_next = (*q == '\0' ? q : q + 1);
if (p_len == 0)
{
/* empty path: current directory */
if (getcwd (tmp, LT_PATHMAX) == NULL)
lt_fatal ("getcwd failed");
tmp_len = strlen (tmp);
concat_name =
XMALLOC (char, tmp_len + 1 + strlen (wrapper) + 1);
memcpy (concat_name, tmp, tmp_len);
concat_name[tmp_len] = '/';
strcpy (concat_name + tmp_len + 1, wrapper);
}
else
{
concat_name =
XMALLOC (char, p_len + 1 + strlen (wrapper) + 1);
memcpy (concat_name, p, p_len);
concat_name[p_len] = '/';
strcpy (concat_name + p_len + 1, wrapper);
}
if (check_executable (concat_name))
return concat_name;
XFREE (concat_name);
}
}
/* not found in PATH; assume curdir */
}
/* Relative path, or not found in PATH: prepend cwd */
if (getcwd (tmp, LT_PATHMAX) == NULL)
lt_fatal ("getcwd failed");
tmp_len = strlen (tmp);
concat_name = XMALLOC (char, tmp_len + 1 + strlen (wrapper) + 1);
memcpy (concat_name, tmp, tmp_len);
concat_name[tmp_len] = '/';
strcpy (concat_name + tmp_len + 1, wrapper);
if (check_executable (concat_name))
return concat_name;
XFREE (concat_name);
return NULL;
}
char *
chase_symlinks (const char *pathspec)
{
#ifndef S_ISLNK
return xstrdup (pathspec);
#else
char buf[LT_PATHMAX];
struct stat s;
char *tmp_pathspec = xstrdup (pathspec);
char *p;
int has_symlinks = 0;
while (strlen (tmp_pathspec) && !has_symlinks)
{
LTWRAPPER_DEBUGPRINTF (("checking path component for symlinks: %s\n",
tmp_pathspec));
if (lstat (tmp_pathspec, &s) == 0)
{
if (S_ISLNK (s.st_mode) != 0)
{
has_symlinks = 1;
break;
}
/* search backwards for last DIR_SEPARATOR */
p = tmp_pathspec + strlen (tmp_pathspec) - 1;
while ((p > tmp_pathspec) && (!IS_DIR_SEPARATOR (*p)))
p--;
if ((p == tmp_pathspec) && (!IS_DIR_SEPARATOR (*p)))
{
/* no more DIR_SEPARATORS left */
break;
}
*p = '\0';
}
else
{
char *errstr = strerror (errno);
lt_fatal ("Error accessing file %s (%s)", tmp_pathspec, errstr);
}
}
XFREE (tmp_pathspec);
if (!has_symlinks)
{
return xstrdup (pathspec);
}
tmp_pathspec = realpath (pathspec, buf);
if (tmp_pathspec == 0)
{
lt_fatal ("Could not follow symlinks for %s", pathspec);
}
return xstrdup (tmp_pathspec);
#endif
}
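/* Remove pat from the end of str, in place;
   e.g. strendzap ("foo.exe", ".exe") truncates str to "foo".  */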
char *
strendzap (char *str, const char *pat)
{
size_t len, patlen;
assert (str != NULL);
assert (pat != NULL);
len = strlen (str);
patlen = strlen (pat);
if (patlen <= len)
{
str += len - patlen;
if (strcmp (str, pat) == 0)
*str = '\0';
}
return str;
}
static void
lt_error_core (int exit_status, const char *mode,
const char *message, va_list ap)
{
fprintf (stderr, "%s: %s: ", program_name, mode);
vfprintf (stderr, message, ap);
fprintf (stderr, ".\n");
if (exit_status >= 0)
exit (exit_status);
}
void
lt_fatal (const char *message, ...)
{
va_list ap;
va_start (ap, message);
lt_error_core (EXIT_FAILURE, "FATAL", message, ap);
va_end (ap);
}
EOF
}
# end: func_emit_cwrapperexe_src
# func_mode_link arg...
func_mode_link ()
{
$opt_debug
case $host in
*-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2*)
# It is impossible to link a dll without this setting, and
# we shouldn't force the makefile maintainer to figure out
# which system we are compiling for in order to pass an extra
# flag for every libtool invocation.
# allow_undefined=no
# FIXME: Unfortunately, there are problems with the above when trying
# to make a dll which has undefined symbols, in which case not
# even a static library is built. For now, we need to specify
# -no-undefined on the libtool link line when we can be certain
# that all symbols are satisfied, otherwise we get a static library.
allow_undefined=yes
;;
*)
allow_undefined=yes
;;
esac
libtool_args=$nonopt
base_compile="$nonopt $@"
compile_command=$nonopt
finalize_command=$nonopt
compile_rpath=
finalize_rpath=
compile_shlibpath=
finalize_shlibpath=
convenience=
old_convenience=
deplibs=
old_deplibs=
compiler_flags=
linker_flags=
dllsearchpath=
lib_search_path=`pwd`
inst_prefix_dir=
new_inherited_linker_flags=
avoid_version=no
dlfiles=
dlprefiles=
dlself=no
export_dynamic=no
export_symbols=
export_symbols_regex=
generated=
libobjs=
ltlibs=
module=no
no_install=no
objs=
non_pic_objects=
precious_files_regex=
prefer_static_libs=no
preload=no
prev=
prevarg=
release=
rpath=
xrpath=
perm_rpath=
temp_rpath=
thread_safe=no
vinfo=
vinfo_number=no
weak_libs=
single_module="${wl}-single_module"
func_infer_tag $base_compile
# We need to know -static, to get the right output filenames.
for arg
do
case $arg in
-shared)
test "$build_libtool_libs" != yes && \
	func_fatal_configuration "cannot build a shared library"
build_old_libs=no
break
;;
-all-static | -static | -static-libtool-libs)
case $arg in
-all-static)
if test "$build_libtool_libs" = yes && test -z "$link_static_flag"; then
func_warning "complete static linking is impossible in this configuration"
fi
if test -n "$link_static_flag"; then
dlopen_self=$dlopen_self_static
fi
prefer_static_libs=yes
;;
-static)
if test -z "$pic_flag" && test -n "$link_static_flag"; then
dlopen_self=$dlopen_self_static
fi
prefer_static_libs=built
;;
-static-libtool-libs)
if test -z "$pic_flag" && test -n "$link_static_flag"; then
dlopen_self=$dlopen_self_static
fi
prefer_static_libs=yes
;;
esac
build_libtool_libs=no
build_old_libs=yes
break
;;
esac
done
# See if our shared archives depend on static archives.
test -n "$old_archive_from_new_cmds" && build_old_libs=yes
# Go through the arguments, transforming them on the way.
while test "$#" -gt 0; do
arg="$1"
shift
func_quote_for_eval "$arg"
qarg=$func_quote_for_eval_unquoted_result
func_append libtool_args " $func_quote_for_eval_result"
# If the previous option needs an argument, assign it.
if test -n "$prev"; then
case $prev in
output)
func_append compile_command " @OUTPUT@"
func_append finalize_command " @OUTPUT@"
;;
esac
case $prev in
dlfiles|dlprefiles)
if test "$preload" = no; then
# Add the symbol object into the linking commands.
func_append compile_command " @SYMFILE@"
func_append finalize_command " @SYMFILE@"
preload=yes
fi
case $arg in
*.la | *.lo) ;; # We handle these cases below.
force)
if test "$dlself" = no; then
dlself=needless
export_dynamic=yes
fi
prev=
continue
;;
self)
if test "$prev" = dlprefiles; then
dlself=yes
elif test "$prev" = dlfiles && test "$dlopen_self" != yes; then
dlself=yes
else
dlself=needless
export_dynamic=yes
fi
prev=
continue
;;
*)
if test "$prev" = dlfiles; then
dlfiles="$dlfiles $arg"
else
dlprefiles="$dlprefiles $arg"
fi
prev=
continue
;;
esac
;;
expsyms)
export_symbols="$arg"
test -f "$arg" \
|| func_fatal_error "symbol file \`$arg' does not exist"
prev=
continue
;;
expsyms_regex)
export_symbols_regex="$arg"
prev=
continue
;;
framework)
case $host in
*-*-darwin*)
case "$deplibs " in
*" $qarg.ltframework "*) ;;
*) deplibs="$deplibs $qarg.ltframework" # this is fixed later
;;
esac
;;
esac
prev=
continue
;;
inst_prefix)
inst_prefix_dir="$arg"
prev=
continue
;;
objectlist)
if test -f "$arg"; then
save_arg=$arg
moreargs=
for fil in `cat "$save_arg"`
do
# moreargs="$moreargs $fil"
arg=$fil
# A libtool-controlled object.
# Check to see that this really is a libtool object.
if func_lalib_unsafe_p "$arg"; then
pic_object=
non_pic_object=
# Read the .lo file
func_source "$arg"
if test -z "$pic_object" ||
test -z "$non_pic_object" ||
test "$pic_object" = none &&
test "$non_pic_object" = none; then
func_fatal_error "cannot find name of object for \`$arg'"
fi
# Extract subdirectory from the argument.
func_dirname "$arg" "/" ""
xdir="$func_dirname_result"
if test "$pic_object" != none; then
# Prepend the subdirectory the object is found in.
pic_object="$xdir$pic_object"
if test "$prev" = dlfiles; then
if test "$build_libtool_libs" = yes && test "$dlopen_support" = yes; then
dlfiles="$dlfiles $pic_object"
prev=
continue
else
# If libtool objects are unsupported, then we need to preload.
prev=dlprefiles
fi
fi
# CHECK ME: I think I busted this. -Ossama
if test "$prev" = dlprefiles; then
# Preload the old-style object.
dlprefiles="$dlprefiles $pic_object"
prev=
fi
# A PIC object.
func_append libobjs " $pic_object"
arg="$pic_object"
fi
# Non-PIC object.
if test "$non_pic_object" != none; then
# Prepend the subdirectory the object is found in.
non_pic_object="$xdir$non_pic_object"
# A standard non-PIC object
func_append non_pic_objects " $non_pic_object"
if test -z "$pic_object" || test "$pic_object" = none ; then
arg="$non_pic_object"
fi
else
# If the PIC object exists, use it instead.
# $xdir was prepended to $pic_object above.
non_pic_object="$pic_object"
func_append non_pic_objects " $non_pic_object"
fi
else
# Only an error if not doing a dry-run.
if $opt_dry_run; then
# Extract subdirectory from the argument.
func_dirname "$arg" "/" ""
xdir="$func_dirname_result"
func_lo2o "$arg"
pic_object=$xdir$objdir/$func_lo2o_result
non_pic_object=$xdir$func_lo2o_result
func_append libobjs " $pic_object"
func_append non_pic_objects " $non_pic_object"
else
func_fatal_error "\`$arg' is not a valid libtool object"
fi
fi
done
else
func_fatal_error "link input file \`$arg' does not exist"
fi
arg=$save_arg
prev=
continue
;;
precious_regex)
precious_files_regex="$arg"
prev=
continue
;;
release)
release="-$arg"
prev=
continue
;;
rpath | xrpath)
# We need an absolute path.
case $arg in
[\\/]* | [A-Za-z]:[\\/]*) ;;
*)
func_fatal_error "only absolute run-paths are allowed"
;;
esac
if test "$prev" = rpath; then
case "$rpath " in
*" $arg "*) ;;
*) rpath="$rpath $arg" ;;
esac
else
case "$xrpath " in
*" $arg "*) ;;
*) xrpath="$xrpath $arg" ;;
esac
fi
prev=
continue
;;
shrext)
shrext_cmds="$arg"
prev=
continue
;;
weak)
weak_libs="$weak_libs $arg"
prev=
continue
;;
xcclinker)
linker_flags="$linker_flags $qarg"
compiler_flags="$compiler_flags $qarg"
prev=
func_append compile_command " $qarg"
func_append finalize_command " $qarg"
continue
;;
xcompiler)
compiler_flags="$compiler_flags $qarg"
prev=
func_append compile_command " $qarg"
func_append finalize_command " $qarg"
continue
;;
xlinker)
linker_flags="$linker_flags $qarg"
compiler_flags="$compiler_flags $wl$qarg"
prev=
func_append compile_command " $wl$qarg"
func_append finalize_command " $wl$qarg"
continue
;;
*)
eval "$prev=\"\$arg\""
prev=
continue
;;
esac
fi # test -n "$prev"
prevarg="$arg"
case $arg in
-all-static)
if test -n "$link_static_flag"; then
# See comment for -static flag below, for more details.
func_append compile_command " $link_static_flag"
func_append finalize_command " $link_static_flag"
fi
continue
;;
-allow-undefined)
# FIXME: remove this flag sometime in the future.
func_fatal_error "\`-allow-undefined' must not be used because it is the default"
;;
-avoid-version)
avoid_version=yes
continue
;;
-dlopen)
prev=dlfiles
continue
;;
-dlpreopen)
prev=dlprefiles
continue
;;
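      # Illustrative example (not executed): given a hypothetical command
      #   libtool --mode=link cc -o prog main.o -dlopen self -dlopen libfoo.la
      # -dlopen sets prev=dlfiles, so the following argument (self or
      # libfoo.la) is consumed by the prev-handling block at the top of this
      # loop; -dlpreopen works the same way through prev=dlprefiles.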
-export-dynamic)
export_dynamic=yes
continue
;;
-export-symbols | -export-symbols-regex)
if test -n "$export_symbols" || test -n "$export_symbols_regex"; then
func_fatal_error "more than one -exported-symbols argument is not allowed"
fi
if test "X$arg" = "X-export-symbols"; then
prev=expsyms
else
prev=expsyms_regex
fi
continue
;;
-framework)
prev=framework
continue
;;
-inst-prefix-dir)
prev=inst_prefix
continue
;;
# The native IRIX linker understands -LANG:*, -LIST:* and -LNO:*
      # so, if we see these flags, be careful not to treat them like -L
-L[A-Z][A-Z]*:*)
case $with_gcc/$host in
no/*-*-irix* | /*-*-irix*)
func_append compile_command " $arg"
func_append finalize_command " $arg"
;;
esac
continue
;;
-L*)
func_stripname '-L' '' "$arg"
dir=$func_stripname_result
# We need an absolute path.
case $dir in
[\\/]* | [A-Za-z]:[\\/]*) ;;
*)
absdir=`cd "$dir" && pwd`
test -z "$absdir" && \
func_fatal_error "cannot determine absolute directory name of \`$dir'"
dir="$absdir"
;;
esac
case "$deplibs " in
*" -L$dir "*) ;;
*)
deplibs="$deplibs -L$dir"
lib_search_path="$lib_search_path $dir"
;;
esac
case $host in
*-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2*)
testbindir=`$ECHO "X$dir" | $Xsed -e 's*/lib$*/bin*'`
case :$dllsearchpath: in
*":$dir:"*) ;;
*) dllsearchpath="$dllsearchpath:$dir";;
esac
case :$dllsearchpath: in
*":$testbindir:"*) ;;
*) dllsearchpath="$dllsearchpath:$testbindir";;
esac
;;
esac
continue
;;
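      # Illustrative example: a relative search path such as -L../lib is
      # made absolute above (e.g. -L/home/user/project/lib), added once to
      # $deplibs and $lib_search_path, and on Windows-like hosts both the
      # directory and its sibling */bin directory are appended to
      # $dllsearchpath so the wrapper scripts can locate DLLs.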
-l*)
if test "X$arg" = "X-lc" || test "X$arg" = "X-lm"; then
case $host in
*-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-beos*)
# These systems don't actually have a C or math library (as such)
continue
;;
*-*-os2*)
# These systems don't actually have a C library (as such)
test "X$arg" = "X-lc" && continue
;;
*-*-openbsd* | *-*-freebsd* | *-*-dragonfly*)
# Do not include libc due to us having libc/libc_r.
test "X$arg" = "X-lc" && continue
;;
*-*-rhapsody* | *-*-darwin1.[012])
# Rhapsody C and math libraries are in the System framework
deplibs="$deplibs System.ltframework"
continue
;;
*-*-sco3.2v5* | *-*-sco5v6*)
# Causes problems with __ctype
test "X$arg" = "X-lc" && continue
;;
*-*-sysv4.2uw2* | *-*-sysv5* | *-*-unixware* | *-*-OpenUNIX*)
# Compiler inserts libc in the correct place for threads to work
test "X$arg" = "X-lc" && continue
;;
esac
elif test "X$arg" = "X-lc_r"; then
case $host in
*-*-openbsd* | *-*-freebsd* | *-*-dragonfly*)
# Do not include libc_r directly, use -pthread flag.
continue
;;
esac
fi
deplibs="$deplibs $arg"
continue
;;
-module)
module=yes
continue
;;
# Tru64 UNIX uses -model [arg] to determine the layout of C++
# classes, name mangling, and exception handling.
# Darwin uses the -arch flag to determine output architecture.
-model|-arch|-isysroot)
compiler_flags="$compiler_flags $arg"
func_append compile_command " $arg"
func_append finalize_command " $arg"
prev=xcompiler
continue
;;
-mt|-mthreads|-kthread|-Kthread|-pthread|-pthreads|--thread-safe|-threads)
compiler_flags="$compiler_flags $arg"
func_append compile_command " $arg"
func_append finalize_command " $arg"
case "$new_inherited_linker_flags " in
*" $arg "*) ;;
* ) new_inherited_linker_flags="$new_inherited_linker_flags $arg" ;;
esac
continue
;;
-multi_module)
single_module="${wl}-multi_module"
continue
;;
-no-fast-install)
fast_install=no
continue
;;
-no-install)
case $host in
*-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-*-darwin*)
# The PATH hackery in wrapper scripts is required on Windows
# and Darwin in order for the loader to find any dlls it needs.
func_warning "\`-no-install' is ignored for $host"
func_warning "assuming \`-no-fast-install' instead"
fast_install=no
;;
*) no_install=yes ;;
esac
continue
;;
-no-undefined)
allow_undefined=no
continue
;;
-objectlist)
prev=objectlist
continue
;;
-o) prev=output ;;
-precious-files-regex)
prev=precious_regex
continue
;;
-release)
prev=release
continue
;;
-rpath)
prev=rpath
continue
;;
-R)
prev=xrpath
continue
;;
-R*)
func_stripname '-R' '' "$arg"
dir=$func_stripname_result
# We need an absolute path.
case $dir in
[\\/]* | [A-Za-z]:[\\/]*) ;;
*)
func_fatal_error "only absolute run-paths are allowed"
;;
esac
case "$xrpath " in
*" $dir "*) ;;
*) xrpath="$xrpath $dir" ;;
esac
continue
;;
-shared)
# The effects of -shared are defined in a previous loop.
continue
;;
-shrext)
prev=shrext
continue
;;
-static | -static-libtool-libs)
# The effects of -static are defined in a previous loop.
# We used to do the same as -all-static on platforms that
# didn't have a PIC flag, but the assumption that the effects
# would be equivalent was wrong. It would break on at least
# Digital Unix and AIX.
continue
;;
-thread-safe)
thread_safe=yes
continue
;;
-version-info)
prev=vinfo
continue
;;
-version-number)
prev=vinfo
vinfo_number=yes
continue
;;
-weak)
prev=weak
continue
;;
-Wc,*)
func_stripname '-Wc,' '' "$arg"
args=$func_stripname_result
arg=
save_ifs="$IFS"; IFS=','
for flag in $args; do
IFS="$save_ifs"
func_quote_for_eval "$flag"
arg="$arg $wl$func_quote_for_eval_result"
compiler_flags="$compiler_flags $func_quote_for_eval_result"
done
IFS="$save_ifs"
func_stripname ' ' '' "$arg"
arg=$func_stripname_result
;;
-Wl,*)
func_stripname '-Wl,' '' "$arg"
args=$func_stripname_result
arg=
save_ifs="$IFS"; IFS=','
for flag in $args; do
IFS="$save_ifs"
func_quote_for_eval "$flag"
arg="$arg $wl$func_quote_for_eval_result"
compiler_flags="$compiler_flags $wl$func_quote_for_eval_result"
linker_flags="$linker_flags $func_quote_for_eval_result"
done
IFS="$save_ifs"
func_stripname ' ' '' "$arg"
arg=$func_stripname_result
;;
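      # Illustrative example: the comma-splitting above turns
      #   -Wl,-rpath,/opt/lib
      # into two linker arguments.  Assuming a $wl of "-Wl,", the rewritten
      # compiler argument becomes "-Wl,-rpath -Wl,/opt/lib" while
      # $linker_flags receives the bare "-rpath /opt/lib".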
-Xcompiler)
prev=xcompiler
continue
;;
-Xlinker)
prev=xlinker
continue
;;
-XCClinker)
prev=xcclinker
continue
;;
# -msg_* for osf cc
-msg_*)
func_quote_for_eval "$arg"
arg="$func_quote_for_eval_result"
;;
# -64, -mips[0-9] enable 64-bit mode on the SGI compiler
# -r[0-9][0-9]* specifies the processor on the SGI compiler
# -xarch=*, -xtarget=* enable 64-bit mode on the Sun compiler
# +DA*, +DD* enable 64-bit mode on the HP compiler
# -q* pass through compiler args for the IBM compiler
# -m*, -t[45]*, -txscale* pass through architecture-specific
# compiler args for GCC
# -F/path gives path to uninstalled frameworks, gcc on darwin
# -p, -pg, --coverage, -fprofile-* pass through profiling flag for GCC
# @file GCC response files
-64|-mips[0-9]|-r[0-9][0-9]*|-xarch=*|-xtarget=*|+DA*|+DD*|-q*|-m*| \
-t[45]*|-txscale*|-p|-pg|--coverage|-fprofile-*|-F*|@*)
func_quote_for_eval "$arg"
arg="$func_quote_for_eval_result"
func_append compile_command " $arg"
func_append finalize_command " $arg"
compiler_flags="$compiler_flags $arg"
continue
;;
# Some other compiler flag.
-* | +*)
func_quote_for_eval "$arg"
arg="$func_quote_for_eval_result"
;;
*.$objext)
# A standard object.
objs="$objs $arg"
;;
*.lo)
# A libtool-controlled object.
# Check to see that this really is a libtool object.
if func_lalib_unsafe_p "$arg"; then
pic_object=
non_pic_object=
# Read the .lo file
func_source "$arg"
if test -z "$pic_object" ||
test -z "$non_pic_object" ||
test "$pic_object" = none &&
test "$non_pic_object" = none; then
func_fatal_error "cannot find name of object for \`$arg'"
fi
# Extract subdirectory from the argument.
func_dirname "$arg" "/" ""
xdir="$func_dirname_result"
if test "$pic_object" != none; then
# Prepend the subdirectory the object is found in.
pic_object="$xdir$pic_object"
if test "$prev" = dlfiles; then
if test "$build_libtool_libs" = yes && test "$dlopen_support" = yes; then
dlfiles="$dlfiles $pic_object"
prev=
continue
else
# If libtool objects are unsupported, then we need to preload.
prev=dlprefiles
fi
fi
# CHECK ME: I think I busted this. -Ossama
if test "$prev" = dlprefiles; then
# Preload the old-style object.
dlprefiles="$dlprefiles $pic_object"
prev=
fi
# A PIC object.
func_append libobjs " $pic_object"
arg="$pic_object"
fi
# Non-PIC object.
if test "$non_pic_object" != none; then
# Prepend the subdirectory the object is found in.
non_pic_object="$xdir$non_pic_object"
# A standard non-PIC object
func_append non_pic_objects " $non_pic_object"
if test -z "$pic_object" || test "$pic_object" = none ; then
arg="$non_pic_object"
fi
else
# If the PIC object exists, use it instead.
# $xdir was prepended to $pic_object above.
non_pic_object="$pic_object"
func_append non_pic_objects " $non_pic_object"
fi
else
# Only an error if not doing a dry-run.
if $opt_dry_run; then
# Extract subdirectory from the argument.
func_dirname "$arg" "/" ""
xdir="$func_dirname_result"
func_lo2o "$arg"
pic_object=$xdir$objdir/$func_lo2o_result
non_pic_object=$xdir$func_lo2o_result
func_append libobjs " $pic_object"
func_append non_pic_objects " $non_pic_object"
else
func_fatal_error "\`$arg' is not a valid libtool object"
fi
fi
;;
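      # Illustrative example: a .lo file is a small shell fragment read by
      # func_source above; a typical (hypothetical) foo.lo contains
      #   pic_object='.libs/foo.o'
      #   non_pic_object='foo.o'
      # from which both object lists are populated, with the subdirectory
      # of the .lo file prepended to each object name.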
*.$libext)
# An archive.
deplibs="$deplibs $arg"
old_deplibs="$old_deplibs $arg"
continue
;;
*.la)
# A libtool-controlled library.
if test "$prev" = dlfiles; then
# This library was specified with -dlopen.
dlfiles="$dlfiles $arg"
prev=
elif test "$prev" = dlprefiles; then
# The library was specified with -dlpreopen.
dlprefiles="$dlprefiles $arg"
prev=
else
deplibs="$deplibs $arg"
fi
continue
;;
# Some other compiler argument.
*)
# Unknown arguments in both finalize_command and compile_command need
# to be aesthetically quoted because they are evaled later.
func_quote_for_eval "$arg"
arg="$func_quote_for_eval_result"
;;
esac # arg
# Now actually substitute the argument into the commands.
if test -n "$arg"; then
func_append compile_command " $arg"
func_append finalize_command " $arg"
fi
done # argument parsing loop
test -n "$prev" && \
func_fatal_help "the \`$prevarg' option requires an argument"
if test "$export_dynamic" = yes && test -n "$export_dynamic_flag_spec"; then
eval arg=\"$export_dynamic_flag_spec\"
func_append compile_command " $arg"
func_append finalize_command " $arg"
fi
oldlibs=
# calculate the name of the file, without its directory
func_basename "$output"
outputname="$func_basename_result"
libobjs_save="$libobjs"
if test -n "$shlibpath_var"; then
# get the directories listed in $shlibpath_var
eval shlib_search_path=\`\$ECHO \"X\${$shlibpath_var}\" \| \$Xsed -e \'s/:/ /g\'\`
else
shlib_search_path=
fi
eval sys_lib_search_path=\"$sys_lib_search_path_spec\"
eval sys_lib_dlsearch_path=\"$sys_lib_dlsearch_path_spec\"
func_dirname "$output" "/" ""
output_objdir="$func_dirname_result$objdir"
# Create the object directory.
func_mkdir_p "$output_objdir"
# Determine the type of output
case $output in
"")
func_fatal_help "you must specify an output file"
;;
*.$libext) linkmode=oldlib ;;
*.lo | *.$objext) linkmode=obj ;;
*.la) linkmode=lib ;;
*) linkmode=prog ;; # Anything else should be a program.
esac
specialdeplibs=
libs=
# Find all interdependent deplibs by searching for libraries
# that are linked more than once (e.g. -la -lb -la)
for deplib in $deplibs; do
if $opt_duplicate_deps ; then
case "$libs " in
*" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;;
esac
fi
libs="$libs $deplib"
done
if test "$linkmode" = lib; then
libs="$predeps $libs $compiler_lib_search_path $postdeps"
# Compute libraries that are listed more than once in $predeps
# $postdeps and mark them as special (i.e., whose duplicates are
# not to be eliminated).
pre_post_deps=
if $opt_duplicate_compiler_generated_deps; then
for pre_post_dep in $predeps $postdeps; do
case "$pre_post_deps " in
*" $pre_post_dep "*) specialdeplibs="$specialdeplibs $pre_post_deps" ;;
esac
pre_post_deps="$pre_post_deps $pre_post_dep"
done
fi
pre_post_deps=
fi
deplibs=
newdependency_libs=
newlib_search_path=
need_relink=no # whether we're linking any uninstalled libtool libraries
notinst_deplibs= # not-installed libtool libraries
notinst_path= # paths that contain not-installed libtool libraries
case $linkmode in
lib)
passes="conv dlpreopen link"
for file in $dlfiles $dlprefiles; do
case $file in
*.la) ;;
*)
func_fatal_help "libraries can \`-dlopen' only libtool libraries: $file"
;;
esac
done
;;
prog)
compile_deplibs=
finalize_deplibs=
alldeplibs=no
newdlfiles=
newdlprefiles=
passes="conv scan dlopen dlpreopen link"
;;
*) passes="conv"
;;
esac
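    # Sketch of the pass structure: for a program (linkmode=prog) the loop
    # below runs five passes over the libraries:
    #   conv       collect convenience libraries
    #   scan       accumulate search paths
    #   dlopen     handle -dlopen'ed libraries
    #   dlpreopen  handle -dlpreopen'ed libraries
    #   link       compute the final compile/finalize deplibs
    # A libtool library (linkmode=lib) needs only conv, dlpreopen and link;
    # archives and objects need only conv.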
for pass in $passes; do
# The preopen pass in lib mode reverses $deplibs; put it back here
# so that -L comes before libs that need it for instance...
if test "$linkmode,$pass" = "lib,link"; then
## FIXME: Find the place where the list is rebuilt in the wrong
## order, and fix it there properly
tmp_deplibs=
for deplib in $deplibs; do
tmp_deplibs="$deplib $tmp_deplibs"
done
deplibs="$tmp_deplibs"
fi
if test "$linkmode,$pass" = "lib,link" ||
test "$linkmode,$pass" = "prog,scan"; then
libs="$deplibs"
deplibs=
fi
if test "$linkmode" = prog; then
case $pass in
dlopen) libs="$dlfiles" ;;
dlpreopen) libs="$dlprefiles" ;;
link) libs="$deplibs %DEPLIBS% $dependency_libs" ;;
esac
fi
if test "$linkmode,$pass" = "lib,dlpreopen"; then
# Collect and forward deplibs of preopened libtool libs
for lib in $dlprefiles; do
# Ignore non-libtool-libs
dependency_libs=
case $lib in
*.la) func_source "$lib" ;;
esac
# Collect preopened libtool deplibs, except any this library
# has declared as weak libs
for deplib in $dependency_libs; do
deplib_base=`$ECHO "X$deplib" | $Xsed -e "$basename"`
case " $weak_libs " in
*" $deplib_base "*) ;;
*) deplibs="$deplibs $deplib" ;;
esac
done
done
libs="$dlprefiles"
fi
if test "$pass" = dlopen; then
# Collect dlpreopened libraries
save_deplibs="$deplibs"
deplibs=
fi
for deplib in $libs; do
lib=
found=no
case $deplib in
-mt|-mthreads|-kthread|-Kthread|-pthread|-pthreads|--thread-safe|-threads)
if test "$linkmode,$pass" = "prog,link"; then
compile_deplibs="$deplib $compile_deplibs"
finalize_deplibs="$deplib $finalize_deplibs"
else
compiler_flags="$compiler_flags $deplib"
if test "$linkmode" = lib ; then
case "$new_inherited_linker_flags " in
*" $deplib "*) ;;
* ) new_inherited_linker_flags="$new_inherited_linker_flags $deplib" ;;
esac
fi
fi
continue
;;
-l*)
if test "$linkmode" != lib && test "$linkmode" != prog; then
func_warning "\`-l' is ignored for archives/objects"
continue
fi
func_stripname '-l' '' "$deplib"
name=$func_stripname_result
if test "$linkmode" = lib; then
searchdirs="$newlib_search_path $lib_search_path $compiler_lib_search_dirs $sys_lib_search_path $shlib_search_path"
else
searchdirs="$newlib_search_path $lib_search_path $sys_lib_search_path $shlib_search_path"
fi
for searchdir in $searchdirs; do
for search_ext in .la $std_shrext .so .a; do
# Search the libtool library
lib="$searchdir/lib${name}${search_ext}"
if test -f "$lib"; then
if test "$search_ext" = ".la"; then
found=yes
else
found=no
fi
break 2
fi
done
done
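	# Illustrative example: for -lfoo the nested loops above probe, in
	# each search directory,
	#   libfoo.la  libfoo$std_shrext  libfoo.so  libfoo.a
	# in that order.  Only a .la hit sets found=yes, so a plain shared
	# or static library found here still takes the non-libtool branch
	# below.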
if test "$found" != yes; then
# deplib doesn't seem to be a libtool library
if test "$linkmode,$pass" = "prog,link"; then
compile_deplibs="$deplib $compile_deplibs"
finalize_deplibs="$deplib $finalize_deplibs"
else
deplibs="$deplib $deplibs"
test "$linkmode" = lib && newdependency_libs="$deplib $newdependency_libs"
fi
continue
else # deplib is a libtool library
	      # If $allow_libtool_libs_with_static_runtimes is set and $deplib
	      # is a standard runtime library, we need to do some special
	      # things here, and not later.
if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then
case " $predeps $postdeps " in
*" $deplib "*)
if func_lalib_p "$lib"; then
library_names=
old_library=
func_source "$lib"
for l in $old_library $library_names; do
ll="$l"
done
if test "X$ll" = "X$old_library" ; then # only static version available
found=no
func_dirname "$lib" "" "."
ladir="$func_dirname_result"
lib=$ladir/$old_library
if test "$linkmode,$pass" = "prog,link"; then
compile_deplibs="$deplib $compile_deplibs"
finalize_deplibs="$deplib $finalize_deplibs"
else
deplibs="$deplib $deplibs"
test "$linkmode" = lib && newdependency_libs="$deplib $newdependency_libs"
fi
continue
fi
fi
;;
*) ;;
esac
fi
fi
;; # -l
*.ltframework)
if test "$linkmode,$pass" = "prog,link"; then
compile_deplibs="$deplib $compile_deplibs"
finalize_deplibs="$deplib $finalize_deplibs"
else
deplibs="$deplib $deplibs"
if test "$linkmode" = lib ; then
case "$new_inherited_linker_flags " in
*" $deplib "*) ;;
* ) new_inherited_linker_flags="$new_inherited_linker_flags $deplib" ;;
esac
fi
fi
continue
;;
-L*)
case $linkmode in
lib)
deplibs="$deplib $deplibs"
test "$pass" = conv && continue
newdependency_libs="$deplib $newdependency_libs"
func_stripname '-L' '' "$deplib"
newlib_search_path="$newlib_search_path $func_stripname_result"
;;
prog)
if test "$pass" = conv; then
deplibs="$deplib $deplibs"
continue
fi
if test "$pass" = scan; then
deplibs="$deplib $deplibs"
else
compile_deplibs="$deplib $compile_deplibs"
finalize_deplibs="$deplib $finalize_deplibs"
fi
func_stripname '-L' '' "$deplib"
newlib_search_path="$newlib_search_path $func_stripname_result"
;;
*)
func_warning "\`-L' is ignored for archives/objects"
;;
esac # linkmode
continue
;; # -L
-R*)
if test "$pass" = link; then
func_stripname '-R' '' "$deplib"
dir=$func_stripname_result
# Make sure the xrpath contains only unique directories.
case "$xrpath " in
*" $dir "*) ;;
*) xrpath="$xrpath $dir" ;;
esac
fi
deplibs="$deplib $deplibs"
continue
;;
*.la) lib="$deplib" ;;
*.$libext)
if test "$pass" = conv; then
deplibs="$deplib $deplibs"
continue
fi
case $linkmode in
lib)
# Linking convenience modules into shared libraries is allowed,
# but linking other static libraries is non-portable.
case " $dlpreconveniencelibs " in
*" $deplib "*) ;;
*)
valid_a_lib=no
case $deplibs_check_method in
match_pattern*)
set dummy $deplibs_check_method; shift
match_pattern_regex=`expr "$deplibs_check_method" : "$1 \(.*\)"`
if eval "\$ECHO \"X$deplib\"" 2>/dev/null | $Xsed -e 10q \
| $EGREP "$match_pattern_regex" > /dev/null; then
valid_a_lib=yes
fi
;;
pass_all)
valid_a_lib=yes
;;
esac
if test "$valid_a_lib" != yes; then
$ECHO
$ECHO "*** Warning: Trying to link with static lib archive $deplib."
$ECHO "*** I have the capability to make that library automatically link in when"
$ECHO "*** you link to this library. But I can only do this if you have a"
$ECHO "*** shared version of the library, which you do not appear to have"
$ECHO "*** because the file extensions .$libext of this argument makes me believe"
$ECHO "*** that it is just a static archive that I should not use here."
else
$ECHO
$ECHO "*** Warning: Linking the shared library $output against the"
$ECHO "*** static library $deplib is not portable!"
deplibs="$deplib $deplibs"
fi
;;
esac
continue
;;
prog)
if test "$pass" != link; then
deplibs="$deplib $deplibs"
else
compile_deplibs="$deplib $compile_deplibs"
finalize_deplibs="$deplib $finalize_deplibs"
fi
continue
;;
esac # linkmode
;; # *.$libext
*.lo | *.$objext)
if test "$pass" = conv; then
deplibs="$deplib $deplibs"
elif test "$linkmode" = prog; then
if test "$pass" = dlpreopen || test "$dlopen_support" != yes || test "$build_libtool_libs" = no; then
# If there is no dlopen support or we're linking statically,
# we need to preload.
newdlprefiles="$newdlprefiles $deplib"
compile_deplibs="$deplib $compile_deplibs"
finalize_deplibs="$deplib $finalize_deplibs"
else
newdlfiles="$newdlfiles $deplib"
fi
fi
continue
;;
%DEPLIBS%)
alldeplibs=yes
continue
;;
esac # case $deplib
if test "$found" = yes || test -f "$lib"; then :
else
func_fatal_error "cannot find the library \`$lib' or unhandled argument \`$deplib'"
fi
# Check to see that this really is a libtool archive.
func_lalib_unsafe_p "$lib" \
|| func_fatal_error "\`$lib' is not a valid libtool archive"
func_dirname "$lib" "" "."
ladir="$func_dirname_result"
dlname=
dlopen=
dlpreopen=
libdir=
library_names=
old_library=
inherited_linker_flags=
	# If the library was installed with an old release of libtool,
	# it will not redefine the variables `installed' or `shouldnotlink',
	# so set sane defaults before sourcing the .la file.
installed=yes
shouldnotlink=no
avoidtemprpath=
# Read the .la file
func_source "$lib"
# Convert "-framework foo" to "foo.ltframework"
if test -n "$inherited_linker_flags"; then
tmp_inherited_linker_flags=`$ECHO "X$inherited_linker_flags" | $Xsed -e 's/-framework \([^ $]*\)/\1.ltframework/g'`
for tmp_inherited_linker_flag in $tmp_inherited_linker_flags; do
case " $new_inherited_linker_flags " in
*" $tmp_inherited_linker_flag "*) ;;
*) new_inherited_linker_flags="$new_inherited_linker_flags $tmp_inherited_linker_flag";;
esac
done
fi
dependency_libs=`$ECHO "X $dependency_libs" | $Xsed -e 's% \([^ $]*\).ltframework% -framework \1%g'`
if test "$linkmode,$pass" = "lib,link" ||
test "$linkmode,$pass" = "prog,scan" ||
{ test "$linkmode" != prog && test "$linkmode" != lib; }; then
test -n "$dlopen" && dlfiles="$dlfiles $dlopen"
test -n "$dlpreopen" && dlprefiles="$dlprefiles $dlpreopen"
fi
if test "$pass" = conv; then
# Only check for convenience libraries
deplibs="$lib $deplibs"
if test -z "$libdir"; then
if test -z "$old_library"; then
func_fatal_error "cannot find name of link library for \`$lib'"
fi
# It is a libtool convenience library, so add in its objects.
convenience="$convenience $ladir/$objdir/$old_library"
old_convenience="$old_convenience $ladir/$objdir/$old_library"
elif test "$linkmode" != prog && test "$linkmode" != lib; then
func_fatal_error "\`$lib' is not a convenience library"
fi
tmp_libs=
for deplib in $dependency_libs; do
deplibs="$deplib $deplibs"
if $opt_duplicate_deps ; then
case "$tmp_libs " in
*" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;;
esac
fi
tmp_libs="$tmp_libs $deplib"
done
continue
fi # $pass = conv
# Get the name of the library we link against.
linklib=
for l in $old_library $library_names; do
linklib="$l"
done
if test -z "$linklib"; then
func_fatal_error "cannot find name of link library for \`$lib'"
fi
# This library was specified with -dlopen.
if test "$pass" = dlopen; then
if test -z "$libdir"; then
func_fatal_error "cannot -dlopen a convenience library: \`$lib'"
fi
if test -z "$dlname" ||
test "$dlopen_support" != yes ||
test "$build_libtool_libs" = no; then
# If there is no dlname, no dlopen support or we're linking
# statically, we need to preload. We also need to preload any
# dependent libraries so libltdl's deplib preloader doesn't
# bomb out in the load deplibs phase.
dlprefiles="$dlprefiles $lib $dependency_libs"
else
newdlfiles="$newdlfiles $lib"
fi
continue
fi # $pass = dlopen
# We need an absolute path.
case $ladir in
[\\/]* | [A-Za-z]:[\\/]*) abs_ladir="$ladir" ;;
*)
abs_ladir=`cd "$ladir" && pwd`
if test -z "$abs_ladir"; then
func_warning "cannot determine absolute directory name of \`$ladir'"
func_warning "passing it literally to the linker, although it might fail"
abs_ladir="$ladir"
fi
;;
esac
func_basename "$lib"
laname="$func_basename_result"
# Find the relevant object directory and library name.
if test "X$installed" = Xyes; then
if test ! -f "$libdir/$linklib" && test -f "$abs_ladir/$linklib"; then
func_warning "library \`$lib' was moved."
dir="$ladir"
absdir="$abs_ladir"
libdir="$abs_ladir"
else
dir="$libdir"
absdir="$libdir"
fi
test "X$hardcode_automatic" = Xyes && avoidtemprpath=yes
else
if test ! -f "$ladir/$objdir/$linklib" && test -f "$abs_ladir/$linklib"; then
dir="$ladir"
absdir="$abs_ladir"
# Remove this search path later
notinst_path="$notinst_path $abs_ladir"
else
dir="$ladir/$objdir"
absdir="$abs_ladir/$objdir"
# Remove this search path later
notinst_path="$notinst_path $abs_ladir"
fi
fi # $installed = yes
func_stripname 'lib' '.la' "$laname"
name=$func_stripname_result
# This library was specified with -dlpreopen.
if test "$pass" = dlpreopen; then
if test -z "$libdir" && test "$linkmode" = prog; then
func_fatal_error "only libraries may -dlpreopen a convenience library: \`$lib'"
fi
# Prefer using a static library (so that no silly _DYNAMIC symbols
# are required to link).
if test -n "$old_library"; then
newdlprefiles="$newdlprefiles $dir/$old_library"
# Keep a list of preopened convenience libraries to check
# that they are being used correctly in the link pass.
test -z "$libdir" && \
dlpreconveniencelibs="$dlpreconveniencelibs $dir/$old_library"
# Otherwise, use the dlname, so that lt_dlopen finds it.
elif test -n "$dlname"; then
newdlprefiles="$newdlprefiles $dir/$dlname"
else
newdlprefiles="$newdlprefiles $dir/$linklib"
fi
fi # $pass = dlpreopen
if test -z "$libdir"; then
# Link the convenience library
if test "$linkmode" = lib; then
deplibs="$dir/$old_library $deplibs"
elif test "$linkmode,$pass" = "prog,link"; then
compile_deplibs="$dir/$old_library $compile_deplibs"
finalize_deplibs="$dir/$old_library $finalize_deplibs"
else
deplibs="$lib $deplibs" # used for prog,scan pass
fi
continue
fi
if test "$linkmode" = prog && test "$pass" != link; then
newlib_search_path="$newlib_search_path $ladir"
deplibs="$lib $deplibs"
linkalldeplibs=no
if test "$link_all_deplibs" != no || test -z "$library_names" ||
test "$build_libtool_libs" = no; then
linkalldeplibs=yes
fi
tmp_libs=
for deplib in $dependency_libs; do
case $deplib in
-L*) func_stripname '-L' '' "$deplib"
newlib_search_path="$newlib_search_path $func_stripname_result"
;;
esac
# Need to link against all dependency_libs?
if test "$linkalldeplibs" = yes; then
deplibs="$deplib $deplibs"
else
# Need to hardcode shared library paths
	    # and/or link against static libraries
newdependency_libs="$deplib $newdependency_libs"
fi
if $opt_duplicate_deps ; then
case "$tmp_libs " in
*" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;;
esac
fi
tmp_libs="$tmp_libs $deplib"
done # for deplib
continue
fi # $linkmode = prog...
if test "$linkmode,$pass" = "prog,link"; then
if test -n "$library_names" &&
{ { test "$prefer_static_libs" = no ||
test "$prefer_static_libs,$installed" = "built,yes"; } ||
test -z "$old_library"; }; then
# We need to hardcode the library path
if test -n "$shlibpath_var" && test -z "$avoidtemprpath" ; then
# Make sure the rpath contains only unique directories.
case "$temp_rpath:" in
*"$absdir:"*) ;;
*) temp_rpath="$temp_rpath$absdir:" ;;
esac
fi
# Hardcode the library path.
# Skip directories that are in the system default run-time
# search path.
case " $sys_lib_dlsearch_path " in
*" $absdir "*) ;;
*)
case "$compile_rpath " in
*" $absdir "*) ;;
*) compile_rpath="$compile_rpath $absdir"
esac
;;
esac
case " $sys_lib_dlsearch_path " in
*" $libdir "*) ;;
*)
case "$finalize_rpath " in
*" $libdir "*) ;;
*) finalize_rpath="$finalize_rpath $libdir"
esac
;;
esac
fi # $linkmode,$pass = prog,link...
if test "$alldeplibs" = yes &&
{ test "$deplibs_check_method" = pass_all ||
{ test "$build_libtool_libs" = yes &&
test -n "$library_names"; }; }; then
# We only need to search for static libraries
continue
fi
fi
link_static=no # Whether the deplib will be linked statically
use_static_libs=$prefer_static_libs
if test "$use_static_libs" = built && test "$installed" = yes; then
use_static_libs=no
fi
if test -n "$library_names" &&
{ test "$use_static_libs" = no || test -z "$old_library"; }; then
case $host in
*cygwin* | *mingw*)
# No point in relinking DLLs because paths are not encoded
notinst_deplibs="$notinst_deplibs $lib"
need_relink=no
;;
*)
if test "$installed" = no; then
notinst_deplibs="$notinst_deplibs $lib"
need_relink=yes
fi
;;
esac
# This is a shared library
	# Warn about portability: we can't link against a `-module' library
	# on some systems (e.g. Darwin).  Don't warn about dlopened modules,
	# though!
dlopenmodule=""
for dlpremoduletest in $dlprefiles; do
if test "X$dlpremoduletest" = "X$lib"; then
dlopenmodule="$dlpremoduletest"
break
fi
done
if test -z "$dlopenmodule" && test "$shouldnotlink" = yes && test "$pass" = link; then
$ECHO
if test "$linkmode" = prog; then
$ECHO "*** Warning: Linking the executable $output against the loadable module"
else
$ECHO "*** Warning: Linking the shared library $output against the loadable module"
fi
$ECHO "*** $linklib is not portable!"
fi
if test "$linkmode" = lib &&
test "$hardcode_into_libs" = yes; then
# Hardcode the library path.
# Skip directories that are in the system default run-time
# search path.
case " $sys_lib_dlsearch_path " in
*" $absdir "*) ;;
*)
case "$compile_rpath " in
*" $absdir "*) ;;
*) compile_rpath="$compile_rpath $absdir"
esac
;;
esac
case " $sys_lib_dlsearch_path " in
*" $libdir "*) ;;
*)
case "$finalize_rpath " in
*" $libdir "*) ;;
*) finalize_rpath="$finalize_rpath $libdir"
esac
;;
esac
fi
if test -n "$old_archive_from_expsyms_cmds"; then
# figure out the soname
set dummy $library_names
shift
realname="$1"
shift
libname=`eval "\\$ECHO \"$libname_spec\""`
	  # Use the dlname if we have one; it is what the runtime loader expects.
if test -n "$dlname"; then
soname="$dlname"
elif test -n "$soname_spec"; then
	    # Windows hosts need a numeric version suffix computed from
	    # current - age.
	    case $host in
	      *cygwin* | *mingw*)
func_arith $current - $age
major=$func_arith_result
versuffix="-$major"
;;
esac
eval soname=\"$soname_spec\"
else
soname="$realname"
fi
# Make a new name for the extract_expsyms_cmds to use
soroot="$soname"
func_basename "$soroot"
soname="$func_basename_result"
func_stripname 'lib' '.dll' "$soname"
newlib=libimp-$func_stripname_result.a
# If the library has no export list, then create one now
if test -f "$output_objdir/$soname-def"; then :
else
func_verbose "extracting exported symbol list from \`$soname'"
func_execute_cmds "$extract_expsyms_cmds" 'exit $?'
fi
# Create $newlib
if test -f "$output_objdir/$newlib"; then :; else
func_verbose "generating import library for \`$soname'"
func_execute_cmds "$old_archive_from_expsyms_cmds" 'exit $?'
fi
# make sure the library variables are pointing to the new library
dir=$output_objdir
linklib=$newlib
fi # test -n "$old_archive_from_expsyms_cmds"
if test "$linkmode" = prog || test "$mode" != relink; then
add_shlibpath=
add_dir=
add=
lib_linked=yes
case $hardcode_action in
immediate | unsupported)
if test "$hardcode_direct" = no; then
add="$dir/$linklib"
case $host in
*-*-sco3.2v5.0.[024]*) add_dir="-L$dir" ;;
*-*-sysv4*uw2*) add_dir="-L$dir" ;;
*-*-sysv5OpenUNIX* | *-*-sysv5UnixWare7.[01].[10]* | \
*-*-unixware7*) add_dir="-L$dir" ;;
*-*-darwin* )
	      # If the lib is a (non-dlopened) module then we cannot
	      # link against it; someone is ignoring the earlier warnings.
if /usr/bin/file -L $add 2> /dev/null |
$GREP ": [^:]* bundle" >/dev/null ; then
if test "X$dlopenmodule" != "X$lib"; then
$ECHO "*** Warning: lib $linklib is a module, not a shared library"
if test -z "$old_library" ; then
$ECHO
$ECHO "*** And there doesn't seem to be a static archive available"
$ECHO "*** The link will probably fail, sorry"
else
add="$dir/$old_library"
fi
elif test -n "$old_library"; then
add="$dir/$old_library"
fi
fi
esac
elif test "$hardcode_minus_L" = no; then
case $host in
*-*-sunos*) add_shlibpath="$dir" ;;
esac
add_dir="-L$dir"
add="-l$name"
elif test "$hardcode_shlibpath_var" = no; then
add_shlibpath="$dir"
add="-l$name"
else
lib_linked=no
fi
;;
relink)
if test "$hardcode_direct" = yes &&
test "$hardcode_direct_absolute" = no; then
add="$dir/$linklib"
elif test "$hardcode_minus_L" = yes; then
add_dir="-L$dir"
# Try looking first in the location we're being installed to.
if test -n "$inst_prefix_dir"; then
case $libdir in
[\\/]*)
add_dir="$add_dir -L$inst_prefix_dir$libdir"
;;
esac
fi
add="-l$name"
elif test "$hardcode_shlibpath_var" = yes; then
add_shlibpath="$dir"
add="-l$name"
else
lib_linked=no
fi
;;
*) lib_linked=no ;;
esac
if test "$lib_linked" != yes; then
func_fatal_configuration "unsupported hardcode properties"
fi
if test -n "$add_shlibpath"; then
case :$compile_shlibpath: in
*":$add_shlibpath:"*) ;;
*) compile_shlibpath="$compile_shlibpath$add_shlibpath:" ;;
esac
fi
if test "$linkmode" = prog; then
test -n "$add_dir" && compile_deplibs="$add_dir $compile_deplibs"
test -n "$add" && compile_deplibs="$add $compile_deplibs"
else
test -n "$add_dir" && deplibs="$add_dir $deplibs"
test -n "$add" && deplibs="$add $deplibs"
if test "$hardcode_direct" != yes &&
test "$hardcode_minus_L" != yes &&
test "$hardcode_shlibpath_var" = yes; then
case :$finalize_shlibpath: in
*":$libdir:"*) ;;
*) finalize_shlibpath="$finalize_shlibpath$libdir:" ;;
esac
fi
fi
fi
if test "$linkmode" = prog || test "$mode" = relink; then
add_shlibpath=
add_dir=
add=
# Finalize command for both is simple: just hardcode it.
if test "$hardcode_direct" = yes &&
test "$hardcode_direct_absolute" = no; then
add="$libdir/$linklib"
elif test "$hardcode_minus_L" = yes; then
add_dir="-L$libdir"
add="-l$name"
elif test "$hardcode_shlibpath_var" = yes; then
case :$finalize_shlibpath: in
*":$libdir:"*) ;;
*) finalize_shlibpath="$finalize_shlibpath$libdir:" ;;
esac
add="-l$name"
elif test "$hardcode_automatic" = yes; then
if test -n "$inst_prefix_dir" &&
test -f "$inst_prefix_dir$libdir/$linklib" ; then
add="$inst_prefix_dir$libdir/$linklib"
else
add="$libdir/$linklib"
fi
else
	    # We cannot hardcode the path, so fall back to -L/-l.
add_dir="-L$libdir"
# Try looking first in the location we're being installed to.
if test -n "$inst_prefix_dir"; then
case $libdir in
[\\/]*)
add_dir="$add_dir -L$inst_prefix_dir$libdir"
;;
esac
fi
add="-l$name"
fi
if test "$linkmode" = prog; then
test -n "$add_dir" && finalize_deplibs="$add_dir $finalize_deplibs"
test -n "$add" && finalize_deplibs="$add $finalize_deplibs"
else
test -n "$add_dir" && deplibs="$add_dir $deplibs"
test -n "$add" && deplibs="$add $deplibs"
fi
fi
elif test "$linkmode" = prog; then
# Here we assume that one of hardcode_direct or hardcode_minus_L
# is not unsupported. This is valid on all known static and
# shared platforms.
if test "$hardcode_direct" != unsupported; then
test -n "$old_library" && linklib="$old_library"
compile_deplibs="$dir/$linklib $compile_deplibs"
finalize_deplibs="$dir/$linklib $finalize_deplibs"
else
compile_deplibs="-l$name -L$dir $compile_deplibs"
finalize_deplibs="-l$name -L$dir $finalize_deplibs"
fi
elif test "$build_libtool_libs" = yes; then
# Not a shared library
if test "$deplibs_check_method" != pass_all; then
	# We're trying to link a shared library against a static one
# but the system doesn't support it.
# Just print a warning and add the library to dependency_libs so
# that the program can be linked against the static library.
$ECHO
$ECHO "*** Warning: This system can not link to static lib archive $lib."
$ECHO "*** I have the capability to make that library automatically link in when"
$ECHO "*** you link to this library. But I can only do this if you have a"
$ECHO "*** shared version of the library, which you do not appear to have."
if test "$module" = yes; then
$ECHO "*** But as you try to build a module library, libtool will still create "
$ECHO "*** a static module, that should work as long as the dlopening application"
$ECHO "*** is linked with the -dlopen flag to resolve symbols at runtime."
if test -z "$global_symbol_pipe"; then
$ECHO
$ECHO "*** However, this would only work if libtool was able to extract symbol"
$ECHO "*** lists from a program, using \`nm' or equivalent, but libtool could"
$ECHO "*** not find such a program. So, this module is probably useless."
$ECHO "*** \`nm' from GNU binutils and a full rebuild may help."
fi
if test "$build_old_libs" = no; then
build_libtool_libs=module
build_old_libs=yes
else
build_libtool_libs=no
fi
fi
else
deplibs="$dir/$old_library $deplibs"
link_static=yes
fi
fi # link shared/static library?
if test "$linkmode" = lib; then
if test -n "$dependency_libs" &&
{ test "$hardcode_into_libs" != yes ||
test "$build_old_libs" = yes ||
test "$link_static" = yes; }; then
# Extract -R from dependency_libs
temp_deplibs=
for libdir in $dependency_libs; do
case $libdir in
-R*) func_stripname '-R' '' "$libdir"
temp_xrpath=$func_stripname_result
case " $xrpath " in
*" $temp_xrpath "*) ;;
*) xrpath="$xrpath $temp_xrpath";;
esac;;
*) temp_deplibs="$temp_deplibs $libdir";;
esac
done
dependency_libs="$temp_deplibs"
fi
newlib_search_path="$newlib_search_path $absdir"
# Link against this library
test "$link_static" = no && newdependency_libs="$abs_ladir/$laname $newdependency_libs"
# ... and its dependency_libs
tmp_libs=
for deplib in $dependency_libs; do
newdependency_libs="$deplib $newdependency_libs"
if $opt_duplicate_deps ; then
case "$tmp_libs " in
*" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;;
esac
fi
tmp_libs="$tmp_libs $deplib"
done
if test "$link_all_deplibs" != no; then
# Add the search paths of all dependency libraries
for deplib in $dependency_libs; do
case $deplib in
-L*) path="$deplib" ;;
*.la)
func_dirname "$deplib" "" "."
dir="$func_dirname_result"
# We need an absolute path.
case $dir in
[\\/]* | [A-Za-z]:[\\/]*) absdir="$dir" ;;
*)
absdir=`cd "$dir" && pwd`
if test -z "$absdir"; then
func_warning "cannot determine absolute directory name of \`$dir'"
absdir="$dir"
fi
;;
esac
if $GREP "^installed=no" $deplib > /dev/null; then
case $host in
*-*-darwin*)
depdepl=
eval deplibrary_names=`${SED} -n -e 's/^library_names=\(.*\)$/\1/p' $deplib`
if test -n "$deplibrary_names" ; then
for tmp in $deplibrary_names ; do
depdepl=$tmp
done
if test -f "$absdir/$objdir/$depdepl" ; then
depdepl="$absdir/$objdir/$depdepl"
darwin_install_name=`${OTOOL} -L $depdepl | awk '{if (NR == 2) {print $1;exit}}'`
if test -z "$darwin_install_name"; then
darwin_install_name=`${OTOOL64} -L $depdepl | awk '{if (NR == 2) {print $1;exit}}'`
fi
compiler_flags="$compiler_flags ${wl}-dylib_file ${wl}${darwin_install_name}:${depdepl}"
linker_flags="$linker_flags -dylib_file ${darwin_install_name}:${depdepl}"
path=
fi
fi
;;
*)
path="-L$absdir/$objdir"
;;
esac
else
eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $deplib`
test -z "$libdir" && \
func_fatal_error "\`$deplib' is not a valid libtool archive"
test "$absdir" != "$libdir" && \
func_warning "\`$deplib' seems to be moved"
path="-L$absdir"
fi
;;
esac
case " $deplibs " in
*" $path "*) ;;
*) deplibs="$path $deplibs" ;;
esac
done
fi # link_all_deplibs != no
fi # linkmode = lib
done # for deplib in $libs
if test "$pass" = link; then
if test "$linkmode" = "prog"; then
compile_deplibs="$new_inherited_linker_flags $compile_deplibs"
finalize_deplibs="$new_inherited_linker_flags $finalize_deplibs"
else
compiler_flags="$compiler_flags "`$ECHO "X $new_inherited_linker_flags" | $Xsed -e 's% \([^ $]*\).ltframework% -framework \1%g'`
fi
fi
dependency_libs="$newdependency_libs"
if test "$pass" = dlpreopen; then
# Link the dlpreopened libraries before other libraries
for deplib in $save_deplibs; do
deplibs="$deplib $deplibs"
done
fi
if test "$pass" != dlopen; then
if test "$pass" != conv; then
# Make sure lib_search_path contains only unique directories.
lib_search_path=
for dir in $newlib_search_path; do
case "$lib_search_path " in
*" $dir "*) ;;
*) lib_search_path="$lib_search_path $dir" ;;
esac
done
newlib_search_path=
fi
if test "$linkmode,$pass" != "prog,link"; then
vars="deplibs"
else
vars="compile_deplibs finalize_deplibs"
fi
for var in $vars dependency_libs; do
# Add libraries to $var in reverse order
eval tmp_libs=\"\$$var\"
new_libs=
for deplib in $tmp_libs; do
# FIXME: Pedantically, this is the right thing to do, so
# that some nasty dependency loop isn't accidentally
# broken:
#new_libs="$deplib $new_libs"
# Pragmatically, this seems to cause very few problems in
# practice:
case $deplib in
-L*) new_libs="$deplib $new_libs" ;;
-R*) ;;
*)
# And here is the reason: when a library appears more
# than once as an explicit dependence of a library, or
# is implicitly linked in more than once by the
# compiler, it is considered special, and multiple
# occurrences thereof are not removed. Compare this
# with having the same library being listed as a
# dependency of multiple other libraries: in this case,
# we know (pedantically, we assume) the library does not
# need to be listed more than once, so we keep only the
# last copy. This is not always right, but it is rare
# enough that we require users that really mean to play
# such unportable linking tricks to link the library
# using -Wl,-lname, so that libtool does not consider it
# for duplicate removal.
case " $specialdeplibs " in
*" $deplib "*) new_libs="$deplib $new_libs" ;;
*)
case " $new_libs " in
*" $deplib "*) ;;
*) new_libs="$deplib $new_libs" ;;
esac
;;
esac
;;
esac
done
tmp_libs=
for deplib in $new_libs; do
case $deplib in
-L*)
case " $tmp_libs " in
*" $deplib "*) ;;
*) tmp_libs="$tmp_libs $deplib" ;;
esac
;;
*) tmp_libs="$tmp_libs $deplib" ;;
esac
done
eval $var=\"$tmp_libs\"
done # for var
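      # Worked example: with tmp_libs="-la -lb -la", the loops above keep
      # both copies of -la only if -la is listed in $specialdeplibs;
      # otherwise the duplicate collapses to a single entry.  -R* entries
      # are dropped here, and repeated -L flags are deduplicated in the
      # second pass.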
fi
# Last step: remove runtime libs from dependency_libs
# (they stay in deplibs)
tmp_libs=
for i in $dependency_libs ; do
case " $predeps $postdeps $compiler_lib_search_path " in
*" $i "*)
i=""
;;
esac
if test -n "$i" ; then
tmp_libs="$tmp_libs $i"
fi
done
dependency_libs=$tmp_libs
done # for pass
if test "$linkmode" = prog; then
dlfiles="$newdlfiles"
fi
if test "$linkmode" = prog || test "$linkmode" = lib; then
dlprefiles="$newdlprefiles"
fi
case $linkmode in
oldlib)
if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then
func_warning "\`-dlopen' is ignored for archives"
fi
case " $deplibs" in
*\ -l* | *\ -L*)
func_warning "\`-l' and \`-L' are ignored for archives" ;;
esac
test -n "$rpath" && \
func_warning "\`-rpath' is ignored for archives"
test -n "$xrpath" && \
func_warning "\`-R' is ignored for archives"
test -n "$vinfo" && \
func_warning "\`-version-info/-version-number' is ignored for archives"
test -n "$release" && \
func_warning "\`-release' is ignored for archives"
test -n "$export_symbols$export_symbols_regex" && \
func_warning "\`-export-symbols' is ignored for archives"
# Now set the variables for building old libraries.
build_libtool_libs=no
oldlibs="$output"
objs="$objs$old_deplibs"
;;
lib)
# Make sure we only generate libraries of the form `libNAME.la'.
case $outputname in
lib*)
func_stripname 'lib' '.la' "$outputname"
name=$func_stripname_result
eval shared_ext=\"$shrext_cmds\"
eval libname=\"$libname_spec\"
;;
*)
test "$module" = no && \
func_fatal_help "libtool library \`$output' must begin with \`lib'"
if test "$need_lib_prefix" != no; then
# Add the "lib" prefix for modules if required
func_stripname '' '.la' "$outputname"
name=$func_stripname_result
eval shared_ext=\"$shrext_cmds\"
eval libname=\"$libname_spec\"
else
func_stripname '' '.la' "$outputname"
libname=$func_stripname_result
fi
;;
esac
if test -n "$objs"; then
if test "$deplibs_check_method" != pass_all; then
func_fatal_error "cannot build libtool library \`$output' from non-libtool objects on this host:$objs"
else
$ECHO
$ECHO "*** Warning: Linking the shared library $output against the non-libtool"
$ECHO "*** objects $objs is not portable!"
libobjs="$libobjs $objs"
fi
fi
test "$dlself" != no && \
func_warning "\`-dlopen self' is ignored for libtool libraries"
set dummy $rpath
shift
test "$#" -gt 1 && \
func_warning "ignoring multiple \`-rpath's for a libtool library"
install_libdir="$1"
oldlibs=
if test -z "$rpath"; then
if test "$build_libtool_libs" = yes; then
# Building a libtool convenience library.
# Some compilers have problems with a `.al' extension so
	  # convenience libraries should have the same extension as an
# archive normally would.
oldlibs="$output_objdir/$libname.$libext $oldlibs"
build_libtool_libs=convenience
build_old_libs=yes
fi
test -n "$vinfo" && \
func_warning "\`-version-info/-version-number' is ignored for convenience libraries"
test -n "$release" && \
func_warning "\`-release' is ignored for convenience libraries"
else
# Parse the version information argument.
save_ifs="$IFS"; IFS=':'
set dummy $vinfo 0 0 0
shift
IFS="$save_ifs"
test -n "$7" && \
func_fatal_help "too many parameters to \`-version-info'"
# convert absolute version numbers to libtool ages
# this retains compatibility with .la files and attempts
# to make the code below a bit more comprehensible
case $vinfo_number in
yes)
number_major="$1"
number_minor="$2"
number_revision="$3"
	  #
	  # There are really only two kinds of version scheme: those that
	  # use the current revision as the major version, and those that
	  # subtract age and use age as a minor version.  But then there
	  # is IRIX, which adds an extra 1 just for fun.
	  #
case $version_type in
darwin|linux|osf|windows|none)
func_arith $number_major + $number_minor
current=$func_arith_result
age="$number_minor"
revision="$number_revision"
;;
freebsd-aout|freebsd-elf|sunos)
current="$number_major"
revision="$number_minor"
age="0"
;;
irix|nonstopux)
func_arith $number_major + $number_minor
current=$func_arith_result
age="$number_minor"
revision="$number_minor"
lt_irix_increment=no
;;
esac
;;
no)
current="$1"
revision="$2"
age="$3"
;;
esac
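	# Worked example: a hypothetical `-version-number 3:2:1' with a linux
	# version_type is converted above to current=5 (3+2), age=2,
	# revision=1, i.e. the same result `-version-info 5:1:2' would give
	# (producing libfoo.so.3.2.1 further below).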
# Check that each of the things are valid numbers.
case $current in
0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;;
*)
func_error "CURRENT \`$current' must be a nonnegative integer"
func_fatal_error "\`$vinfo' is not valid version information"
;;
esac
case $revision in
0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;;
*)
func_error "REVISION \`$revision' must be a nonnegative integer"
func_fatal_error "\`$vinfo' is not valid version information"
;;
esac
case $age in
0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;;
*)
func_error "AGE \`$age' must be a nonnegative integer"
func_fatal_error "\`$vinfo' is not valid version information"
;;
esac
if test "$age" -gt "$current"; then
func_error "AGE \`$age' is greater than the current interface number \`$current'"
func_fatal_error "\`$vinfo' is not valid version information"
fi
# Calculate the version variables.
major=
versuffix=
verstring=
case $version_type in
none) ;;
darwin)
# Like Linux, but with the current version available in
# verstring for coding it into the library header
func_arith $current - $age
major=.$func_arith_result
versuffix="$major.$age.$revision"
# Darwin ld doesn't like 0 for these options...
func_arith $current + 1
minor_current=$func_arith_result
xlcverstring="${wl}-compatibility_version ${wl}$minor_current ${wl}-current_version ${wl}$minor_current.$revision"
verstring="-compatibility_version $minor_current -current_version $minor_current.$revision"
;;
freebsd-aout)
major=".$current"
versuffix=".$current.$revision";
;;
freebsd-elf)
major=".$current"
versuffix=".$current"
;;
irix | nonstopux)
if test "X$lt_irix_increment" = "Xno"; then
func_arith $current - $age
else
func_arith $current - $age + 1
fi
major=$func_arith_result
case $version_type in
nonstopux) verstring_prefix=nonstopux ;;
*) verstring_prefix=sgi ;;
esac
verstring="$verstring_prefix$major.$revision"
# Add in all the interfaces that we are compatible with.
loop=$revision
while test "$loop" -ne 0; do
func_arith $revision - $loop
iface=$func_arith_result
func_arith $loop - 1
loop=$func_arith_result
verstring="$verstring_prefix$major.$iface:$verstring"
done
# Before this point, $major must not contain `.'.
major=.$major
versuffix="$major.$revision"
;;
linux)
func_arith $current - $age
major=.$func_arith_result
versuffix="$major.$age.$revision"
;;
osf)
func_arith $current - $age
major=.$func_arith_result
versuffix=".$current.$age.$revision"
verstring="$current.$age.$revision"
# Add in all the interfaces that we are compatible with.
loop=$age
while test "$loop" -ne 0; do
func_arith $current - $loop
iface=$func_arith_result
func_arith $loop - 1
loop=$func_arith_result
verstring="$verstring:${iface}.0"
done
# Make executables depend on our current version.
verstring="$verstring:${current}.0"
;;
qnx)
major=".$current"
versuffix=".$current"
;;
sunos)
major=".$current"
versuffix=".$current.$revision"
;;
windows)
# Use '-' rather than '.', since we only want one
# extension on DOS 8.3 filesystems.
func_arith $current - $age
major=$func_arith_result
versuffix="-$major"
;;
*)
func_fatal_configuration "unknown library version type \`$version_type'"
;;
esac
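	# Worked example: with current=5, revision=1, age=2 the case above
	# yields, among others:
	#   linux:    major=.3, versuffix=.3.2.1    (libfoo.so.3.2.1)
	#   darwin:   versuffix=.3.2.1, with -compatibility_version 6
	#             -current_version 6.1 in $verstring
	#   windows:  versuffix=-3    (a single 8.3-safe suffix)
	#   sunos:    major=.5, versuffix=.5.1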
# Clear the version info if we defaulted, and they specified a release.
if test -z "$vinfo" && test -n "$release"; then
major=
case $version_type in
darwin)
# we can't check for "0.0" in archive_cmds due to quoting
# problems, so we reset it completely
verstring=
;;
*)
verstring="0.0"
;;
esac
if test "$need_version" = no; then
versuffix=
else
versuffix=".0.0"
fi
fi
# Remove version info from name if versioning should be avoided
if test "$avoid_version" = yes && test "$need_version" = no; then
major=
versuffix=
verstring=""
fi
# Check to see if the archive will have undefined symbols.
if test "$allow_undefined" = yes; then
if test "$allow_undefined_flag" = unsupported; then
func_warning "undefined symbols not allowed in $host shared libraries"
build_libtool_libs=no
build_old_libs=yes
fi
else
# Don't allow undefined symbols.
allow_undefined_flag="$no_undefined_flag"
fi
fi
func_generate_dlsyms "$libname" "$libname" "yes"
libobjs="$libobjs $symfileobj"
test "X$libobjs" = "X " && libobjs=
if test "$mode" != relink; then
# Remove our outputs, but don't remove object files since they
# may have been created when compiling PIC objects.
removelist=
tempremovelist=`$ECHO "$output_objdir/*"`
for p in $tempremovelist; do
case $p in
*.$objext)
;;
$output_objdir/$outputname | $output_objdir/$libname.* | $output_objdir/${libname}${release}.*)
if test "X$precious_files_regex" != "X"; then
if $ECHO "$p" | $EGREP -e "$precious_files_regex" >/dev/null 2>&1
then
continue
fi
fi
removelist="$removelist $p"
;;
*) ;;
esac
done
test -n "$removelist" && \
func_show_eval "${RM}r \$removelist"
fi
# Now set the variables for building old libraries.
if test "$build_old_libs" = yes && test "$build_libtool_libs" != convenience ; then
oldlibs="$oldlibs $output_objdir/$libname.$libext"
# Transform .lo files to .o files.
oldobjs="$objs "`$ECHO "X$libobjs" | $SP2NL | $Xsed -e '/\.'${libext}'$/d' -e "$lo2o" | $NL2SP`
fi
# Eliminate all temporary directories.
#for path in $notinst_path; do
# lib_search_path=`$ECHO "X$lib_search_path " | $Xsed -e "s% $path % %g"`
# deplibs=`$ECHO "X$deplibs " | $Xsed -e "s% -L$path % %g"`
# dependency_libs=`$ECHO "X$dependency_libs " | $Xsed -e "s% -L$path % %g"`
#done
if test -n "$xrpath"; then
# If the user specified any rpath flags, then add them.
temp_xrpath=
for libdir in $xrpath; do
temp_xrpath="$temp_xrpath -R$libdir"
case "$finalize_rpath " in
*" $libdir "*) ;;
*) finalize_rpath="$finalize_rpath $libdir" ;;
esac
done
if test "$hardcode_into_libs" != yes || test "$build_old_libs" = yes; then
dependency_libs="$temp_xrpath $dependency_libs"
fi
fi
# Make sure dlfiles contains only unique files that won't be dlpreopened
old_dlfiles="$dlfiles"
dlfiles=
for lib in $old_dlfiles; do
case " $dlprefiles $dlfiles " in
*" $lib "*) ;;
*) dlfiles="$dlfiles $lib" ;;
esac
done
# Make sure dlprefiles contains only unique files
old_dlprefiles="$dlprefiles"
dlprefiles=
for lib in $old_dlprefiles; do
case "$dlprefiles " in
*" $lib "*) ;;
*) dlprefiles="$dlprefiles $lib" ;;
esac
done
if test "$build_libtool_libs" = yes; then
if test -n "$rpath"; then
case $host in
*-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-*-beos*)
	  # These systems don't actually have a C library (as such).
;;
*-*-rhapsody* | *-*-darwin1.[012])
# Rhapsody C library is in the System framework
deplibs="$deplibs System.ltframework"
;;
*-*-netbsd*)
# Don't link with libc until the a.out ld.so is fixed.
;;
*-*-openbsd* | *-*-freebsd* | *-*-dragonfly*)
# Do not include libc due to us having libc/libc_r.
;;
*-*-sco3.2v5* | *-*-sco5v6*)
# Causes problems with __ctype
;;
*-*-sysv4.2uw2* | *-*-sysv5* | *-*-unixware* | *-*-OpenUNIX*)
# Compiler inserts libc in the correct place for threads to work
;;
*)
# Add libc to deplibs on all other systems if necessary.
if test "$build_libtool_need_lc" = "yes"; then
deplibs="$deplibs -lc"
fi
;;
esac
fi
# Transform deplibs into only deplibs that can be linked in shared.
name_save=$name
libname_save=$libname
release_save=$release
versuffix_save=$versuffix
major_save=$major
      # It is not entirely clear that the release is treated correctly
      # here: the release should already show up in the -l name (i.e.
      # -lgmp5), so we don't want to add it in twice.
release=""
versuffix=""
major=""
newdeplibs=
droppeddeps=no
case $deplibs_check_method in
pass_all)
# Don't check for shared/static. Everything works.
	# This might be a little naive.  We might want to check
	# whether the library exists or not, but this path is only
	# taken on osf3 & osf4, so just implement what was already
	# the behavior.
newdeplibs=$deplibs
;;
test_compile)
# This code stresses the "libraries are programs" paradigm to its
# limits. Maybe even breaks it. We compile a program, linking it
# against the deplibs as a proxy for the library. Then we can check
# whether they linked in statically or dynamically with ldd.
$opt_dry_run || $RM conftest.c
cat > conftest.c <<EOF
int main() { return 0; }
EOF
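      # Illustrative sketch: the conftest program above is linked against
      # all of $deplibs and the result inspected with `ldd conftest'.  If a
      # hypothetical -lbar appears in the ldd output as, say,
      #   libbar.so.1 => /usr/lib/libbar.so.1
      # the dynamic linker accepted it and -lbar is kept in $newdeplibs;
      # otherwise it is dropped and droppeddeps=yes triggers the warnings
      # below.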
$opt_dry_run || $RM conftest
if $LTCC $LTCFLAGS -o conftest conftest.c $deplibs; then
ldd_output=`ldd conftest`
for i in $deplibs; do
case $i in
-l*)
func_stripname -l '' "$i"
name=$func_stripname_result
if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then
case " $predeps $postdeps " in
*" $i "*)
newdeplibs="$newdeplibs $i"
i=""
;;
esac
fi
if test -n "$i" ; then
libname=`eval "\\$ECHO \"$libname_spec\""`
deplib_matches=`eval "\\$ECHO \"$library_names_spec\""`
set dummy $deplib_matches; shift
deplib_match=$1
if test `expr "$ldd_output" : ".*$deplib_match"` -ne 0 ; then
newdeplibs="$newdeplibs $i"
else
droppeddeps=yes
$ECHO
$ECHO "*** Warning: dynamic linker does not accept needed library $i."
$ECHO "*** I have the capability to make that library automatically link in when"
$ECHO "*** you link to this library. But I can only do this if you have a"
$ECHO "*** shared version of the library, which I believe you do not have"
$ECHO "*** because a test_compile did reveal that the linker did not use it for"
$ECHO "*** its dynamic dependency list that programs get resolved with at runtime."
fi
fi
;;
*)
newdeplibs="$newdeplibs $i"
;;
esac
done
else
# Error occurred in the first compile. Let's try to salvage
# the situation: Compile a separate program for each library.
for i in $deplibs; do
case $i in
-l*)
func_stripname -l '' "$i"
name=$func_stripname_result
$opt_dry_run || $RM conftest
if $LTCC $LTCFLAGS -o conftest conftest.c $i; then
ldd_output=`ldd conftest`
if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then
case " $predeps $postdeps " in
*" $i "*)
newdeplibs="$newdeplibs $i"
i=""
;;
esac
fi
if test -n "$i" ; then
libname=`eval "\\$ECHO \"$libname_spec\""`
deplib_matches=`eval "\\$ECHO \"$library_names_spec\""`
set dummy $deplib_matches; shift
deplib_match=$1
if test `expr "$ldd_output" : ".*$deplib_match"` -ne 0 ; then
newdeplibs="$newdeplibs $i"
else
droppeddeps=yes
$ECHO
$ECHO "*** Warning: dynamic linker does not accept needed library $i."
$ECHO "*** I have the capability to make that library automatically link in when"
$ECHO "*** you link to this library. But I can only do this if you have a"
$ECHO "*** shared version of the library, which you do not appear to have"
$ECHO "*** because a test_compile did reveal that the linker did not use this one"
$ECHO "*** as a dynamic dependency that programs can get resolved with at runtime."
fi
fi
else
droppeddeps=yes
$ECHO
$ECHO "*** Warning! Library $i is needed by this library but I was not able to"
$ECHO "*** make it link in! You will probably need to install it or some"
$ECHO "*** library that it depends on before this library will be fully"
$ECHO "*** functional. Installing it before continuing would be even better."
fi
;;
*)
newdeplibs="$newdeplibs $i"
;;
esac
done
fi
;;
file_magic*)
set dummy $deplibs_check_method; shift
file_magic_regex=`expr "$deplibs_check_method" : "$1 \(.*\)"`
for a_deplib in $deplibs; do
case $a_deplib in
-l*)
func_stripname -l '' "$a_deplib"
name=$func_stripname_result
if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then
case " $predeps $postdeps " in
*" $a_deplib "*)
newdeplibs="$newdeplibs $a_deplib"
a_deplib=""
;;
esac
fi
if test -n "$a_deplib" ; then
libname=`eval "\\$ECHO \"$libname_spec\""`
for i in $lib_search_path $sys_lib_search_path $shlib_search_path; do
potential_libs=`ls $i/$libname[.-]* 2>/dev/null`
for potent_lib in $potential_libs; do
# Follow soft links.
if ls -lLd "$potent_lib" 2>/dev/null |
$GREP " -> " >/dev/null; then
continue
fi
# The statement above tries to avoid entering an
# endless loop below, in case of cyclic links.
# We might still enter an endless loop, since a link
# loop can be closed while we follow links,
# but so what?
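	      # Example chain (hypothetical):
	      #   /usr/lib/libfoo.so -> libfoo.so.1 -> libfoo.so.1.0.0
	      # The loop below resolves potlib one hop at a time until it
	      # reaches the final target, which the file-magic test then
	      # inspects.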
potlib="$potent_lib"
while test -h "$potlib" 2>/dev/null; do
potliblink=`ls -ld $potlib | ${SED} 's/.* -> //'`
case $potliblink in
[\\/]* | [A-Za-z]:[\\/]*) potlib="$potliblink";;
*) potlib=`$ECHO "X$potlib" | $Xsed -e 's,[^/]*$,,'`"$potliblink";;
esac
done
if eval $file_magic_cmd \"\$potlib\" 2>/dev/null |
$SED -e 10q |
$EGREP "$file_magic_regex" > /dev/null; then
newdeplibs="$newdeplibs $a_deplib"
a_deplib=""
break 2
fi
done
done
fi
if test -n "$a_deplib" ; then
droppeddeps=yes
$ECHO
$ECHO "*** Warning: linker path does not have real file for library $a_deplib."
$ECHO "*** I have the capability to make that library automatically link in when"
$ECHO "*** you link to this library. But I can only do this if you have a"
$ECHO "*** shared version of the library, which you do not appear to have"
$ECHO "*** because I did check the linker path looking for a file starting"
if test -z "$potlib" ; then
$ECHO "*** with $libname but no candidates were found. (...for file magic test)"
else
$ECHO "*** with $libname and none of the candidates passed a file format test"
$ECHO "*** using a file magic. Last file checked: $potlib"
fi
fi
;;
*)
# Add a -L argument.
newdeplibs="$newdeplibs $a_deplib"
;;
esac
done # Gone through all deplibs.
;;
match_pattern*)
set dummy $deplibs_check_method; shift
match_pattern_regex=`expr "$deplibs_check_method" : "$1 \(.*\)"`
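      # Illustration (hypothetical method string): with
      #   deplibs_check_method='match_pattern /lib[^/]+(\.so)+$'
      # the expr above leaves match_pattern_regex='/lib[^/]+(\.so)+$', so
      # a candidate /usr/lib/libbar.so passes the EGREP test below while
      # /usr/lib/libbar.a does not.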
for a_deplib in $deplibs; do
case $a_deplib in
-l*)
func_stripname -l '' "$a_deplib"
name=$func_stripname_result
if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then
case " $predeps $postdeps " in
*" $a_deplib "*)
newdeplibs="$newdeplibs $a_deplib"
a_deplib=""
;;
esac
fi
if test -n "$a_deplib" ; then
libname=`eval "\\$ECHO \"$libname_spec\""`
for i in $lib_search_path $sys_lib_search_path $shlib_search_path; do
potential_libs=`ls $i/$libname[.-]* 2>/dev/null`
for potent_lib in $potential_libs; do
potlib="$potent_lib" # see symlink-check above in file_magic test
if eval "\$ECHO \"X$potent_lib\"" 2>/dev/null | $Xsed -e 10q | \
$EGREP "$match_pattern_regex" > /dev/null; then
newdeplibs="$newdeplibs $a_deplib"
a_deplib=""
break 2
fi
done
done
fi
if test -n "$a_deplib" ; then
droppeddeps=yes
$ECHO
$ECHO "*** Warning: linker path does not have real file for library $a_deplib."
$ECHO "*** I have the capability to make that library automatically link in when"
$ECHO "*** you link to this library. But I can only do this if you have a"
$ECHO "*** shared version of the library, which you do not appear to have"
$ECHO "*** because I did check the linker path looking for a file starting"
if test -z "$potlib" ; then
$ECHO "*** with $libname but no candidates were found. (...for regex pattern test)"
else
$ECHO "*** with $libname and none of the candidates passed a file format test"
$ECHO "*** using a regex pattern. Last file checked: $potlib"
fi
fi
;;
*)
# Add a -L argument.
newdeplibs="$newdeplibs $a_deplib"
;;
esac
done # Gone through all deplibs.
;;
none | unknown | *)
newdeplibs=""
tmp_deplibs=`$ECHO "X $deplibs" | $Xsed \
-e 's/ -lc$//' -e 's/ -[LR][^ ]*//g'`
if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then
for i in $predeps $postdeps ; do
# can't use Xsed below, because $i might contain '/'
tmp_deplibs=`$ECHO "X $tmp_deplibs" | $Xsed -e "s,$i,,"`
done
fi
if $ECHO "X $tmp_deplibs" | $Xsed -e 's/[ ]//g' |
$GREP . >/dev/null; then
$ECHO
if test "X$deplibs_check_method" = "Xnone"; then
$ECHO "*** Warning: inter-library dependencies are not supported in this platform."
else
$ECHO "*** Warning: inter-library dependencies are not known to be supported."
fi
$ECHO "*** All declared inter-library dependencies are being dropped."
droppeddeps=yes
fi
;;
esac
versuffix=$versuffix_save
major=$major_save
release=$release_save
libname=$libname_save
name=$name_save
case $host in
*-*-rhapsody* | *-*-darwin1.[012])
# On Rhapsody replace the C library with the System framework
newdeplibs=`$ECHO "X $newdeplibs" | $Xsed -e 's/ -lc / System.ltframework /'`
;;
esac
if test "$droppeddeps" = yes; then
if test "$module" = yes; then
$ECHO
$ECHO "*** Warning: libtool could not satisfy all declared inter-library"
$ECHO "*** dependencies of module $libname. Therefore, libtool will create"
$ECHO "*** a static module, that should work as long as the dlopening"
$ECHO "*** application is linked with the -dlopen flag."
if test -z "$global_symbol_pipe"; then
$ECHO
$ECHO "*** However, this would only work if libtool was able to extract symbol"
$ECHO "*** lists from a program, using \`nm' or equivalent, but libtool could"
$ECHO "*** not find such a program. So, this module is probably useless."
$ECHO "*** \`nm' from GNU binutils and a full rebuild may help."
fi
if test "$build_old_libs" = no; then
oldlibs="$output_objdir/$libname.$libext"
build_libtool_libs=module
build_old_libs=yes
else
build_libtool_libs=no
fi
else
$ECHO "*** The inter-library dependencies that have been dropped here will be"
$ECHO "*** automatically added whenever a program is linked with this library"
$ECHO "*** or is declared to -dlopen it."
if test "$allow_undefined" = no; then
$ECHO
$ECHO "*** Since this library must not contain undefined symbols,"
$ECHO "*** because either the platform does not support them or"
$ECHO "*** it was explicitly requested with -no-undefined,"
$ECHO "*** libtool will only create a static version of it."
if test "$build_old_libs" = no; then
oldlibs="$output_objdir/$libname.$libext"
build_libtool_libs=module
build_old_libs=yes
else
build_libtool_libs=no
fi
fi
fi
fi
# Done checking deplibs!
deplibs=$newdeplibs
fi
# Time to change all our "foo.ltframework" stuff back to "-framework foo"
case $host in
*-*-darwin*)
newdeplibs=`$ECHO "X $newdeplibs" | $Xsed -e 's% \([^ $]*\).ltframework% -framework \1%g'`
new_inherited_linker_flags=`$ECHO "X $new_inherited_linker_flags" | $Xsed -e 's% \([^ $]*\).ltframework% -framework \1%g'`
deplibs=`$ECHO "X $deplibs" | $Xsed -e 's% \([^ $]*\).ltframework% -framework \1%g'`
;;
esac
# move library search paths that coincide with paths to not yet
# installed libraries to the beginning of the library search list
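      # Illustration (hypothetical): with notinst_path=/src/libfoo and
      # deplibs containing "-L/usr/lib -L/src/libfoo/.libs" (objdir is
      # typically .libs), the loops below emit -L/src/libfoo/.libs first,
      # so the just-built library is preferred over an installed copy.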
new_libs=
for path in $notinst_path; do
case " $new_libs " in
*" -L$path/$objdir "*) ;;
*)
case " $deplibs " in
*" -L$path/$objdir "*)
new_libs="$new_libs -L$path/$objdir" ;;
esac
;;
esac
done
for deplib in $deplibs; do
case $deplib in
-L*)
case " $new_libs " in
*" $deplib "*) ;;
*) new_libs="$new_libs $deplib" ;;
esac
;;
*) new_libs="$new_libs $deplib" ;;
esac
done
deplibs="$new_libs"
# All the library-specific variables (install_libdir is set above).
library_names=
old_library=
dlname=
# Test again, we may have decided not to build it any more
if test "$build_libtool_libs" = yes; then
if test "$hardcode_into_libs" = yes; then
# Hardcode the library paths
hardcode_libdirs=
dep_rpath=
rpath="$finalize_rpath"
test "$mode" != relink && rpath="$compile_rpath$rpath"
for libdir in $rpath; do
if test -n "$hardcode_libdir_flag_spec"; then
if test -n "$hardcode_libdir_separator"; then
if test -z "$hardcode_libdirs"; then
hardcode_libdirs="$libdir"
else
# Just accumulate the unique libdirs.
case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in
*"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*)
;;
*)
hardcode_libdirs="$hardcode_libdirs$hardcode_libdir_separator$libdir"
;;
esac
fi
else
eval flag=\"$hardcode_libdir_flag_spec\"
dep_rpath="$dep_rpath $flag"
fi
elif test -n "$runpath_var"; then
case "$perm_rpath " in
*" $libdir "*) ;;
*) perm_rpath="$perm_rpath $libdir" ;;
esac
fi
done
# Substitute the hardcoded libdirs into the rpath.
if test -n "$hardcode_libdir_separator" &&
test -n "$hardcode_libdirs"; then
libdir="$hardcode_libdirs"
if test -n "$hardcode_libdir_flag_spec_ld"; then
eval dep_rpath=\"$hardcode_libdir_flag_spec_ld\"
else
eval dep_rpath=\"$hardcode_libdir_flag_spec\"
fi
fi
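	# Worked example (hypothetical): with hardcode_libdir_separator=':'
	# and rpath dirs /usr/local/lib and /opt/lib, hardcode_libdirs
	# accumulates to "/usr/local/lib:/opt/lib"; a flag spec such as
	# '${wl}-rpath ${wl}$libdir' then expands once to
	# "-Wl,-rpath -Wl,/usr/local/lib:/opt/lib".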
if test -n "$runpath_var" && test -n "$perm_rpath"; then
# We should set the runpath_var.
rpath=
for dir in $perm_rpath; do
rpath="$rpath$dir:"
done
eval "$runpath_var='$rpath\$$runpath_var'; export $runpath_var"
fi
test -n "$dep_rpath" && deplibs="$dep_rpath $deplibs"
fi
shlibpath="$finalize_shlibpath"
test "$mode" != relink && shlibpath="$compile_shlibpath$shlibpath"
if test -n "$shlibpath"; then
eval "$shlibpath_var='$shlibpath\$$shlibpath_var'; export $shlibpath_var"
fi
# Get the real and link names of the library.
eval shared_ext=\"$shrext_cmds\"
eval library_names=\"$library_names_spec\"
set dummy $library_names
shift
realname="$1"
shift
if test -n "$soname_spec"; then
eval soname=\"$soname_spec\"
else
soname="$realname"
fi
if test -z "$dlname"; then
dlname=$soname
fi
lib="$output_objdir/$realname"
linknames=
for link
do
linknames="$linknames $link"
done
# Use standard objects if they are pic
test -z "$pic_flag" && libobjs=`$ECHO "X$libobjs" | $SP2NL | $Xsed -e "$lo2o" | $NL2SP`
test "X$libobjs" = "X " && libobjs=
delfiles=
if test -n "$export_symbols" && test -n "$include_expsyms"; then
$opt_dry_run || cp "$export_symbols" "$output_objdir/$libname.uexp"
export_symbols="$output_objdir/$libname.uexp"
delfiles="$delfiles $export_symbols"
fi
orig_export_symbols=
case $host_os in
cygwin* | mingw*)
if test -n "$export_symbols" && test -z "$export_symbols_regex"; then
# exporting using user supplied symfile
if test "x`$SED 1q $export_symbols`" != xEXPORTS; then
# and it's NOT already a .def file. Must figure out
# which of the given symbols are data symbols and tag
# them as such. So, trigger use of export_symbols_cmds.
# export_symbols gets reassigned inside the "prepare
# the list of exported symbols" if statement, so the
# include_expsyms logic still works.
orig_export_symbols="$export_symbols"
export_symbols=
always_export_symbols=yes
fi
fi
;;
esac
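	  # Example (hypothetical): a user-supplied symfile whose first line
	  # is "my_func" rather than "EXPORTS" is a plain symbol list, not a
	  # .def file, so always_export_symbols is forced on above and
	  # export_symbols_cmds later regenerates the list with DATA tags.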
# Prepare the list of exported symbols
if test -z "$export_symbols"; then
if test "$always_export_symbols" = yes || test -n "$export_symbols_regex"; then
func_verbose "generating symbol list for \`$libname.la'"
export_symbols="$output_objdir/$libname.exp"
$opt_dry_run || $RM $export_symbols
cmds=$export_symbols_cmds
save_ifs="$IFS"; IFS='~'
for cmd in $cmds; do
IFS="$save_ifs"
eval cmd=\"$cmd\"
func_len " $cmd"
len=$func_len_result
if test "$len" -lt "$max_cmd_len" || test "$max_cmd_len" -le -1; then
func_show_eval "$cmd" 'exit $?'
skipped_export=false
else
# The command line is too long to execute in one step.
func_verbose "using reloadable object file for export list..."
skipped_export=:
# Break out early, otherwise skipped_export may be
# set to false by a later but shorter cmd.
break
fi
done
IFS="$save_ifs"
if test -n "$export_symbols_regex" && test "X$skipped_export" != "X:"; then
func_show_eval '$EGREP -e "$export_symbols_regex" "$export_symbols" > "${export_symbols}T"'
func_show_eval '$MV "${export_symbols}T" "$export_symbols"'
fi
fi
fi
if test -n "$export_symbols" && test -n "$include_expsyms"; then
tmp_export_symbols="$export_symbols"
test -n "$orig_export_symbols" && tmp_export_symbols="$orig_export_symbols"
$opt_dry_run || eval '$ECHO "X$include_expsyms" | $Xsed | $SP2NL >> "$tmp_export_symbols"'
fi
if test "X$skipped_export" != "X:" && test -n "$orig_export_symbols"; then
# The given exports_symbols file has to be filtered, so filter it.
func_verbose "filter symbol list for \`$libname.la' to tag DATA exports"
# FIXME: $output_objdir/$libname.filter potentially contains lots of
# 's' commands which not all seds can handle. GNU sed should be fine
# though. Also, the filter scales superlinearly with the number of
# global variables. join(1) would be nice here, but unfortunately
# isn't a blessed tool.
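	    # Illustration (hypothetical symbol): an export_symbols line
	    # "my_table DATA" becomes the filter command
	    # "s|^my_table$|my_table DATA|", which re-tags the matching
	    # plain line when the filter runs over orig_export_symbols below.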
$opt_dry_run || $SED -e '/[ ,]DATA/!d;s,\(.*\)\([ \,].*\),s|^\1$|\1\2|,' < $export_symbols > $output_objdir/$libname.filter
delfiles="$delfiles $export_symbols $output_objdir/$libname.filter"
export_symbols=$output_objdir/$libname.def
$opt_dry_run || $SED -f $output_objdir/$libname.filter < $orig_export_symbols > $export_symbols
fi
tmp_deplibs=
for test_deplib in $deplibs; do
case " $convenience " in
*" $test_deplib "*) ;;
*)
tmp_deplibs="$tmp_deplibs $test_deplib"
;;
esac
done
deplibs="$tmp_deplibs"
if test -n "$convenience"; then
if test -n "$whole_archive_flag_spec" &&
test "$compiler_needs_object" = yes &&
test -z "$libobjs"; then
# extract the archives, so we have objects to list.
# TODO: could optimize this to just extract one archive.
whole_archive_flag_spec=
fi
if test -n "$whole_archive_flag_spec"; then
save_libobjs=$libobjs
eval libobjs=\"\$libobjs $whole_archive_flag_spec\"
test "X$libobjs" = "X " && libobjs=
else
gentop="$output_objdir/${outputname}x"
generated="$generated $gentop"
func_extract_archives $gentop $convenience
libobjs="$libobjs $func_extract_archives_result"
test "X$libobjs" = "X " && libobjs=
fi
fi
if test "$thread_safe" = yes && test -n "$thread_safe_flag_spec"; then
eval flag=\"$thread_safe_flag_spec\"
linker_flags="$linker_flags $flag"
fi
# Make a backup of the uninstalled library when relinking
if test "$mode" = relink; then
$opt_dry_run || eval '(cd $output_objdir && $RM ${realname}U && $MV $realname ${realname}U)' || exit $?
fi
# Do each of the archive commands.
if test "$module" = yes && test -n "$module_cmds" ; then
if test -n "$export_symbols" && test -n "$module_expsym_cmds"; then
eval test_cmds=\"$module_expsym_cmds\"
cmds=$module_expsym_cmds
else
eval test_cmds=\"$module_cmds\"
cmds=$module_cmds
fi
else
if test -n "$export_symbols" && test -n "$archive_expsym_cmds"; then
eval test_cmds=\"$archive_expsym_cmds\"
cmds=$archive_expsym_cmds
else
eval test_cmds=\"$archive_cmds\"
cmds=$archive_cmds
fi
fi
if test "X$skipped_export" != "X:" &&
func_len " $test_cmds" &&
len=$func_len_result &&
test "$len" -lt "$max_cmd_len" || test "$max_cmd_len" -le -1; then
:
else
      # The command line is too long to link in one step; link piecewise
      # or, if using GNU ld and skipped_export is not ':', use a linker
# script.
# Save the value of $output and $libobjs because we want to
# use them later. If we have whole_archive_flag_spec, we
# want to use save_libobjs as it was before
# whole_archive_flag_spec was expanded, because we can't
# assume the linker understands whole_archive_flag_spec.
# This may have to be revisited, in case too many
# convenience libraries get linked in and end up exceeding
# the spec.
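	# Sketch of the piecewise scheme (hypothetical names): the objects
	# are grouped into libfoo.la-1.o, libfoo.la-2.o, and so on; each
	# reload step links the previous partial object in and then removes
	# it, so the final archive command sees one object that fits within
	# max_cmd_len.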
if test -z "$convenience" || test -z "$whole_archive_flag_spec"; then
save_libobjs=$libobjs
fi
save_output=$output
output_la=`$ECHO "X$output" | $Xsed -e "$basename"`
# Clear the reloadable object creation command queue and
# initialize k to one.
test_cmds=
concat_cmds=
objlist=
last_robj=
k=1
if test -n "$save_libobjs" && test "X$skipped_export" != "X:" && test "$with_gnu_ld" = yes; then
output=${output_objdir}/${output_la}.lnkscript
func_verbose "creating GNU ld script: $output"
$ECHO 'INPUT (' > $output
for obj in $save_libobjs
do
$ECHO "$obj" >> $output
done
$ECHO ')' >> $output
delfiles="$delfiles $output"
elif test -n "$save_libobjs" && test "X$skipped_export" != "X:" && test "X$file_list_spec" != X; then
output=${output_objdir}/${output_la}.lnk
func_verbose "creating linker input file list: $output"
: > $output
set x $save_libobjs
shift
firstobj=
if test "$compiler_needs_object" = yes; then
firstobj="$1 "
shift
fi
for obj
do
$ECHO "$obj" >> $output
done
delfiles="$delfiles $output"
output=$firstobj\"$file_list_spec$output\"
else
if test -n "$save_libobjs"; then
func_verbose "creating reloadable object files..."
output=$output_objdir/$output_la-${k}.$objext
eval test_cmds=\"$reload_cmds\"
func_len " $test_cmds"
len0=$func_len_result
len=$len0
# Loop over the list of objects to be linked.
for obj in $save_libobjs
do
func_len " $obj"
func_arith $len + $func_len_result
len=$func_arith_result
if test "X$objlist" = X ||
test "$len" -lt "$max_cmd_len"; then
func_append objlist " $obj"
else
		  # The command $test_cmds is almost too long; add a
		  # command to the queue.
if test "$k" -eq 1 ; then
# The first file doesn't have a previous command to add.
eval concat_cmds=\"$reload_cmds $objlist $last_robj\"
else
# All subsequent reloadable object files will link in
# the last one created.
eval concat_cmds=\"\$concat_cmds~$reload_cmds $objlist $last_robj~\$RM $last_robj\"
fi
last_robj=$output_objdir/$output_la-${k}.$objext
func_arith $k + 1
k=$func_arith_result
output=$output_objdir/$output_la-${k}.$objext
objlist=$obj
func_len " $last_robj"
func_arith $len0 + $func_len_result
len=$func_arith_result
fi
done
# Handle the remaining objects by creating one last
# reloadable object file. All subsequent reloadable object
# files will link in the last one created.
test -z "$concat_cmds" || concat_cmds=$concat_cmds~
eval concat_cmds=\"\${concat_cmds}$reload_cmds $objlist $last_robj\"
if test -n "$last_robj"; then
eval concat_cmds=\"\${concat_cmds}~\$RM $last_robj\"
fi
delfiles="$delfiles $output"
else
output=
fi
if ${skipped_export-false}; then
func_verbose "generating symbol list for \`$libname.la'"
export_symbols="$output_objdir/$libname.exp"
$opt_dry_run || $RM $export_symbols
libobjs=$output
# Append the command to create the export file.
test -z "$concat_cmds" || concat_cmds=$concat_cmds~
eval concat_cmds=\"\$concat_cmds$export_symbols_cmds\"
if test -n "$last_robj"; then
eval concat_cmds=\"\$concat_cmds~\$RM $last_robj\"
fi
fi
test -n "$save_libobjs" &&
func_verbose "creating a temporary reloadable object file: $output"
# Loop through the commands generated above and execute them.
save_ifs="$IFS"; IFS='~'
for cmd in $concat_cmds; do
IFS="$save_ifs"
$opt_silent || {
func_quote_for_expand "$cmd"
eval "func_echo $func_quote_for_expand_result"
}
$opt_dry_run || eval "$cmd" || {
lt_exit=$?
# Restore the uninstalled library and exit
if test "$mode" = relink; then
( cd "$output_objdir" && \
$RM "${realname}T" && \
$MV "${realname}U" "$realname" )
fi
exit $lt_exit
}
done
IFS="$save_ifs"
if test -n "$export_symbols_regex" && ${skipped_export-false}; then
func_show_eval '$EGREP -e "$export_symbols_regex" "$export_symbols" > "${export_symbols}T"'
func_show_eval '$MV "${export_symbols}T" "$export_symbols"'
fi
fi
if ${skipped_export-false}; then
if test -n "$export_symbols" && test -n "$include_expsyms"; then
tmp_export_symbols="$export_symbols"
test -n "$orig_export_symbols" && tmp_export_symbols="$orig_export_symbols"
$opt_dry_run || eval '$ECHO "X$include_expsyms" | $Xsed | $SP2NL >> "$tmp_export_symbols"'
fi
if test -n "$orig_export_symbols"; then
# The given exports_symbols file has to be filtered, so filter it.
func_verbose "filter symbol list for \`$libname.la' to tag DATA exports"
# FIXME: $output_objdir/$libname.filter potentially contains lots of
# 's' commands which not all seds can handle. GNU sed should be fine
# though. Also, the filter scales superlinearly with the number of
# global variables. join(1) would be nice here, but unfortunately
# isn't a blessed tool.
$opt_dry_run || $SED -e '/[ ,]DATA/!d;s,\(.*\)\([ \,].*\),s|^\1$|\1\2|,' < $export_symbols > $output_objdir/$libname.filter
delfiles="$delfiles $export_symbols $output_objdir/$libname.filter"
export_symbols=$output_objdir/$libname.def
$opt_dry_run || $SED -f $output_objdir/$libname.filter < $orig_export_symbols > $export_symbols
fi
fi
libobjs=$output
# Restore the value of output.
output=$save_output
if test -n "$convenience" && test -n "$whole_archive_flag_spec"; then
eval libobjs=\"\$libobjs $whole_archive_flag_spec\"
test "X$libobjs" = "X " && libobjs=
fi
# Expand the library linking commands again to reset the
# value of $libobjs for piecewise linking.
# Do each of the archive commands.
if test "$module" = yes && test -n "$module_cmds" ; then
if test -n "$export_symbols" && test -n "$module_expsym_cmds"; then
cmds=$module_expsym_cmds
else
cmds=$module_cmds
fi
else
if test -n "$export_symbols" && test -n "$archive_expsym_cmds"; then
cmds=$archive_expsym_cmds
else
cmds=$archive_cmds
fi
fi
fi
if test -n "$delfiles"; then
# Append the command to remove temporary files to $cmds.
eval cmds=\"\$cmds~\$RM $delfiles\"
fi
# Add any objects from preloaded convenience libraries
if test -n "$dlprefiles"; then
gentop="$output_objdir/${outputname}x"
generated="$generated $gentop"
func_extract_archives $gentop $dlprefiles
libobjs="$libobjs $func_extract_archives_result"
test "X$libobjs" = "X " && libobjs=
fi
save_ifs="$IFS"; IFS='~'
for cmd in $cmds; do
IFS="$save_ifs"
eval cmd=\"$cmd\"
$opt_silent || {
func_quote_for_expand "$cmd"
eval "func_echo $func_quote_for_expand_result"
}
$opt_dry_run || eval "$cmd" || {
lt_exit=$?
# Restore the uninstalled library and exit
if test "$mode" = relink; then
( cd "$output_objdir" && \
$RM "${realname}T" && \
$MV "${realname}U" "$realname" )
fi
exit $lt_exit
}
done
IFS="$save_ifs"
# Restore the uninstalled library and exit
if test "$mode" = relink; then
$opt_dry_run || eval '(cd $output_objdir && $RM ${realname}T && $MV $realname ${realname}T && $MV ${realname}U $realname)' || exit $?
if test -n "$convenience"; then
if test -z "$whole_archive_flag_spec"; then
func_show_eval '${RM}r "$gentop"'
fi
fi
exit $EXIT_SUCCESS
fi
# Create links to the real library.
for linkname in $linknames; do
if test "$realname" != "$linkname"; then
func_show_eval '(cd "$output_objdir" && $RM "$linkname" && $LN_S "$realname" "$linkname")' 'exit $?'
fi
done
# If -module or -export-dynamic was specified, set the dlname.
if test "$module" = yes || test "$export_dynamic" = yes; then
# On all known operating systems, these are identical.
dlname="$soname"
fi
fi
;;
obj)
if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then
func_warning "\`-dlopen' is ignored for objects"
fi
case " $deplibs" in
*\ -l* | *\ -L*)
func_warning "\`-l' and \`-L' are ignored for objects" ;;
esac
test -n "$rpath" && \
func_warning "\`-rpath' is ignored for objects"
test -n "$xrpath" && \
func_warning "\`-R' is ignored for objects"
test -n "$vinfo" && \
func_warning "\`-version-info' is ignored for objects"
test -n "$release" && \
func_warning "\`-release' is ignored for objects"
case $output in
*.lo)
test -n "$objs$old_deplibs" && \
func_fatal_error "cannot build library object \`$output' from non-libtool objects"
libobj=$output
func_lo2o "$libobj"
obj=$func_lo2o_result
;;
*)
libobj=
obj="$output"
;;
esac
# Delete the old objects.
$opt_dry_run || $RM $obj $libobj
# Objects from convenience libraries. This assumes
# single-version convenience libraries. Whenever we create
      # different ones for PIC/non-PIC, we'll have to duplicate
# the extraction.
reload_conv_objs=
gentop=
# reload_cmds runs $LD directly, so let us get rid of
# -Wl from whole_archive_flag_spec and hope we can get by with
      # turning commas into spaces.
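      # Example (hypothetical spec): with wl emptied, a spec written as
      # '${wl}--whole-archive,$convenience,${wl}--no-whole-archive' would
      # evaluate to "--whole-archive,/path/libconvx.a,--no-whole-archive",
      # and the sed pass below turns that into
      # "--whole-archive /path/libconvx.a --no-whole-archive" for $LD.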
wl=
if test -n "$convenience"; then
if test -n "$whole_archive_flag_spec"; then
eval tmp_whole_archive_flags=\"$whole_archive_flag_spec\"
reload_conv_objs=$reload_objs\ `$ECHO "X$tmp_whole_archive_flags" | $Xsed -e 's|,| |g'`
else
gentop="$output_objdir/${obj}x"
generated="$generated $gentop"
func_extract_archives $gentop $convenience
reload_conv_objs="$reload_objs $func_extract_archives_result"
fi
fi
# Create the old-style object.
reload_objs="$objs$old_deplibs "`$ECHO "X$libobjs" | $SP2NL | $Xsed -e '/\.'${libext}$'/d' -e '/\.lib$/d' -e "$lo2o" | $NL2SP`" $reload_conv_objs" ### testsuite: skip nested quoting test
output="$obj"
func_execute_cmds "$reload_cmds" 'exit $?'
# Exit if we aren't doing a library object file.
if test -z "$libobj"; then
if test -n "$gentop"; then
func_show_eval '${RM}r "$gentop"'
fi
exit $EXIT_SUCCESS
fi
if test "$build_libtool_libs" != yes; then
if test -n "$gentop"; then
func_show_eval '${RM}r "$gentop"'
fi
# Create an invalid libtool object if no PIC, so that we don't
# accidentally link it into a program.
# $show "echo timestamp > $libobj"
# $opt_dry_run || eval "echo timestamp > $libobj" || exit $?
exit $EXIT_SUCCESS
fi
if test -n "$pic_flag" || test "$pic_mode" != default; then
# Only do commands if we really have different PIC objects.
reload_objs="$libobjs $reload_conv_objs"
output="$libobj"
func_execute_cmds "$reload_cmds" 'exit $?'
fi
if test -n "$gentop"; then
func_show_eval '${RM}r "$gentop"'
fi
exit $EXIT_SUCCESS
;;
prog)
case $host in
*cygwin*) func_stripname '' '.exe' "$output"
output=$func_stripname_result.exe;;
esac
test -n "$vinfo" && \
func_warning "\`-version-info' is ignored for programs"
test -n "$release" && \
func_warning "\`-release' is ignored for programs"
test "$preload" = yes \
&& test "$dlopen_support" = unknown \
&& test "$dlopen_self" = unknown \
&& test "$dlopen_self_static" = unknown && \
func_warning "\`LT_INIT([dlopen])' not used. Assuming no dlopen support."
case $host in
*-*-rhapsody* | *-*-darwin1.[012])
	  # On Rhapsody replace the C library with the System framework
compile_deplibs=`$ECHO "X $compile_deplibs" | $Xsed -e 's/ -lc / System.ltframework /'`
finalize_deplibs=`$ECHO "X $finalize_deplibs" | $Xsed -e 's/ -lc / System.ltframework /'`
;;
esac
case $host in
*-*-darwin*)
	  # Don't allow lazy linking: it breaks C++ global constructors.
	  # It is supposedly fixed on 10.4 or later (yay!).
if test "$tagname" = CXX ; then
case ${MACOSX_DEPLOYMENT_TARGET-10.0} in
10.[0123])
compile_command="$compile_command ${wl}-bind_at_load"
finalize_command="$finalize_command ${wl}-bind_at_load"
;;
esac
fi
# Time to change all our "foo.ltframework" stuff back to "-framework foo"
compile_deplibs=`$ECHO "X $compile_deplibs" | $Xsed -e 's% \([^ $]*\).ltframework% -framework \1%g'`
finalize_deplibs=`$ECHO "X $finalize_deplibs" | $Xsed -e 's% \([^ $]*\).ltframework% -framework \1%g'`
;;
esac
# move library search paths that coincide with paths to not yet
# installed libraries to the beginning of the library search list
new_libs=
for path in $notinst_path; do
case " $new_libs " in
*" -L$path/$objdir "*) ;;
*)
case " $compile_deplibs " in
*" -L$path/$objdir "*)
new_libs="$new_libs -L$path/$objdir" ;;
esac
;;
esac
done
for deplib in $compile_deplibs; do
case $deplib in
-L*)
case " $new_libs " in
*" $deplib "*) ;;
*) new_libs="$new_libs $deplib" ;;
esac
;;
*) new_libs="$new_libs $deplib" ;;
esac
done
compile_deplibs="$new_libs"
compile_command="$compile_command $compile_deplibs"
finalize_command="$finalize_command $finalize_deplibs"
if test -n "$rpath$xrpath"; then
# If the user specified any rpath flags, then add them.
for libdir in $rpath $xrpath; do
# This is the magic to use -rpath.
case "$finalize_rpath " in
*" $libdir "*) ;;
*) finalize_rpath="$finalize_rpath $libdir" ;;
esac
done
fi
# Now hardcode the library paths
rpath=
hardcode_libdirs=
for libdir in $compile_rpath $finalize_rpath; do
if test -n "$hardcode_libdir_flag_spec"; then
if test -n "$hardcode_libdir_separator"; then
if test -z "$hardcode_libdirs"; then
hardcode_libdirs="$libdir"
else
# Just accumulate the unique libdirs.
case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in
*"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*)
;;
*)
hardcode_libdirs="$hardcode_libdirs$hardcode_libdir_separator$libdir"
;;
esac
fi
else
eval flag=\"$hardcode_libdir_flag_spec\"
rpath="$rpath $flag"
fi
elif test -n "$runpath_var"; then
case "$perm_rpath " in
*" $libdir "*) ;;
*) perm_rpath="$perm_rpath $libdir" ;;
esac
fi
case $host in
*-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2*)
testbindir=`${ECHO} "$libdir" | ${SED} -e 's*/lib$*/bin*'`
case :$dllsearchpath: in
*":$libdir:"*) ;;
*) dllsearchpath="$dllsearchpath:$libdir";;
esac
case :$dllsearchpath: in
*":$testbindir:"*) ;;
*) dllsearchpath="$dllsearchpath:$testbindir";;
esac
;;
esac
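	  # Example (hypothetical): libdir=/usr/local/lib yields
	  # testbindir=/usr/local/bin via the sed above; both directories
	  # are appended to dllsearchpath so the DLL is found whether it
	  # sits next to the import library or in the bin directory.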
done
# Substitute the hardcoded libdirs into the rpath.
if test -n "$hardcode_libdir_separator" &&
test -n "$hardcode_libdirs"; then
libdir="$hardcode_libdirs"
eval rpath=\" $hardcode_libdir_flag_spec\"
fi
compile_rpath="$rpath"
rpath=
hardcode_libdirs=
for libdir in $finalize_rpath; do
if test -n "$hardcode_libdir_flag_spec"; then
if test -n "$hardcode_libdir_separator"; then
if test -z "$hardcode_libdirs"; then
hardcode_libdirs="$libdir"
else
# Just accumulate the unique libdirs.
case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in
*"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*)
;;
*)
hardcode_libdirs="$hardcode_libdirs$hardcode_libdir_separator$libdir"
;;
esac
fi
else
eval flag=\"$hardcode_libdir_flag_spec\"
rpath="$rpath $flag"
fi
elif test -n "$runpath_var"; then
case "$finalize_perm_rpath " in
*" $libdir "*) ;;
*) finalize_perm_rpath="$finalize_perm_rpath $libdir" ;;
esac
fi
done
# Substitute the hardcoded libdirs into the rpath.
if test -n "$hardcode_libdir_separator" &&
test -n "$hardcode_libdirs"; then
libdir="$hardcode_libdirs"
eval rpath=\" $hardcode_libdir_flag_spec\"
fi
finalize_rpath="$rpath"
if test -n "$libobjs" && test "$build_old_libs" = yes; then
# Transform all the library objects into standard objects.
compile_command=`$ECHO "X$compile_command" | $SP2NL | $Xsed -e "$lo2o" | $NL2SP`
finalize_command=`$ECHO "X$finalize_command" | $SP2NL | $Xsed -e "$lo2o" | $NL2SP`
fi
func_generate_dlsyms "$outputname" "@PROGRAM@" "no"
# template prelinking step
if test -n "$prelink_cmds"; then
func_execute_cmds "$prelink_cmds" 'exit $?'
fi
wrappers_required=yes
case $host in
*cygwin* | *mingw* )
if test "$build_libtool_libs" != yes; then
wrappers_required=no
fi
;;
*)
if test "$need_relink" = no || test "$build_libtool_libs" != yes; then
wrappers_required=no
fi
;;
esac
if test "$wrappers_required" = no; then
# Replace the output file specification.
compile_command=`$ECHO "X$compile_command" | $Xsed -e 's%@OUTPUT@%'"$output"'%g'`
link_command="$compile_command$compile_rpath"
# We have no uninstalled library dependencies, so finalize right now.
exit_status=0
func_show_eval "$link_command" 'exit_status=$?'
# Delete the generated files.
if test -f "$output_objdir/${outputname}S.${objext}"; then
func_show_eval '$RM "$output_objdir/${outputname}S.${objext}"'
fi
exit $exit_status
fi
if test -n "$compile_shlibpath$finalize_shlibpath"; then
compile_command="$shlibpath_var=\"$compile_shlibpath$finalize_shlibpath\$$shlibpath_var\" $compile_command"
fi
if test -n "$finalize_shlibpath"; then
finalize_command="$shlibpath_var=\"$finalize_shlibpath\$$shlibpath_var\" $finalize_command"
fi
compile_var=
finalize_var=
if test -n "$runpath_var"; then
if test -n "$perm_rpath"; then
# We should set the runpath_var.
rpath=
for dir in $perm_rpath; do
rpath="$rpath$dir:"
done
compile_var="$runpath_var=\"$rpath\$$runpath_var\" "
fi
if test -n "$finalize_perm_rpath"; then
# We should set the runpath_var.
rpath=
for dir in $finalize_perm_rpath; do
rpath="$rpath$dir:"
done
finalize_var="$runpath_var=\"$rpath\$$runpath_var\" "
fi
fi
if test "$no_install" = yes; then
# We don't need to create a wrapper script.
link_command="$compile_var$compile_command$compile_rpath"
# Replace the output file specification.
link_command=`$ECHO "X$link_command" | $Xsed -e 's%@OUTPUT@%'"$output"'%g'`
# Delete the old output file.
$opt_dry_run || $RM $output
# Link the executable and exit
func_show_eval "$link_command" 'exit $?'
exit $EXIT_SUCCESS
fi
if test "$hardcode_action" = relink; then
# Fast installation is not supported
link_command="$compile_var$compile_command$compile_rpath"
relink_command="$finalize_var$finalize_command$finalize_rpath"
func_warning "this platform does not like uninstalled shared libraries"
func_warning "\`$output' will be relinked during installation"
else
if test "$fast_install" != no; then
link_command="$finalize_var$compile_command$finalize_rpath"
if test "$fast_install" = yes; then
relink_command=`$ECHO "X$compile_var$compile_command$compile_rpath" | $Xsed -e 's%@OUTPUT@%\$progdir/\$file%g'`
else
	  # fast_install is set to 'needless'; no relink command is required.
relink_command=
fi
else
link_command="$compile_var$compile_command$compile_rpath"
relink_command="$finalize_var$finalize_command$finalize_rpath"
fi
fi
# Replace the output file specification.
link_command=`$ECHO "X$link_command" | $Xsed -e 's%@OUTPUT@%'"$output_objdir/$outputname"'%g'`
# Delete the old output files.
$opt_dry_run || $RM $output $output_objdir/$outputname $output_objdir/lt-$outputname
func_show_eval "$link_command" 'exit $?'
# Now create the wrapper script.
func_verbose "creating $output"
# Quote the relink command for shipping.
if test -n "$relink_command"; then
# Preserve any variables that may affect compiler behavior
for var in $variables_saved_for_relink; do
if eval test -z \"\${$var+set}\"; then
relink_command="{ test -z \"\${$var+set}\" || $lt_unset $var || { $var=; export $var; }; }; $relink_command"
elif eval var_value=\$$var; test -z "$var_value"; then
relink_command="$var=; export $var; $relink_command"
else
func_quote_for_eval "$var_value"
relink_command="$var=$func_quote_for_eval_result; export $var; $relink_command"
fi
done
relink_command="(cd `pwd`; $relink_command)"
relink_command=`$ECHO "X$relink_command" | $Xsed -e "$sed_quote_subst"`
fi
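      # Sketch of the result (hypothetical values): if CC='gcc -m32' is
      # among $variables_saved_for_relink, the shipped command looks like
      #   (cd /build/dir; CC="gcc -m32"; export CC; <link command>)
      # so relinking at install time reproduces the build environment.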
# Quote $ECHO for shipping.
if test "X$ECHO" = "X$SHELL $progpath --fallback-echo"; then
case $progpath in
[\\/]* | [A-Za-z]:[\\/]*) qecho="$SHELL $progpath --fallback-echo";;
*) qecho="$SHELL `pwd`/$progpath --fallback-echo";;
esac
qecho=`$ECHO "X$qecho" | $Xsed -e "$sed_quote_subst"`
else
qecho=`$ECHO "X$ECHO" | $Xsed -e "$sed_quote_subst"`
fi
# Only actually do things if not in dry run mode.
$opt_dry_run || {
# win32 will think the script is a binary if it has
# a .exe suffix, so we strip it off here.
case $output in
*.exe) func_stripname '' '.exe' "$output"
output=$func_stripname_result ;;
esac
# test for cygwin because mv fails w/o .exe extensions
case $host in
*cygwin*)
exeext=.exe
func_stripname '' '.exe' "$outputname"
outputname=$func_stripname_result ;;
*) exeext= ;;
esac
case $host in
*cygwin* | *mingw* )
func_dirname_and_basename "$output" "" "."
output_name=$func_basename_result
output_path=$func_dirname_result
cwrappersource="$output_path/$objdir/lt-$output_name.c"
cwrapper="$output_path/$output_name.exe"
$RM $cwrappersource $cwrapper
trap "$RM $cwrappersource $cwrapper; exit $EXIT_FAILURE" 1 2 15
func_emit_cwrapperexe_src > $cwrappersource
# we should really use a build-platform specific compiler
# here, but OTOH, the wrappers (shell script and this C one)
# are only useful if you want to execute the "real" binary.
# Since the "real" binary is built for $host, then this
# wrapper might as well be built for $host, too.
$opt_dry_run || {
$LTCC $LTCFLAGS -o $cwrapper $cwrappersource
$STRIP $cwrapper
}
# Now, create the wrapper script for func_source use:
func_ltwrapper_scriptname $cwrapper
$RM $func_ltwrapper_scriptname_result
trap "$RM $func_ltwrapper_scriptname_result; exit $EXIT_FAILURE" 1 2 15
$opt_dry_run || {
# note: this script will not be executed, so do not chmod.
if test "x$build" = "x$host" ; then
$cwrapper --lt-dump-script > $func_ltwrapper_scriptname_result
else
func_emit_wrapper no > $func_ltwrapper_scriptname_result
fi
}
;;
* )
$RM $output
trap "$RM $output; exit $EXIT_FAILURE" 1 2 15
func_emit_wrapper no > $output
chmod +x $output
;;
esac
}
exit $EXIT_SUCCESS
;;
esac
# See if we need to build an old-fashioned archive.
for oldlib in $oldlibs; do
if test "$build_libtool_libs" = convenience; then
oldobjs="$libobjs_save $symfileobj"
addlibs="$convenience"
build_libtool_libs=no
else
if test "$build_libtool_libs" = module; then
oldobjs="$libobjs_save"
build_libtool_libs=no
else
oldobjs="$old_deplibs $non_pic_objects"
if test "$preload" = yes && test -f "$symfileobj"; then
oldobjs="$oldobjs $symfileobj"
fi
fi
addlibs="$old_convenience"
fi
if test -n "$addlibs"; then
gentop="$output_objdir/${outputname}x"
generated="$generated $gentop"
func_extract_archives $gentop $addlibs
oldobjs="$oldobjs $func_extract_archives_result"
fi
# Do each command in the archive commands.
if test -n "$old_archive_from_new_cmds" && test "$build_libtool_libs" = yes; then
cmds=$old_archive_from_new_cmds
else
# Add any objects from preloaded convenience libraries
if test -n "$dlprefiles"; then
gentop="$output_objdir/${outputname}x"
generated="$generated $gentop"
func_extract_archives $gentop $dlprefiles
oldobjs="$oldobjs $func_extract_archives_result"
fi
# POSIX demands no paths to be encoded in archives. We have
# to avoid creating archives with duplicate basenames if we
# might have to extract them afterwards, e.g., when creating a
# static archive out of a convenience library, or when linking
# the entirety of a libtool archive into another (currently
# not supported by libtool).
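      # Illustration (hypothetical): archiving a/util.o and b/util.o
      # would create two members both named util.o, and a later "ar x"
      # could recover only one of them; the fallback below copies the
      # second object to lt1-util.o before archiving.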
if (for obj in $oldobjs
do
func_basename "$obj"
$ECHO "$func_basename_result"
done | sort | sort -uc >/dev/null 2>&1); then
:
else
$ECHO "copying selected object files to avoid basename conflicts..."
gentop="$output_objdir/${outputname}x"
generated="$generated $gentop"
func_mkdir_p "$gentop"
save_oldobjs=$oldobjs
oldobjs=
counter=1
for obj in $save_oldobjs
do
func_basename "$obj"
objbase="$func_basename_result"
case " $oldobjs " in
" ") oldobjs=$obj ;;
*[\ /]"$objbase "*)
while :; do
# Make sure we don't pick an alternate name that also
# overlaps.
newobj=lt$counter-$objbase
func_arith $counter + 1
counter=$func_arith_result
case " $oldobjs " in
*[\ /]"$newobj "*) ;;
*) if test ! -f "$gentop/$newobj"; then break; fi ;;
esac
done
func_show_eval "ln $obj $gentop/$newobj || cp $obj $gentop/$newobj"
oldobjs="$oldobjs $gentop/$newobj"
;;
*) oldobjs="$oldobjs $obj" ;;
esac
done
fi
eval cmds=\"$old_archive_cmds\"
func_len " $cmds"
len=$func_len_result
if test "$len" -lt "$max_cmd_len" || test "$max_cmd_len" -le -1; then
cmds=$old_archive_cmds
else
      # The command line is too long to link in one step; link in parts.
func_verbose "using piecewise archive linking..."
save_RANLIB=$RANLIB
RANLIB=:
objlist=
concat_cmds=
save_oldobjs=$oldobjs
oldobjs=
# Is there a better way of finding the last object in the list?
for obj in $save_oldobjs
do
last_oldobj=$obj
done
eval test_cmds=\"$old_archive_cmds\"
func_len " $test_cmds"
len0=$func_len_result
len=$len0
for obj in $save_oldobjs
do
func_len " $obj"
func_arith $len + $func_len_result
len=$func_arith_result
func_append objlist " $obj"
if test "$len" -lt "$max_cmd_len"; then
:
else
# the above command should be used before it gets too long
oldobjs=$objlist
if test "$obj" = "$last_oldobj" ; then
RANLIB=$save_RANLIB
fi
test -z "$concat_cmds" || concat_cmds=$concat_cmds~
eval concat_cmds=\"\${concat_cmds}$old_archive_cmds\"
objlist=
len=$len0
fi
done
RANLIB=$save_RANLIB
oldobjs=$objlist
if test "X$oldobjs" = "X" ; then
eval cmds=\"\$concat_cmds\"
else
eval cmds=\"\$concat_cmds~\$old_archive_cmds\"
fi
fi
fi
func_execute_cmds "$cmds" 'exit $?'
done
test -n "$generated" && \
func_show_eval "${RM}r$generated"
# Now create the libtool archive.
case $output in
*.la)
old_library=
test "$build_old_libs" = yes && old_library="$libname.$libext"
func_verbose "creating $output"
# Preserve any variables that may affect compiler behavior
for var in $variables_saved_for_relink; do
if eval test -z \"\${$var+set}\"; then
relink_command="{ test -z \"\${$var+set}\" || $lt_unset $var || { $var=; export $var; }; }; $relink_command"
elif eval var_value=\$$var; test -z "$var_value"; then
relink_command="$var=; export $var; $relink_command"
else
func_quote_for_eval "$var_value"
relink_command="$var=$func_quote_for_eval_result; export $var; $relink_command"
fi
done
# Quote the link command for shipping.
relink_command="(cd `pwd`; $SHELL $progpath $preserve_args --mode=relink $libtool_args @inst_prefix_dir@)"
relink_command=`$ECHO "X$relink_command" | $Xsed -e "$sed_quote_subst"`
if test "$hardcode_automatic" = yes ; then
relink_command=
fi
# Only create the output if not a dry run.
$opt_dry_run || {
for installed in no yes; do
if test "$installed" = yes; then
if test -z "$install_libdir"; then
break
fi
output="$output_objdir/$outputname"i
# Replace all uninstalled libtool libraries with the installed ones
newdependency_libs=
for deplib in $dependency_libs; do
case $deplib in
*.la)
func_basename "$deplib"
name="$func_basename_result"
eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $deplib`
test -z "$libdir" && \
func_fatal_error "\`$deplib' is not a valid libtool archive"
if test "x$EGREP" = x ; then
EGREP=egrep
fi
	      # We do not want portage's install root ($D) present.  Check for
	      # this only if the .la file is being installed.
if test "$installed" = yes && test "$D"; then
eval mynewdependency_lib=`echo "$libdir/$name" |sed -e "s:$D:/:g" -e 's:/\+:/:g'`
else
mynewdependency_lib="$libdir/$name"
fi
# Do not add duplicates
if test "$mynewdependency_lib"; then
my_little_ninja_foo_1=`echo $newdependency_libs |$EGREP -e "$mynewdependency_lib"`
if test -z "$my_little_ninja_foo_1"; then
newdependency_libs="$newdependency_libs $mynewdependency_lib"
fi
fi
;;
*)
if test "$installed" = yes; then
		# Prefer S=WORKDIR if our version of portage supports it,
		# because some ebuilds (e.g. gcc) do not use $S as the build root.
if test "$PWORKDIR"; then
S="$PWORKDIR"
fi
# We do not want portage's build root ($S) present.
my_little_ninja_foo_2=`echo $deplib |$EGREP -e "$S"`
# We do not want portage's install root ($D) present.
my_little_ninja_foo_3=`echo $deplib |$EGREP -e "$D"`
if test -n "$my_little_ninja_foo_2" && test "$S"; then
mynewdependency_lib=""
elif test -n "$my_little_ninja_foo_3" && test "$D"; then
eval mynewdependency_lib=`echo "$deplib" |sed -e "s:$D:/:g" -e 's:/\+:/:g'`
else
mynewdependency_lib="$deplib"
fi
else
mynewdependency_lib="$deplib"
fi
# Do not add duplicates
if test "$mynewdependency_lib"; then
my_little_ninja_foo_4=`echo $newdependency_libs |$EGREP -e "$mynewdependency_lib"`
if test -z "$my_little_ninja_foo_4"; then
newdependency_libs="$newdependency_libs $mynewdependency_lib"
fi
fi
;;
esac
done
dependency_libs="$newdependency_libs"
newdlfiles=
for lib in $dlfiles; do
case $lib in
*.la)
func_basename "$lib"
name="$func_basename_result"
eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $lib`
test -z "$libdir" && \
func_fatal_error "\`$lib' is not a valid libtool archive"
newdlfiles="$newdlfiles $libdir/$name"
;;
*) newdlfiles="$newdlfiles $lib" ;;
esac
done
dlfiles="$newdlfiles"
newdlprefiles=
for lib in $dlprefiles; do
case $lib in
*.la)
# Only pass preopened files to the pseudo-archive (for
	      # eventual linking with the application that links it) if we
# didn't already link the preopened objects directly into
# the library:
func_basename "$lib"
name="$func_basename_result"
eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $lib`
test -z "$libdir" && \
func_fatal_error "\`$lib' is not a valid libtool archive"
newdlprefiles="$newdlprefiles $libdir/$name"
;;
esac
done
dlprefiles="$newdlprefiles"
else
newdlfiles=
for lib in $dlfiles; do
case $lib in
[\\/]* | [A-Za-z]:[\\/]*) abs="$lib" ;;
*) abs=`pwd`"/$lib" ;;
esac
newdlfiles="$newdlfiles $abs"
done
dlfiles="$newdlfiles"
newdlprefiles=
for lib in $dlprefiles; do
case $lib in
[\\/]* | [A-Za-z]:[\\/]*) abs="$lib" ;;
*) abs=`pwd`"/$lib" ;;
esac
newdlprefiles="$newdlprefiles $abs"
done
dlprefiles="$newdlprefiles"
fi
$RM $output
# place dlname in correct position for cygwin
tdlname=$dlname
case $host,$output,$installed,$module,$dlname in
*cygwin*,*lai,yes,no,*.dll | *mingw*,*lai,yes,no,*.dll) tdlname=../bin/$dlname ;;
esac
# Do not add duplicates
if test "$installed" = yes && test "$D"; then
install_libdir=`echo "$install_libdir" |sed -e "s:$D:/:g" -e 's:/\+:/:g'`
fi
$ECHO > $output "\
# $outputname - a libtool library file
# Generated by $PROGRAM (GNU $PACKAGE$TIMESTAMP) $VERSION
#
# Please DO NOT delete this file!
# It is necessary for linking the library.
# The name that we can dlopen(3).
dlname='$tdlname'
# Names of this library.
library_names='$library_names'
# The name of the static archive.
old_library='$old_library'
# Linker flags that cannot go in dependency_libs.
inherited_linker_flags='$new_inherited_linker_flags'
# Libraries that this one depends upon.
dependency_libs='$dependency_libs'
# Names of additional weak libraries provided by this library
weak_library_names='$weak_libs'
# Version information for $libname.
current=$current
age=$age
revision=$revision
# Is this an already installed library?
installed=$installed
# Should we warn about portability when linking against -modules?
shouldnotlink=$module
# Files to dlopen/dlpreopen
dlopen='$dlfiles'
dlpreopen='$dlprefiles'
# Directory that this library needs to be installed in:
libdir='$install_libdir'"
if test "$installed" = no && test "$need_relink" = yes; then
$ECHO >> $output "\
relink_command=\"$relink_command\""
fi
done
}
# Do a symbolic link so that the libtool archive can be found in
# LD_LIBRARY_PATH before the program is installed.
func_show_eval '( cd "$output_objdir" && $RM "$outputname" && $LN_S "../$outputname" "$outputname" )' 'exit $?'
;;
esac
exit $EXIT_SUCCESS
}
{ test "$mode" = link || test "$mode" = relink; } &&
func_mode_link ${1+"$@"}
# func_mode_uninstall arg...
func_mode_uninstall ()
{
$opt_debug
RM="$nonopt"
files=
rmforce=
exit_status=0
# This variable tells wrapper scripts just to set variables rather
# than running their programs.
libtool_install_magic="$magic"
for arg
do
case $arg in
-f) RM="$RM $arg"; rmforce=yes ;;
-*) RM="$RM $arg" ;;
*) files="$files $arg" ;;
esac
done
test -z "$RM" && \
func_fatal_help "you must specify an RM program"
rmdirs=
origobjdir="$objdir"
for file in $files; do
func_dirname "$file" "" "."
dir="$func_dirname_result"
if test "X$dir" = X.; then
objdir="$origobjdir"
else
objdir="$dir/$origobjdir"
fi
func_basename "$file"
name="$func_basename_result"
test "$mode" = uninstall && objdir="$dir"
# Remember objdir for removal later, being careful to avoid duplicates
if test "$mode" = clean; then
case " $rmdirs " in
*" $objdir "*) ;;
*) rmdirs="$rmdirs $objdir" ;;
esac
fi
# Don't error if the file doesn't exist and rm -f was used.
if { test -L "$file"; } >/dev/null 2>&1 ||
{ test -h "$file"; } >/dev/null 2>&1 ||
test -f "$file"; then
:
elif test -d "$file"; then
exit_status=1
continue
elif test "$rmforce" = yes; then
continue
fi
rmfiles="$file"
case $name in
*.la)
# Possibly a libtool archive, so verify it.
if func_lalib_p "$file"; then
func_source $dir/$name
# Delete the libtool libraries and symlinks.
for n in $library_names; do
rmfiles="$rmfiles $objdir/$n"
done
test -n "$old_library" && rmfiles="$rmfiles $objdir/$old_library"
case "$mode" in
clean)
case " $library_names " in
# " " in the beginning catches empty $dlname
*" $dlname "*) ;;
*) rmfiles="$rmfiles $objdir/$dlname" ;;
esac
test -n "$libdir" && rmfiles="$rmfiles $objdir/$name $objdir/${name}i"
;;
uninstall)
if test -n "$library_names"; then
# Do each command in the postuninstall commands.
func_execute_cmds "$postuninstall_cmds" 'test "$rmforce" = yes || exit_status=1'
fi
if test -n "$old_library"; then
# Do each command in the old_postuninstall commands.
func_execute_cmds "$old_postuninstall_cmds" 'test "$rmforce" = yes || exit_status=1'
fi
# FIXME: should reinstall the best remaining shared library.
;;
esac
fi
;;
*.lo)
# Possibly a libtool object, so verify it.
if func_lalib_p "$file"; then
# Read the .lo file
func_source $dir/$name
# Add PIC object to the list of files to remove.
if test -n "$pic_object" &&
test "$pic_object" != none; then
rmfiles="$rmfiles $dir/$pic_object"
fi
# Add non-PIC object to the list of files to remove.
if test -n "$non_pic_object" &&
test "$non_pic_object" != none; then
rmfiles="$rmfiles $dir/$non_pic_object"
fi
fi
;;
*)
if test "$mode" = clean ; then
noexename=$name
case $file in
*.exe)
func_stripname '' '.exe' "$file"
file=$func_stripname_result
func_stripname '' '.exe' "$name"
noexename=$func_stripname_result
# $file with .exe has already been added to rmfiles,
# add $file without .exe
rmfiles="$rmfiles $file"
;;
esac
# Do a test to see if this is a libtool program.
if func_ltwrapper_p "$file"; then
if func_ltwrapper_executable_p "$file"; then
func_ltwrapper_scriptname "$file"
relink_command=
func_source $func_ltwrapper_scriptname_result
rmfiles="$rmfiles $func_ltwrapper_scriptname_result"
else
relink_command=
func_source $dir/$noexename
fi
# note $name still contains .exe if it was in $file originally
# as does the version of $file that was added into $rmfiles
rmfiles="$rmfiles $objdir/$name $objdir/${name}S.${objext}"
if test "$fast_install" = yes && test -n "$relink_command"; then
rmfiles="$rmfiles $objdir/lt-$name"
fi
if test "X$noexename" != "X$name" ; then
rmfiles="$rmfiles $objdir/lt-${noexename}.c"
fi
fi
fi
;;
esac
func_show_eval "$RM $rmfiles" 'exit_status=1'
done
objdir="$origobjdir"
# Try to remove the ${objdir}s in the directories where we deleted files
for dir in $rmdirs; do
if test -d "$dir"; then
func_show_eval "rmdir $dir >/dev/null 2>&1"
fi
done
exit $exit_status
}
{ test "$mode" = uninstall || test "$mode" = clean; } &&
func_mode_uninstall ${1+"$@"}
test -z "$mode" && {
help="$generic_help"
func_fatal_help "you must specify a MODE"
}
test -z "$exec_cmd" && \
func_fatal_help "invalid operation mode \`$mode'"
if test -n "$exec_cmd"; then
eval exec "$exec_cmd"
exit $EXIT_FAILURE
fi
exit $exit_status
# The TAGs below are defined such that we never get into a situation
# in which we disable both kinds of libraries. Given conflicting
# choices, we go for a static library, which is the most portable,
# since we can't tell whether shared libraries were disabled because
# the user asked for that or because the platform doesn't support
# them. This is particularly important on AIX, because we don't
# support having both static and shared libraries enabled at the same
# time on that platform, so we default to a shared-only configuration.
# If a disable-shared tag is given, we'll fall back to a static-only
# configuration. But we'll never go from static-only to shared-only.
# ### BEGIN LIBTOOL TAG CONFIG: disable-shared
build_libtool_libs=no
build_old_libs=yes
# ### END LIBTOOL TAG CONFIG: disable-shared
# ### BEGIN LIBTOOL TAG CONFIG: disable-static
build_old_libs=`case $build_libtool_libs in yes) echo no;; *) echo yes;; esac`
# ### END LIBTOOL TAG CONFIG: disable-static
# Local Variables:
# mode:shell-script
# sh-indentation:2
# End:
# vi:sw=2
| amyvmiwei/chromium | third_party/sqlite/ltmain.sh | Shell | bsd-3-clause | 227,001 |
#!/bin/sh
./psk-v2 2>&1
| y-trudeau/openswan-patch-meraki | testing/crypto/psk-v2-10/runit.sh | Shell | gpl-2.0 | 26 |
#!/bin/sh
cachedir=/tmp/.webcache
uci_load network
subcategories() {
[ -s "$cachedir/graphs" ] || { # create a cache if it does not exists already
[ -d "$cachedir" ] || mkdir "$cachedir" 2>/dev/null 1>&2
(echo "Graphs:10:CPU:graphs-cpu.sh"
echo "Graphs:20:Bandwidth:graphs-bandwidth.sh"
echo "Graphs:30:Vnstat:graphs-vnstat.sh"
sed -n -e "/:/"'{s/:.*//;s/^ *\(.*\)/Graphs:50:Traffic\>\> \1@TR\<\<:graphs-if.sh?if=\1/;p}' /proc/net/dev 2>/dev/null
) | sort -n >$cachedir/graphs
}
awk -F: -v "category=Graphs" -v "selected=$2" -f /usr/lib/webif/common.awk -f /usr/lib/webif/subcategories.awk $cachedir/graphs
}
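# Each cache line appears to follow "category:priority:label:target",
# e.g. "Graphs:20:Bandwidth:graphs-bandwidth.sh"; subcategories.awk
# (assumed) splits on ':' and renders the lines, already sorted by
# priority, as menu entries.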
| innodrivers/p4a-openwrt | feeds/xwrt/webif/files/www/cgi-bin/webif/graphs-subcategories.sh | Shell | gpl-2.0 | 639 |
rm -f $MYSQL_TEST_DIR/var/slave-data/master.info
rm -f $MYSQL_TEST_DIR/var/slave-data/*relay*
| rhuitl/uClinux | user/mysql/mysql-test/t/rpl_rotate_logs-slave.sh | Shell | gpl-2.0 | 94 |
#!/bin/bash
load ../../helpers
export SWARM_MESOS_TASK_TIMEOUT=30s
export SWARM_MESOS_USER=daemon
MESOS_IMAGE=dockerswarm/mesos:0.24.1
MESOS_MASTER_PORT=$(( ( RANDOM % 1000 ) + 10000 ))
# Start the Mesos master and one slave per node container.
function start_mesos() {
local current=${#DOCKER_CONTAINERS[@]}
MESOS_MASTER=$(
docker_host run -d --name mesos-master --net=host \
$MESOS_IMAGE mesos-master --ip=127.0.0.1 --work_dir=/ --registry=in_memory --port=$MESOS_MASTER_PORT
)
retry 10 1 eval "docker_host ps | grep 'mesos-master'"
for ((i=0; i < current; i++)); do
local docker_port=$(echo ${HOSTS[$i]} | cut -d: -f2)
MESOS_SLAVES[$i]=$(
docker_host run --privileged -d --name mesos-slave-$i --volumes-from node-$i -e DOCKER_HOST="${HOSTS[$i]}" -v /sys/fs/cgroup:/sys/fs/cgroup --net=host \
$MESOS_IMAGE mesos-slave --master=127.0.0.1:$MESOS_MASTER_PORT --containerizers=docker --attributes="docker_port:$docker_port" --hostname=127.0.0.1 --port=$(($MESOS_MASTER_PORT + (1 + $i))) --docker=/usr/local/bin/docker --executor_environment_variables="{\"DOCKER_HOST\":\"${HOSTS[$i]}\"}"
)
retry 10 1 eval "docker_host ps | grep 'mesos-slave-$i'"
done
}
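# Usage sketch (hypothetical helper names from ../../helpers): a test
# would bring the swarm nodes up first, e.g.
#   setup()    { start_docker 2; start_mesos; }
#   teardown() { stop_mesos; stop_docker; }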
# Stop the Mesos master and slaves.
function stop_mesos() {
echo "Stopping $MESOS_MASTER"
docker_host rm -f -v $MESOS_MASTER > /dev/null;
for id in ${MESOS_SLAVES[@]}; do
echo "Stopping $id"
docker_host rm -f -v $id > /dev/null;
done
}
| pdevine/swarm | test/integration/mesos/mesos_helpers.bash | Shell | apache-2.0 | 1,414 |
#! /bin/sh
mkdir -p $out/nix-support
echo "Hello" > $out/text.txt
echo "doc none $out/text.txt" > $out/nix-support/hydra-build-products
| rohitasva/hydra | tests/jobs/build-product-simple.sh | Shell | gpl-3.0 | 137 |
#!/bin/bash
# Copyright 2014 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# A library of helper functions and constant for the local config.
# Use the config file specified in $KUBE_CONFIG_FILE, or default to
# config-default.sh.
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "${KUBE_ROOT}/cluster/gce/${KUBE_CONFIG_FILE-"config-default.sh"}"
source "${KUBE_ROOT}/cluster/common.sh"
if [[ "${OS_DISTRIBUTION}" == "debian" || "${OS_DISTRIBUTION}" == "coreos" ]]; then
source "${KUBE_ROOT}/cluster/gce/${OS_DISTRIBUTION}/helper.sh"
else
echo "Cannot operate on cluster using os distro: ${OS_DISTRIBUTION}" >&2
exit 1
fi
NODE_INSTANCE_PREFIX="${INSTANCE_PREFIX}-minion"
ALLOCATE_NODE_CIDRS=true
KUBE_PROMPT_FOR_UPDATE=y
KUBE_SKIP_UPDATE=${KUBE_SKIP_UPDATE-"n"}
# Suffix to append to the staging path used for the server tars. Useful if
# multiple versions of the server are being used in the same project
# simultaneously (e.g. on Jenkins).
KUBE_GCS_STAGING_PATH_SUFFIX=${KUBE_GCS_STAGING_PATH_SUFFIX-""}
# KUBE_VERSION_REGEX matches things like "v0.13.1"
readonly KUBE_VERSION_REGEX="^v(0|[1-9][0-9]*)\\.(0|[1-9][0-9]*)\\.(0|[1-9][0-9]*)$"
# KUBE_CI_VERSION_REGEX matches things like "v0.14.1-341-ge0c9d9e"
readonly KUBE_CI_VERSION_REGEX="^v(0|[1-9][0-9]*)\\.(0|[1-9][0-9]*)\\.(0|[1-9][0-9]*)-(.*)$"
function join_csv {
local IFS=','; echo "$*";
}
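# e.g. `join_csv a b c` prints "a,b,c"; the local IFS keeps the change
# scoped to this function.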
# Verify prereqs
function verify-prereqs {
local cmd
for cmd in gcloud gsutil; do
if ! which "${cmd}" >/dev/null; then
local resp
if [[ "${KUBE_PROMPT_FOR_UPDATE}" == "y" ]]; then
echo "Can't find ${cmd} in PATH. Do you wish to install the Google Cloud SDK? [Y/n]"
read resp
else
resp="y"
fi
if [[ "${resp}" != "n" && "${resp}" != "N" ]]; then
curl https://sdk.cloud.google.com | bash
fi
if ! which "${cmd}" >/dev/null; then
echo "Can't find ${cmd} in PATH, please fix and retry. The Google Cloud "
echo "SDK can be downloaded from https://cloud.google.com/sdk/."
exit 1
fi
fi
done
if [[ "${KUBE_SKIP_UPDATE}" == "y" ]]; then
return
fi
# update and install components as needed
if [[ "${KUBE_PROMPT_FOR_UPDATE}" != "y" ]]; then
gcloud_prompt="-q"
fi
local sudo_prefix=""
if [ ! -w $(dirname `which gcloud`) ]; then
sudo_prefix="sudo"
fi
${sudo_prefix} gcloud ${gcloud_prompt:-} components update preview || true
${sudo_prefix} gcloud ${gcloud_prompt:-} components update alpha || true
${sudo_prefix} gcloud ${gcloud_prompt:-} components update || true
}
# Create a temp dir that'll be deleted at the end of this bash session.
#
# Vars set:
# KUBE_TEMP
function ensure-temp-dir {
if [[ -z ${KUBE_TEMP-} ]]; then
KUBE_TEMP=$(mktemp -d -t kubernetes.XXXXXX)
trap 'rm -rf "${KUBE_TEMP}"' EXIT
fi
}
# Verify and find the various tar files that we are going to use on the server.
#
# Vars set:
# SERVER_BINARY_TAR
# SALT_TAR
function find-release-tars {
SERVER_BINARY_TAR="${KUBE_ROOT}/server/kubernetes-server-linux-amd64.tar.gz"
if [[ ! -f "$SERVER_BINARY_TAR" ]]; then
SERVER_BINARY_TAR="${KUBE_ROOT}/_output/release-tars/kubernetes-server-linux-amd64.tar.gz"
fi
if [[ ! -f "$SERVER_BINARY_TAR" ]]; then
echo "!!! Cannot find kubernetes-server-linux-amd64.tar.gz"
exit 1
fi
SALT_TAR="${KUBE_ROOT}/server/kubernetes-salt.tar.gz"
if [[ ! -f "$SALT_TAR" ]]; then
SALT_TAR="${KUBE_ROOT}/_output/release-tars/kubernetes-salt.tar.gz"
fi
if [[ ! -f "$SALT_TAR" ]]; then
echo "!!! Cannot find kubernetes-salt.tar.gz"
exit 1
fi
}
# Use the gcloud defaults to find the project. If it is already set in the
# environment then go with that.
#
# Vars set:
# PROJECT
# PROJECT_REPORTED
function detect-project () {
if [[ -z "${PROJECT-}" ]]; then
PROJECT=$(gcloud config list project | tail -n 1 | cut -f 3 -d ' ')
fi
if [[ -z "${PROJECT-}" ]]; then
echo "Could not detect Google Cloud Platform project. Set the default project using " >&2
echo "'gcloud config set project <PROJECT>'" >&2
exit 1
fi
if [[ -z "${PROJECT_REPORTED-}" ]]; then
echo "Project: ${PROJECT}" >&2
echo "Zone: ${ZONE}" >&2
PROJECT_REPORTED=true
fi
}
function sha1sum-file() {
if which shasum >/dev/null 2>&1; then
shasum -a1 "$1" | awk '{ print $1 }'
else
sha1sum "$1" | awk '{ print $1 }'
fi
}
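# Example: sha1sum-file "${SERVER_BINARY_TAR}" prints just the 40-hex-digit
# digest, using shasum on OS X and sha1sum elsewhere.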
function already-staged() {
local -r file=$1
local -r newsum=$2
[[ -e "${file}.uploaded.sha1" ]] || return 1
local oldsum
oldsum=$(cat "${file}.uploaded.sha1")
[[ "${oldsum}" == "${newsum}" ]]
}
# Copy a release tar, if we don't already think it's staged in GCS
function copy-if-not-staged() {
local -r staging_path=$1
local -r gs_url=$2
local -r tar=$3
local -r hash=$4
if already-staged "${tar}" "${hash}"; then
echo "+++ $(basename ${tar}) already staged ('rm ${tar}.sha1' to force)"
else
echo "${hash}" > "${tar}.sha1"
gsutil -m -q -h "Cache-Control:private, max-age=0" cp "${tar}" "${tar}.sha1" "${staging_path}"
gsutil -m acl ch -g all:R "${gs_url}" "${gs_url}.sha1" >/dev/null 2>&1
echo "${hash}" > "${tar}.uploaded.sha1"
echo "+++ $(basename ${tar}) uploaded (sha1 = ${hash})"
fi
}
# Take the local tar files and upload them to Google Storage. They will then
# be downloaded by the master as part of its startup script.
#
# Assumed vars:
# PROJECT
# SERVER_BINARY_TAR
# SALT_TAR
# Vars set:
# SERVER_BINARY_TAR_URL
# SERVER_BINARY_TAR_HASH
# SALT_TAR_URL
# SALT_TAR_HASH
function upload-server-tars() {
SERVER_BINARY_TAR_URL=
SERVER_BINARY_TAR_HASH=
SALT_TAR_URL=
SALT_TAR_HASH=
local project_hash
if which md5 > /dev/null 2>&1; then
project_hash=$(md5 -q -s "$PROJECT")
else
project_hash=$(echo -n "$PROJECT" | md5sum | awk '{ print $1 }')
fi
  # This requires about 1 million projects before the probability of a
  # collision reaches 50%; that's probably good enough for now :P
project_hash=${project_hash:0:10}
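  # (Birthday bound: 10 hex chars give 16^10 ~ 1.1e12 values, so a 50%
  # collision chance is reached near 1.18*sqrt(16^10) ~ 1.2 million projects.)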
local -r staging_bucket="gs://kubernetes-staging-${project_hash}"
# Ensure the bucket is created
if ! gsutil ls "$staging_bucket" > /dev/null 2>&1 ; then
echo "Creating $staging_bucket"
gsutil mb "${staging_bucket}"
fi
local -r staging_path="${staging_bucket}/devel${KUBE_GCS_STAGING_PATH_SUFFIX}"
SERVER_BINARY_TAR_HASH=$(sha1sum-file "${SERVER_BINARY_TAR}")
SALT_TAR_HASH=$(sha1sum-file "${SALT_TAR}")
echo "+++ Staging server tars to Google Storage: ${staging_path}"
local server_binary_gs_url="${staging_path}/${SERVER_BINARY_TAR##*/}"
local salt_gs_url="${staging_path}/${SALT_TAR##*/}"
copy-if-not-staged "${staging_path}" "${server_binary_gs_url}" "${SERVER_BINARY_TAR}" "${SERVER_BINARY_TAR_HASH}"
copy-if-not-staged "${staging_path}" "${salt_gs_url}" "${SALT_TAR}" "${SALT_TAR_HASH}"
# Convert from gs:// URL to an https:// URL
SERVER_BINARY_TAR_URL="${server_binary_gs_url/gs:\/\//https://storage.googleapis.com/}"
SALT_TAR_URL="${salt_gs_url/gs:\/\//https://storage.googleapis.com/}"
}
# Detect minions created in the minion group
#
# Assumed vars:
# NODE_INSTANCE_PREFIX
# Vars set:
# MINION_NAMES
function detect-minion-names {
detect-project
MINION_NAMES=($(gcloud preview --project "${PROJECT}" instance-groups \
--zone "${ZONE}" instances --group "${NODE_INSTANCE_PREFIX}-group" list \
| cut -d'/' -f11))
echo "MINION_NAMES=${MINION_NAMES[*]}" >&2
}
# Waits until the number of running nodes in the instance group is equal to NUM_MINIONS
#
# Assumed vars:
# NODE_INSTANCE_PREFIX
# NUM_MINIONS
function wait-for-minions-to-run {
detect-project
local running_minions=0
while [[ "${NUM_MINIONS}" != "${running_minions}" ]]; do
echo -e -n "${color_yellow}Waiting for minions to run. "
echo -e "${running_minions} out of ${NUM_MINIONS} running. Retrying.${color_norm}"
sleep 5
running_minions=$((gcloud preview --project "${PROJECT}" instance-groups \
--zone "${ZONE}" instances --group "${NODE_INSTANCE_PREFIX}-group" list \
--running || true) | wc -l | xargs)
done
}
# Detect the information about the minions
#
# Assumed vars:
# ZONE
# Vars set:
# MINION_NAMES
# KUBE_MINION_IP_ADDRESSES (array)
function detect-minions () {
detect-project
detect-minion-names
KUBE_MINION_IP_ADDRESSES=()
for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
local minion_ip=$(gcloud compute instances describe --project "${PROJECT}" --zone "${ZONE}" \
"${MINION_NAMES[$i]}" --fields networkInterfaces[0].accessConfigs[0].natIP \
--format=text | awk '{ print $2 }')
if [[ -z "${minion_ip-}" ]] ; then
echo "Did not find ${MINION_NAMES[$i]}" >&2
else
echo "Found ${MINION_NAMES[$i]} at ${minion_ip}"
KUBE_MINION_IP_ADDRESSES+=("${minion_ip}")
fi
done
if [[ -z "${KUBE_MINION_IP_ADDRESSES-}" ]]; then
echo "Could not detect Kubernetes minion nodes. Make sure you've launched a cluster with 'kube-up.sh'" >&2
exit 1
fi
}
# Detect the IP for the master
#
# Assumed vars:
# MASTER_NAME
# ZONE
# Vars set:
# KUBE_MASTER
# KUBE_MASTER_IP
function detect-master () {
detect-project
KUBE_MASTER=${MASTER_NAME}
if [[ -z "${KUBE_MASTER_IP-}" ]]; then
KUBE_MASTER_IP=$(gcloud compute instances describe --project "${PROJECT}" --zone "${ZONE}" \
"${MASTER_NAME}" --fields networkInterfaces[0].accessConfigs[0].natIP \
--format=text | awk '{ print $2 }')
fi
if [[ -z "${KUBE_MASTER_IP-}" ]]; then
echo "Could not detect Kubernetes master node. Make sure you've launched a cluster with 'kube-up.sh'" >&2
exit 1
fi
echo "Using master: $KUBE_MASTER (external IP: $KUBE_MASTER_IP)"
}
# Ensure that we have a password created for validating to the master. Will
# read from kubeconfig for the current context if available.
#
# Assumed vars
# KUBE_ROOT
#
# Vars set:
# KUBE_USER
# KUBE_PASSWORD
function get-password {
get-kubeconfig-basicauth
if [[ -z "${KUBE_USER}" || -z "${KUBE_PASSWORD}" ]]; then
KUBE_USER=admin
KUBE_PASSWORD=$(python -c 'import string,random; print "".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(16))')
fi
}
# Ensure that we have a bearer token created for validating to the master.
# Will read from kubeconfig for the current context if available.
#
# Assumed vars
# KUBE_ROOT
#
# Vars set:
# KUBE_BEARER_TOKEN
function get-bearer-token() {
get-kubeconfig-bearertoken
if [[ -z "${KUBE_BEARER_TOKEN:-}" ]]; then
KUBE_BEARER_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
fi
}
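# Note: the dd|base64|tr pipeline above yields a 32-character token drawn
# from [A-Za-z0-9] (base64 of 128 random bytes with '=', '+' and '/'
# stripped, truncated to the first 32 bytes).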
# Wait for background jobs to finish. Exit with
# an error status if any of the jobs failed.
function wait-for-jobs {
local fail=0
local job
for job in $(jobs -p); do
wait "${job}" || fail=$((fail + 1))
done
if (( fail != 0 )); then
echo -e "${color_red}${fail} commands failed. Exiting.${color_norm}" >&2
# Ignore failures for now.
# exit 2
fi
}
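# Typical use (mirrors kube-push below):
#   push-node "node-1" &
#   push-node "node-2" &
#   wait-for-jobs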
# Robustly try to create a firewall rule.
# $1: The name of firewall rule.
# $2: IP ranges.
# $3: Target tags for this firewall rule.
function create-firewall-rule {
detect-project
local attempt=0
while true; do
if ! gcloud compute firewall-rules create "$1" \
--project "${PROJECT}" \
--network "${NETWORK}" \
--source-ranges "$2" \
--target-tags "$3" \
--allow tcp,udp,icmp,esp,ah,sctp; then
if (( attempt > 5 )); then
echo -e "${color_red}Failed to create firewall rule $1 ${color_norm}"
exit 2
fi
echo -e "${color_yellow}Attempt $(($attempt+1)) failed to create firewall rule $1. Retrying.${color_norm}"
attempt=$(($attempt+1))
else
break
fi
done
}
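# e.g. (as called from kube-up):
#   create-firewall-rule "${MINION_TAG}-all" "${CLUSTER_IP_RANGE}" "${MINION_TAG}"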
# Robustly try to create an instance template.
# $1: The name of the instance template.
# $2: The scopes flag.
# $3: The minion start script metadata from file.
# $4: The kube-env metadata.
function create-node-template {
detect-project
# First, ensure the template doesn't exist.
# TODO(mbforbes): To make this really robust, we need to parse the output and
# add retries. Just relying on a non-zero exit code doesn't
# distinguish an ephemeral failed call from a "not-exists".
if gcloud compute instance-templates describe "$1" --project "${PROJECT}" &>/dev/null; then
echo "Instance template ${1} already exists; deleting." >&2
if ! gcloud compute instance-templates delete "$1" --project "${PROJECT}" &>/dev/null; then
echo -e "${color_yellow}Failed to delete existing instance template${color_norm}" >&2
exit 2
fi
fi
local attempt=1
while true; do
echo "Attempt ${attempt} to create ${1}" >&2
if ! gcloud compute instance-templates create "$1" \
--project "${PROJECT}" \
--machine-type "${MINION_SIZE}" \
--boot-disk-type "${MINION_DISK_TYPE}" \
--boot-disk-size "${MINION_DISK_SIZE}" \
--image-project="${MINION_IMAGE_PROJECT}" \
--image "${MINION_IMAGE}" \
--tags "${MINION_TAG}" \
--network "${NETWORK}" \
$2 \
--can-ip-forward \
--metadata-from-file "$3","$4" >&2; then
if (( attempt > 5 )); then
echo -e "${color_red}Failed to create instance template $1 ${color_norm}" >&2
exit 2
fi
echo -e "${color_yellow}Attempt ${attempt} failed to create instance template $1. Retrying.${color_norm}" >&2
attempt=$(($attempt+1))
else
break
fi
done
}
# Robustly try to add metadata on an instance.
# $1: The name of the instance.
# $2...$n: The metadata key=value pairs to add.
function add-instance-metadata {
local -r instance=$1
shift 1
local -r kvs=( "$@" )
detect-project
local attempt=0
while true; do
if ! gcloud compute instances add-metadata "${instance}" \
--project "${PROJECT}" \
--zone "${ZONE}" \
--metadata "${kvs[@]}"; then
if (( attempt > 5 )); then
echo -e "${color_red}Failed to add instance metadata in ${instance} ${color_norm}"
exit 2
fi
echo -e "${color_yellow}Attempt $(($attempt+1)) failed to add metadata in ${instance}. Retrying.${color_norm}"
attempt=$(($attempt+1))
else
break
fi
done
}
# Robustly try to add metadata on an instance, from a file.
# $1: The name of the instance.
# $2...$n: The metadata key=file pairs to add.
function add-instance-metadata-from-file {
local -r instance=$1
shift 1
local -r kvs=( "$@" )
detect-project
local attempt=0
while true; do
echo "${kvs[@]}"
if ! gcloud compute instances add-metadata "${instance}" \
--project "${PROJECT}" \
--zone "${ZONE}" \
--metadata-from-file "$(join_csv ${kvs[@]})"; then
if (( attempt > 5 )); then
echo -e "${color_red}Failed to add instance metadata in ${instance} ${color_norm}"
exit 2
fi
echo -e "${color_yellow}Attempt $(($attempt+1)) failed to add metadata in ${instance}. Retrying.${color_norm}"
attempt=$(($attempt+1))
else
break
fi
done
}
# Quote something appropriate for a yaml string.
#
# TODO(zmerlynn): Note that this function doesn't so much "quote" as
# "strip out quotes", and we really should be using a YAML library for
# this, but PyYAML isn't shipped by default, and *rant rant rant ... SIGH*
function yaml-quote {
echo "'$(echo "${@}" | sed -e "s/'/''/g")'"
}
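# e.g. yaml-quote "it's" prints 'it''s': embedded single quotes are doubled
# and the result is wrapped in single quotes.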
function write-master-env {
build-kube-env true "${KUBE_TEMP}/master-kube-env.yaml"
}
function write-node-env {
build-kube-env false "${KUBE_TEMP}/node-kube-env.yaml"
}
# Create certificate pairs for the cluster.
# $1: The public IP for the master.
#
# These are used for static cert distribution (e.g. static clustering) at
# cluster creation time. This will be obsoleted once we implement dynamic
# clustering.
#
# The following certificate pairs are created:
#
# - ca (the cluster's certificate authority)
# - server
# - kubelet
# - kubecfg (for kubectl)
#
# TODO(roberthbailey): Replace easyrsa with a simple Go program to generate
# the certs that we need.
#
# Assumed vars
# KUBE_TEMP
#
# Vars set:
# CERT_DIR
# CA_CERT_BASE64
# MASTER_CERT_BASE64
# MASTER_KEY_BASE64
# KUBELET_CERT_BASE64
# KUBELET_KEY_BASE64
# KUBECFG_CERT_BASE64
# KUBECFG_KEY_BASE64
function create-certs {
local -r cert_ip="${1}"
  local octets=($(echo "$SERVICE_CLUSTER_IP_RANGE" | sed -e 's|/.*||' -e 's/\./ /g'))
  ((octets[3]+=1))
  local -r service_ip=$(echo "${octets[*]}" | sed 's/ /./g')
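  # e.g. SERVICE_CLUSTER_IP_RANGE=10.0.0.0/16 gives service_ip=10.0.0.1, the
  # first IP of the service range (the ClusterIP of the kubernetes service).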
local -r sans="IP:${cert_ip},IP:${service_ip},DNS:kubernetes,DNS:kubernetes.default,DNS:kubernetes.default.svc,DNS:kubernetes.default.svc.${DNS_DOMAIN},DNS:${MASTER_NAME}"
# Note: This was heavily cribbed from make-ca-cert.sh
(cd "${KUBE_TEMP}"
curl -L -O https://storage.googleapis.com/kubernetes-release/easy-rsa/easy-rsa.tar.gz > /dev/null 2>&1
tar xzf easy-rsa.tar.gz > /dev/null 2>&1
cd easy-rsa-master/easyrsa3
./easyrsa init-pki > /dev/null 2>&1
./easyrsa --batch "--req-cn=${cert_ip}@$(date +%s)" build-ca nopass > /dev/null 2>&1
./easyrsa --subject-alt-name="${sans}" build-server-full "${MASTER_NAME}" nopass > /dev/null 2>&1
./easyrsa build-client-full kubelet nopass > /dev/null 2>&1
./easyrsa build-client-full kubecfg nopass > /dev/null 2>&1) || {
# If there was an error in the subshell, just die.
# TODO(roberthbailey): add better error handling here
echo "=== Failed to generate certificates: Aborting ==="
exit 2
}
CERT_DIR="${KUBE_TEMP}/easy-rsa-master/easyrsa3"
# By default, linux wraps base64 output every 76 cols, so we use 'tr -d' to remove whitespaces.
# Note 'base64 -w0' doesn't work on Mac OS X, which has different flags.
CA_CERT_BASE64=$(cat "${CERT_DIR}/pki/ca.crt" | base64 | tr -d '\r\n')
MASTER_CERT_BASE64=$(cat "${CERT_DIR}/pki/issued/${MASTER_NAME}.crt" | base64 | tr -d '\r\n')
MASTER_KEY_BASE64=$(cat "${CERT_DIR}/pki/private/${MASTER_NAME}.key" | base64 | tr -d '\r\n')
KUBELET_CERT_BASE64=$(cat "${CERT_DIR}/pki/issued/kubelet.crt" | base64 | tr -d '\r\n')
KUBELET_KEY_BASE64=$(cat "${CERT_DIR}/pki/private/kubelet.key" | base64 | tr -d '\r\n')
KUBECFG_CERT_BASE64=$(cat "${CERT_DIR}/pki/issued/kubecfg.crt" | base64 | tr -d '\r\n')
KUBECFG_KEY_BASE64=$(cat "${CERT_DIR}/pki/private/kubecfg.key" | base64 | tr -d '\r\n')
}
# Instantiate a kubernetes cluster
#
# Assumed vars
# KUBE_ROOT
# <Various vars set in config file>
function kube-up {
ensure-temp-dir
detect-project
get-password
get-bearer-token
# Make sure we have the tar files staged on Google Storage
find-release-tars
upload-server-tars
local running_in_terminal=false
# May be false if tty is not allocated (for example with ssh -T).
if [ -t 1 ]; then
running_in_terminal=true
fi
if [[ ${running_in_terminal} == "true" || ${KUBE_UP_AUTOMATIC_CLEANUP} == "true" ]]; then
if ! check-resources; then
local run_kube_down="n"
echo "${KUBE_RESOURCE_FOUND} found." >&2
# Get user input only if running in terminal.
if [[ ${running_in_terminal} == "true" && ${KUBE_UP_AUTOMATIC_CLEANUP} == "false" ]]; then
read -p "Would you like to shut down the old cluster (call kube-down)? [y/N] " run_kube_down
fi
if [[ ${run_kube_down} == "y" || ${run_kube_down} == "Y" || ${KUBE_UP_AUTOMATIC_CLEANUP} == "true" ]]; then
echo "... calling kube-down" >&2
kube-down
fi
fi
fi
if ! gcloud compute networks --project "${PROJECT}" describe "${NETWORK}" &>/dev/null; then
echo "Creating new network: ${NETWORK}"
# The network needs to be created synchronously or we have a race. The
# firewalls can be added concurrent with instance creation.
gcloud compute networks create --project "${PROJECT}" "${NETWORK}" --range "10.240.0.0/16"
fi
if ! gcloud compute firewall-rules --project "${PROJECT}" describe "${NETWORK}-default-internal" &>/dev/null; then
gcloud compute firewall-rules create "${NETWORK}-default-internal" \
--project "${PROJECT}" \
--network "${NETWORK}" \
--source-ranges "10.0.0.0/8" \
--allow "tcp:1-65535,udp:1-65535,icmp" &
fi
if ! gcloud compute firewall-rules describe --project "${PROJECT}" "${NETWORK}-default-ssh" &>/dev/null; then
gcloud compute firewall-rules create "${NETWORK}-default-ssh" \
--project "${PROJECT}" \
--network "${NETWORK}" \
--source-ranges "0.0.0.0/0" \
--allow "tcp:22" &
fi
echo "Starting master and configuring firewalls"
gcloud compute firewall-rules create "${MASTER_NAME}-https" \
--project "${PROJECT}" \
--network "${NETWORK}" \
--target-tags "${MASTER_TAG}" \
--allow tcp:443 &
# We have to make sure the disk is created before creating the master VM, so
# run this in the foreground.
gcloud compute disks create "${MASTER_NAME}-pd" \
--project "${PROJECT}" \
--zone "${ZONE}" \
--type "${MASTER_DISK_TYPE}" \
--size "${MASTER_DISK_SIZE}"
# Generate a bearer token for this cluster. We push this separately
# from the other cluster variables so that the client (this
# computer) can forget it later. This should disappear with
# https://github.com/GoogleCloudPlatform/kubernetes/issues/3168
KUBELET_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
KUBE_PROXY_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
# Reserve the master's IP so that it can later be transferred to another VM
# without disrupting the kubelets. IPs are associated with regions, not zones,
# so extract the region name, which is the same as the zone but with the final
# dash and characters trailing the dash removed.
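  # e.g. ZONE=us-central1-b yields REGION=us-central1.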
local REGION=${ZONE%-*}
MASTER_RESERVED_IP=$(gcloud compute addresses create "${MASTER_NAME}-ip" \
--project "${PROJECT}" \
--region "${REGION}" -q --format yaml | awk '/^address:/ { print $2 }')
create-certs "${MASTER_RESERVED_IP}"
create-master-instance "${MASTER_RESERVED_IP}" &
# Create a single firewall rule for all minions.
create-firewall-rule "${MINION_TAG}-all" "${CLUSTER_IP_RANGE}" "${MINION_TAG}" &
# Report logging choice (if any).
if [[ "${ENABLE_NODE_LOGGING-}" == "true" ]]; then
echo "+++ Logging using Fluentd to ${LOGGING_DESTINATION:-unknown}"
fi
# Wait for last batch of jobs
wait-for-jobs
echo "Creating minions."
# TODO(mbforbes): Refactor setting scope flags.
local -a scope_flags=()
if (( "${#MINION_SCOPES[@]}" > 0 )); then
scope_flags=("--scopes" "$(join_csv ${MINION_SCOPES[@]})")
else
scope_flags=("--no-scopes")
fi
write-node-env
create-node-instance-template
gcloud preview managed-instance-groups --zone "${ZONE}" \
create "${NODE_INSTANCE_PREFIX}-group" \
--project "${PROJECT}" \
--base-instance-name "${NODE_INSTANCE_PREFIX}" \
--size "${NUM_MINIONS}" \
--template "${NODE_INSTANCE_PREFIX}-template" || true;
# TODO: this should be true when the above create managed-instance-group
# command returns, but currently it returns before the instances come up due
# to gcloud's deficiency.
wait-for-minions-to-run
detect-minion-names
detect-master
echo "Waiting for cluster initialization."
echo
echo " This will continually check to see if the API for kubernetes is reachable."
echo " This might loop forever if there was some uncaught error during start"
echo " up."
echo
# curl in mavericks is borked.
secure=""
if which sw_vers > /dev/null; then
if [[ $(sw_vers | grep ProductVersion | awk '{print $2}') = "10.9."* ]]; then
secure="--insecure"
fi
fi
until curl --cacert "${CERT_DIR}/pki/ca.crt" \
-H "Authorization: Bearer ${KUBE_BEARER_TOKEN}" \
${secure} \
--max-time 5 --fail --output /dev/null --silent \
"https://${KUBE_MASTER_IP}/api/v1beta3/pods"; do
printf "."
sleep 2
done
echo "Kubernetes cluster created."
export KUBE_CERT="${CERT_DIR}/pki/issued/kubecfg.crt"
export KUBE_KEY="${CERT_DIR}/pki/private/kubecfg.key"
export CA_CERT="${CERT_DIR}/pki/ca.crt"
export CONTEXT="${PROJECT}_${INSTANCE_PREFIX}"
(
umask 077
create-kubeconfig
)
echo
echo -e "${color_green}Kubernetes cluster is running. The master is running at:"
echo
echo -e "${color_yellow} https://${KUBE_MASTER_IP}"
echo
echo -e "${color_green}The user name and password to use is located in ${KUBECONFIG}.${color_norm}"
echo
}
# Delete a kubernetes cluster. This is called from test-teardown.
#
# Assumed vars:
# MASTER_NAME
# NODE_INSTANCE_PREFIX
# ZONE
# This function tears down cluster resources 10 at a time to avoid issuing too many
# API calls and exceeding API quota. It is important to bring down the instances before bringing
# down the firewall rules and routes.
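# (Bash array slicing: "${arr[@]::10}" expands to the first 10 elements and
# "${arr[@]:10}" to the remainder.)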
function kube-down {
detect-project
echo "Bringing down cluster"
set +e # Do not stop on error
  # The gcloud APIs don't return machine-parsable error codes/retry information. Therefore the best we can
  # do is parse the output and special-case particular responses we are interested in.
if gcloud preview managed-instance-groups --project "${PROJECT}" --zone "${ZONE}" describe "${NODE_INSTANCE_PREFIX}-group" &>/dev/null; then
deleteCmdOutput=$(gcloud preview managed-instance-groups --zone "${ZONE}" delete \
--project "${PROJECT}" \
--quiet \
"${NODE_INSTANCE_PREFIX}-group")
if [[ "$deleteCmdOutput" != "" ]]; then
      # Managed instance group deletion is done asynchronously; we must wait for it to complete, or subsequent steps fail
deleteCmdOperationId=$(echo $deleteCmdOutput | grep "Operation:" | sed "s/.*Operation:[[:space:]]*\([^[:space:]]*\).*/\1/g")
if [[ "$deleteCmdOperationId" != "" ]]; then
deleteCmdStatus="PENDING"
while [[ "$deleteCmdStatus" != "DONE" ]]
do
sleep 5
deleteCmdOperationOutput=$(gcloud preview managed-instance-groups --zone "${ZONE}" --project "${PROJECT}" get-operation $deleteCmdOperationId)
deleteCmdStatus=$(echo $deleteCmdOperationOutput | grep -i "status:" | sed "s/.*status:[[:space:]]*\([^[:space:]]*\).*/\1/g")
echo "Waiting for MIG deletion to complete. Current status: " $deleteCmdStatus
done
fi
fi
fi
if gcloud compute instance-templates describe --project "${PROJECT}" "${NODE_INSTANCE_PREFIX}-template" &>/dev/null; then
gcloud compute instance-templates delete \
--project "${PROJECT}" \
--quiet \
"${NODE_INSTANCE_PREFIX}-template"
fi
# First delete the master (if it exists).
if gcloud compute instances describe "${MASTER_NAME}" --zone "${ZONE}" --project "${PROJECT}" &>/dev/null; then
gcloud compute instances delete \
--project "${PROJECT}" \
--quiet \
--delete-disks all \
--zone "${ZONE}" \
"${MASTER_NAME}"
fi
# Delete the master pd (possibly leaked by kube-up if master create failed).
if gcloud compute disks describe "${MASTER_NAME}"-pd --zone "${ZONE}" --project "${PROJECT}" &>/dev/null; then
gcloud compute disks delete \
--project "${PROJECT}" \
--quiet \
--zone "${ZONE}" \
"${MASTER_NAME}"-pd
fi
# Find out what minions are running.
local -a minions
minions=( $(gcloud compute instances list \
--project "${PROJECT}" --zone "${ZONE}" \
--regexp "${NODE_INSTANCE_PREFIX}-.+" \
| awk 'NR >= 2 { print $1 }') )
# If any minions are running, delete them in batches.
while (( "${#minions[@]}" > 0 )); do
echo Deleting nodes "${minions[*]::10}"
gcloud compute instances delete \
--project "${PROJECT}" \
--quiet \
--delete-disks boot \
--zone "${ZONE}" \
"${minions[@]::10}"
minions=( "${minions[@]:10}" )
done
# Delete firewall rule for the master.
if gcloud compute firewall-rules describe --project "${PROJECT}" "${MASTER_NAME}-https" &>/dev/null; then
gcloud compute firewall-rules delete \
--project "${PROJECT}" \
--quiet \
"${MASTER_NAME}-https"
fi
# Delete firewall rule for minions.
if gcloud compute firewall-rules describe --project "${PROJECT}" "${MINION_TAG}-all" &>/dev/null; then
gcloud compute firewall-rules delete \
--project "${PROJECT}" \
--quiet \
"${MINION_TAG}-all"
fi
# Delete routes.
local -a routes
# Clean up all routes w/ names like "<cluster-name>-<node-GUID>"
# e.g. "kubernetes-12345678-90ab-cdef-1234-567890abcdef". The name is
# determined by the node controller on the master.
# Note that this is currently a noop, as synchronously deleting the node MIG
# first allows the master to cleanup routes itself.
local TRUNCATED_PREFIX="${INSTANCE_PREFIX:0:26}"
routes=( $(gcloud compute routes list --project "${PROJECT}" \
--regexp "${TRUNCATED_PREFIX}-.{8}-.{4}-.{4}-.{4}-.{12}" | awk 'NR >= 2 { print $1 }') )
while (( "${#routes[@]}" > 0 )); do
echo Deleting routes "${routes[*]::10}"
gcloud compute routes delete \
--project "${PROJECT}" \
--quiet \
"${routes[@]::10}"
routes=( "${routes[@]:10}" )
done
# Delete the master's reserved IP
local REGION=${ZONE%-*}
if gcloud compute addresses describe "${MASTER_NAME}-ip" --region "${REGION}" --project "${PROJECT}" &>/dev/null; then
gcloud compute addresses delete \
--project "${PROJECT}" \
--region "${REGION}" \
--quiet \
"${MASTER_NAME}-ip"
fi
export CONTEXT="${PROJECT}_${INSTANCE_PREFIX}"
clear-kubeconfig
set -e
}
# Checks if there are any existing resources related to the Kubernetes cluster.
#
# Assumed vars:
# MASTER_NAME
# NODE_INSTANCE_PREFIX
# ZONE
# Vars set:
# KUBE_RESOURCE_FOUND
function check-resources {
detect-project
echo "Looking for already existing resources"
KUBE_RESOURCE_FOUND=""
if gcloud preview managed-instance-groups --project "${PROJECT}" --zone "${ZONE}" describe "${NODE_INSTANCE_PREFIX}-group" &>/dev/null; then
KUBE_RESOURCE_FOUND="Managed instance group ${NODE_INSTANCE_PREFIX}-group"
return 1
fi
if gcloud compute instance-templates describe --project "${PROJECT}" "${NODE_INSTANCE_PREFIX}-template" &>/dev/null; then
KUBE_RESOURCE_FOUND="Instance template ${NODE_INSTANCE_PREFIX}-template"
return 1
fi
if gcloud compute instances describe --project "${PROJECT}" "${MASTER_NAME}" --zone "${ZONE}" &>/dev/null; then
KUBE_RESOURCE_FOUND="Kubernetes master ${MASTER_NAME}"
return 1
fi
if gcloud compute disks describe --project "${PROJECT}" "${MASTER_NAME}"-pd --zone "${ZONE}" &>/dev/null; then
KUBE_RESOURCE_FOUND="Persistent disk ${MASTER_NAME}-pd"
return 1
fi
# Find out what minions are running.
local -a minions
minions=( $(gcloud compute instances list \
--project "${PROJECT}" --zone "${ZONE}" \
--regexp "${NODE_INSTANCE_PREFIX}-.+" \
| awk 'NR >= 2 { print $1 }') )
if (( "${#minions[@]}" > 0 )); then
KUBE_RESOURCE_FOUND="${#minions[@]} matching matching ${NODE_INSTANCE_PREFIX}-.+"
return 1
fi
if gcloud compute firewall-rules describe --project "${PROJECT}" "${MASTER_NAME}-https" &>/dev/null; then
KUBE_RESOURCE_FOUND="Firewall rules for ${MASTER_NAME}-https"
return 1
fi
if gcloud compute firewall-rules describe --project "${PROJECT}" "${MINION_TAG}-all" &>/dev/null; then
KUBE_RESOURCE_FOUND="Firewall rules for ${MASTER_NAME}-all"
return 1
fi
local -a routes
routes=( $(gcloud compute routes list --project "${PROJECT}" \
--regexp "${INSTANCE_PREFIX}-minion-.{4}" | awk 'NR >= 2 { print $1 }') )
if (( "${#routes[@]}" > 0 )); then
KUBE_RESOURCE_FOUND="${#routes[@]} routes matching ${INSTANCE_PREFIX}-minion-.{4}"
return 1
fi
local REGION=${ZONE%-*}
if gcloud compute addresses describe --project "${PROJECT}" "${MASTER_NAME}-ip" --region "${REGION}" &>/dev/null; then
KUBE_RESOURCE_FOUND="Master's reserved IP"
return 1
fi
# No resources found.
return 0
}
# Prepare to push new binaries to kubernetes cluster
# $1 - whether prepare push to node
function prepare-push() {
#TODO(dawnchen): figure out how to upgrade coreos node
if [[ "${OS_DISTRIBUTION}" != "debian" ]]; then
echo "Updating a kubernetes cluster with ${OS_DISTRIBUTION} is not supported yet." >&2
exit 1
fi
OUTPUT=${KUBE_ROOT}/_output/logs
mkdir -p ${OUTPUT}
ensure-temp-dir
detect-project
detect-master
detect-minion-names
get-password
get-bearer-token
# Make sure we have the tar files staged on Google Storage
tars_from_version
# Prepare node env vars and update MIG template
if [[ "${1-}" == "true" ]]; then
write-node-env
# TODO(mbforbes): Refactor setting scope flags.
local -a scope_flags=()
if (( "${#MINION_SCOPES[@]}" > 0 )); then
scope_flags=("--scopes" "${MINION_SCOPES[@]}")
else
scope_flags=("--no-scopes")
fi
# Ugly hack: Since it is not possible to delete instance-template that is currently
# being used, create a temp one, then delete the old one and recreate it once again.
create-node-instance-template "tmp"
gcloud preview managed-instance-groups --zone "${ZONE}" \
set-template "${NODE_INSTANCE_PREFIX}-group" \
--project "${PROJECT}" \
--template "${NODE_INSTANCE_PREFIX}-template-tmp" || true;
gcloud compute instance-templates delete \
--project "${PROJECT}" \
--quiet \
"${NODE_INSTANCE_PREFIX}-template" || true
create-node-instance-template
gcloud preview managed-instance-groups --zone "${ZONE}" \
set-template "${NODE_INSTANCE_PREFIX}-group" \
--project "${PROJECT}" \
--template "${NODE_INSTANCE_PREFIX}-template" || true;
gcloud compute instance-templates delete \
--project "${PROJECT}" \
--quiet \
"${NODE_INSTANCE_PREFIX}-template-tmp" || true
fi
}
# Push binaries to kubernetes master
function push-master {
echo "Updating master metadata ..."
write-master-env
add-instance-metadata-from-file "${KUBE_MASTER}" "kube-env=${KUBE_TEMP}/master-kube-env.yaml" "startup-script=${KUBE_ROOT}/cluster/gce/configure-vm.sh"
echo "Pushing to master (log at ${OUTPUT}/push-${KUBE_MASTER}.log) ..."
cat ${KUBE_ROOT}/cluster/gce/configure-vm.sh | gcloud compute ssh --ssh-flag="-o LogLevel=quiet" --project "${PROJECT}" --zone "${ZONE}" "${KUBE_MASTER}" --command "sudo bash -s -- --push" &> ${OUTPUT}/push-"${KUBE_MASTER}".log
}
# Push binaries to kubernetes node
function push-node() {
node=${1}
echo "Updating node ${node} metadata... "
add-instance-metadata-from-file "${node}" "kube-env=${KUBE_TEMP}/node-kube-env.yaml" "startup-script=${KUBE_ROOT}/cluster/gce/configure-vm.sh"
echo "Start upgrading node ${node} (log at ${OUTPUT}/push-${node}.log) ..."
cat ${KUBE_ROOT}/cluster/gce/configure-vm.sh | gcloud compute ssh --ssh-flag="-o LogLevel=quiet" --project "${PROJECT}" --zone "${ZONE}" "${node}" --command "sudo bash -s -- --push" &> ${OUTPUT}/push-"${node}".log
}
# Push binaries to kubernetes cluster
function kube-push {
prepare-push true
push-master
for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
push-node "${MINION_NAMES[$i]}" &
done
wait-for-jobs
# TODO(zmerlynn): Re-create instance-template with the new
# node-kube-env. This isn't important until the node-ip-range issue
# is solved (because that's blocking automatic dynamic nodes from
# working). The node-kube-env has to be composed with the KUBELET_TOKEN
# and KUBE_PROXY_TOKEN. Ideally we would have
# https://github.com/GoogleCloudPlatform/kubernetes/issues/3168
# implemented before then, though, so avoiding this mess until then.
echo
echo "Kubernetes cluster is running. The master is running at:"
echo
echo " https://${KUBE_MASTER_IP}"
echo
echo "The user name and password to use is located in ~/.kube/config"
echo
}
# -----------------------------------------------------------------------------
# Cluster specific test helpers used from hack/e2e-test.sh
# Execute prior to running tests to build a release if required for env.
#
# Assumed Vars:
# KUBE_ROOT
function test-build-release {
# Make a release
"${KUBE_ROOT}/build/release.sh"
}
# Execute prior to running tests to initialize required structure. This is
# called from hack/e2e.go only when running -up (it is run after kube-up).
#
# Assumed vars:
# Variables from config.sh
function test-setup {
# Detect the project into $PROJECT if it isn't set
detect-project
# Open up port 80 & 8080 so common containers on minions can be reached
# TODO(roberthbailey): Remove this once we are no longer relying on hostPorts.
local start=`date +%s`
gcloud compute firewall-rules create \
--project "${PROJECT}" \
--target-tags "${MINION_TAG}" \
--allow tcp:80,tcp:8080 \
--network "${NETWORK}" \
"${MINION_TAG}-${INSTANCE_PREFIX}-http-alt" 2> /dev/null || true
  # As there is no simple way to wait longer for this operation, we need to
  # manually wait some additional time (20 minutes altogether).
until gcloud compute firewall-rules describe --project "${PROJECT}" "${MINION_TAG}-${INSTANCE_PREFIX}-http-alt" 2> /dev/null || [ $(($start + 1200)) -lt `date +%s` ]
do sleep 5
done
# Check if the firewall rule exists and fail if it does not.
gcloud compute firewall-rules describe --project "${PROJECT}" "${MINION_TAG}-${INSTANCE_PREFIX}-http-alt"
# Open up the NodePort range
# TODO(justinsb): Move to main setup, if we decide whether we want to do this by default.
start=`date +%s`
gcloud compute firewall-rules create \
--project "${PROJECT}" \
--target-tags "${MINION_TAG}" \
--allow tcp:30000-32767,udp:30000-32767 \
--network "${NETWORK}" \
"${MINION_TAG}-${INSTANCE_PREFIX}-nodeports" 2> /dev/null || true
  # As there is no simple way to wait longer for this operation, we need to
  # manually wait some additional time (20 minutes altogether).
until gcloud compute firewall-rules describe --project "${PROJECT}" "${MINION_TAG}-${INSTANCE_PREFIX}-nodeports" 2> /dev/null || [ $(($start + 1200)) -lt `date +%s` ]
do sleep 5
done
# Check if the firewall rule exists and fail if it does not.
gcloud compute firewall-rules describe --project "${PROJECT}" "${MINION_TAG}-${INSTANCE_PREFIX}-nodeports"
}
# Execute after running tests to perform any required clean-up. This is called
# from hack/e2e.go
function test-teardown {
detect-project
echo "Shutting down test cluster in background."
gcloud compute firewall-rules delete \
--project "${PROJECT}" \
--quiet \
"${MINION_TAG}-${INSTANCE_PREFIX}-http-alt" || true
gcloud compute firewall-rules delete \
--project "${PROJECT}" \
--quiet \
"${MINION_TAG}-${INSTANCE_PREFIX}-nodeports" || true
"${KUBE_ROOT}/cluster/kube-down.sh"
}
# SSH to a node by name ($1) and run a command ($2).
function ssh-to-node {
local node="$1"
local cmd="$2"
# Loop until we can successfully ssh into the box
for try in $(seq 1 5); do
if gcloud compute ssh --ssh-flag="-o LogLevel=quiet" --project "${PROJECT}" --zone="${ZONE}" "${node}" --command "echo test > /dev/null"; then
break
fi
sleep 5
done
# Then actually try the command.
gcloud compute ssh --ssh-flag="-o LogLevel=quiet" --project "${PROJECT}" --zone="${ZONE}" "${node}" --command "${cmd}"
}
# Restart the kube-proxy on a node ($1)
function restart-kube-proxy {
ssh-to-node "$1" "sudo /etc/init.d/kube-proxy restart"
}
# Restart the kube-apiserver on a node ($1)
function restart-apiserver {
ssh-to-node "$1" "sudo docker ps | grep /kube-apiserver | cut -d ' ' -f 1 | xargs sudo docker kill"
}
# Perform preparations required to run e2e tests
function prepare-e2e() {
detect-project
}
| aaronlevy/kubernetes | cluster/gce/util.sh | Shell | apache-2.0 | 40,613 |
#
# Copyright (C) 2011 OpenWrt.org
#
PART_NAME=firmware
REQUIRE_IMAGE_METADATA=1
platform_check_image() {
return 0
}
platform_do_upgrade() {
local board=$(board_name)
case "$board" in
*)
default_do_upgrade "$1"
;;
esac
}
| chaojin/openwrt | target/linux/ath79/tiny/base-files/lib/upgrade/platform.sh | Shell | gpl-2.0 | 234 |
#!/bin/bash
# Copyright 2016 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script runs the integration tests on an OSX machine for the XHyve Driver
# The script expects the following env variables:
# MINIKUBE_LOCATION: GIT_COMMIT from upstream build.
# COMMIT: Actual commit ID from upstream build
# EXTRA_BUILD_ARGS (optional): Extra args to be passed into the minikube integrations tests
# access_token: The Github API access token. Injected by the Jenkins credential provider.
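# Example invocation (hypothetical values; normally supplied by Jenkins):
#   MINIKUBE_LOCATION=1234 COMMIT=0123abc ./osx_integration_tests_xhyve.sh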
set -e
OS_ARCH="darwin-amd64"
VM_DRIVER="xhyve"
JOB_NAME="OSX-Xhyve"
# Download files and set permissions
source common.sh
| rawlingsj/gofabric8 | vendor/k8s.io/minikube/hack/jenkins/osx_integration_tests_xhyve.sh | Shell | apache-2.0 | 1,164 |
#!/bin/sh
IFS="
"
for line in `cat userlist`; do
test -z "$line" && continue
user=`echo $line | cut -f 1 -d' '`
echo "adding user $user"
useradd -m -s /bin/bash $user
cp -r /srv/ipython/examples /home/$user/examples
chown -R $user /home/$user/examples
done
| jupyterhub/oauthenticator | examples/full/addusers.sh | Shell | bsd-3-clause | 270 |
#!/bin/bash
fw_depends mysql postgresql mongodb nginx mono
sed -i 's|localhost|'"$DBHOST"'|g' src/Web.config
# extra cleaning
rm -rf src/bin src/obj
xbuild src/ServiceStackBenchmark.csproj /t:Clean
xbuild src/ServiceStackBenchmark.csproj /p:Configuration=Release
# xsp
MONO_OPTIONS=--gc=sgen xsp4 --port 8080 -nonstop &
| steveklabnik/FrameworkBenchmarks | frameworks/CSharp/servicestack/setup_xsp.sh | Shell | bsd-3-clause | 322 |
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0
#
# Check if current architecture are missing any function calls compared
# to i386.
# i386 define a number of legacy system calls that are i386 specific
# and listed below so they are ignored.
#
# Usage:
# checksyscalls.sh gcc gcc-options
#
ignore_list() {
cat << EOF
#include <asm/types.h>
#include <asm/unistd.h>
/* *at */
#define __IGNORE_open /* openat */
#define __IGNORE_link /* linkat */
#define __IGNORE_unlink /* unlinkat */
#define __IGNORE_mknod /* mknodat */
#define __IGNORE_chmod /* fchmodat */
#define __IGNORE_chown /* fchownat */
#define __IGNORE_mkdir /* mkdirat */
#define __IGNORE_rmdir /* unlinkat */
#define __IGNORE_lchown /* fchownat */
#define __IGNORE_access /* faccessat */
#define __IGNORE_rename /* renameat2 */
#define __IGNORE_readlink /* readlinkat */
#define __IGNORE_symlink /* symlinkat */
#define __IGNORE_utimes /* futimesat */
#define __IGNORE_stat /* fstatat */
#define __IGNORE_lstat /* fstatat */
#define __IGNORE_stat64 /* fstatat64 */
#define __IGNORE_lstat64 /* fstatat64 */
#ifndef __ARCH_WANT_SET_GET_RLIMIT
#define __IGNORE_getrlimit /* getrlimit */
#define __IGNORE_setrlimit /* setrlimit */
#endif
#ifndef __ARCH_WANT_MEMFD_SECRET
#define __IGNORE_memfd_secret
#endif
/* Missing flags argument */
#define __IGNORE_renameat /* renameat2 */
/* CLOEXEC flag */
#define __IGNORE_pipe /* pipe2 */
#define __IGNORE_dup2 /* dup3 */
#define __IGNORE_epoll_create /* epoll_create1 */
#define __IGNORE_inotify_init /* inotify_init1 */
#define __IGNORE_eventfd /* eventfd2 */
#define __IGNORE_signalfd /* signalfd4 */
/* MMU */
#ifndef CONFIG_MMU
#define __IGNORE_madvise
#define __IGNORE_mbind
#define __IGNORE_mincore
#define __IGNORE_mlock
#define __IGNORE_mlockall
#define __IGNORE_munlock
#define __IGNORE_munlockall
#define __IGNORE_mprotect
#define __IGNORE_msync
#define __IGNORE_migrate_pages
#define __IGNORE_move_pages
#define __IGNORE_remap_file_pages
#define __IGNORE_get_mempolicy
#define __IGNORE_set_mempolicy
#define __IGNORE_swapoff
#define __IGNORE_swapon
#endif
/* System calls for 32-bit kernels only */
#if BITS_PER_LONG == 64
#define __IGNORE_sendfile64
#define __IGNORE_ftruncate64
#define __IGNORE_truncate64
#define __IGNORE_stat64
#define __IGNORE_lstat64
#define __IGNORE_fcntl64
#define __IGNORE_fadvise64_64
#define __IGNORE_fstatfs64
#define __IGNORE_statfs64
#define __IGNORE_llseek
#define __IGNORE_mmap2
#define __IGNORE_clock_gettime64
#define __IGNORE_clock_settime64
#define __IGNORE_clock_adjtime64
#define __IGNORE_clock_getres_time64
#define __IGNORE_clock_nanosleep_time64
#define __IGNORE_timer_gettime64
#define __IGNORE_timer_settime64
#define __IGNORE_timerfd_gettime64
#define __IGNORE_timerfd_settime64
#define __IGNORE_utimensat_time64
#define __IGNORE_pselect6_time64
#define __IGNORE_ppoll_time64
#define __IGNORE_io_pgetevents_time64
#define __IGNORE_recvmmsg_time64
#define __IGNORE_mq_timedsend_time64
#define __IGNORE_mq_timedreceive_time64
#define __IGNORE_semtimedop_time64
#define __IGNORE_rt_sigtimedwait_time64
#define __IGNORE_futex_time64
#define __IGNORE_sched_rr_get_interval_time64
#else
#define __IGNORE_sendfile
#define __IGNORE_ftruncate
#define __IGNORE_truncate
#define __IGNORE_stat
#define __IGNORE_lstat
#define __IGNORE_fstat
#define __IGNORE_fcntl
#define __IGNORE_fadvise64
#define __IGNORE_newfstatat
#define __IGNORE_fstatfs
#define __IGNORE_statfs
#define __IGNORE_lseek
#define __IGNORE_mmap
#define __IGNORE_clock_gettime
#define __IGNORE_clock_settime
#define __IGNORE_clock_adjtime
#define __IGNORE_clock_getres
#define __IGNORE_clock_nanosleep
#define __IGNORE_timer_gettime
#define __IGNORE_timer_settime
#define __IGNORE_timerfd_gettime
#define __IGNORE_timerfd_settime
#define __IGNORE_utimensat
#define __IGNORE_pselect6
#define __IGNORE_ppoll
#define __IGNORE_io_pgetevents
#define __IGNORE_recvmmsg
#define __IGNORE_mq_timedsend
#define __IGNORE_mq_timedreceive
#define __IGNORE_semtimedop
#define __IGNORE_rt_sigtimedwait
#define __IGNORE_futex
#define __IGNORE_sched_rr_get_interval
#define __IGNORE_gettimeofday
#define __IGNORE_settimeofday
#define __IGNORE_wait4
#define __IGNORE_adjtimex
#define __IGNORE_nanosleep
#define __IGNORE_io_getevents
#define __IGNORE_recvmmsg
#endif
/* i386-specific or historical system calls */
#define __IGNORE_break
#define __IGNORE_stty
#define __IGNORE_gtty
#define __IGNORE_ftime
#define __IGNORE_prof
#define __IGNORE_lock
#define __IGNORE_mpx
#define __IGNORE_ulimit
#define __IGNORE_profil
#define __IGNORE_ioperm
#define __IGNORE_iopl
#define __IGNORE_idle
#define __IGNORE_modify_ldt
#define __IGNORE_ugetrlimit
#define __IGNORE_vm86
#define __IGNORE_vm86old
#define __IGNORE_set_thread_area
#define __IGNORE_get_thread_area
#define __IGNORE_madvise1
#define __IGNORE_oldstat
#define __IGNORE_oldfstat
#define __IGNORE_oldlstat
#define __IGNORE_oldolduname
#define __IGNORE_olduname
#define __IGNORE_umount
#define __IGNORE_waitpid
#define __IGNORE_stime
#define __IGNORE_nice
#define __IGNORE_signal
#define __IGNORE_sigaction
#define __IGNORE_sgetmask
#define __IGNORE_sigsuspend
#define __IGNORE_sigpending
#define __IGNORE_ssetmask
#define __IGNORE_readdir
#define __IGNORE_socketcall
#define __IGNORE_ipc
#define __IGNORE_sigreturn
#define __IGNORE_sigprocmask
#define __IGNORE_bdflush
#define __IGNORE__llseek
#define __IGNORE__newselect
#define __IGNORE_create_module
#define __IGNORE_query_module
#define __IGNORE_get_kernel_syms
#define __IGNORE_sysfs
#define __IGNORE_uselib
#define __IGNORE__sysctl
#define __IGNORE_arch_prctl
#define __IGNORE_nfsservctl
/* ... including the "new" 32-bit uid syscalls */
#define __IGNORE_lchown32
#define __IGNORE_getuid32
#define __IGNORE_getgid32
#define __IGNORE_geteuid32
#define __IGNORE_getegid32
#define __IGNORE_setreuid32
#define __IGNORE_setregid32
#define __IGNORE_getgroups32
#define __IGNORE_setgroups32
#define __IGNORE_fchown32
#define __IGNORE_setresuid32
#define __IGNORE_getresuid32
#define __IGNORE_setresgid32
#define __IGNORE_getresgid32
#define __IGNORE_chown32
#define __IGNORE_setuid32
#define __IGNORE_setgid32
#define __IGNORE_setfsuid32
#define __IGNORE_setfsgid32
/* these can be expressed using other calls */
#define __IGNORE_alarm /* setitimer */
#define __IGNORE_creat /* open */
#define __IGNORE_fork /* clone */
#define __IGNORE_futimesat /* utimensat */
#define __IGNORE_getpgrp /* getpgid */
#define __IGNORE_getdents /* getdents64 */
#define __IGNORE_pause /* sigsuspend */
#define __IGNORE_poll /* ppoll */
#define __IGNORE_select /* pselect6 */
#define __IGNORE_epoll_wait /* epoll_pwait */
#define __IGNORE_time /* gettimeofday */
#define __IGNORE_uname /* newuname */
#define __IGNORE_ustat /* statfs */
#define __IGNORE_utime /* utimes */
#define __IGNORE_vfork /* clone */
/* sync_file_range had a stupid ABI. Allow sync_file_range2 instead */
#ifdef __NR_sync_file_range2
#define __IGNORE_sync_file_range
#endif
/* Unmerged syscalls for AFS, STREAMS, etc. */
#define __IGNORE_afs_syscall
#define __IGNORE_getpmsg
#define __IGNORE_putpmsg
#define __IGNORE_vserver
/* 64-bit ports never needed these, and new 32-bit ports can use statx */
#define __IGNORE_fstat64
#define __IGNORE_fstatat64
EOF
}
syscall_list() {
grep '^[0-9]' "$1" | sort -n |
while read nr abi name entry ; do
echo "#if !defined(__NR_${name}) && !defined(__IGNORE_${name})"
echo "#warning syscall ${name} not implemented"
echo "#endif"
done
}
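# Example: for a syscall_32.tbl entry such as (hypothetical)
#   5 i386 open sys_open
# syscall_list emits the probe:
#   #if !defined(__NR_open) && !defined(__IGNORE_open)
#   #warning syscall open not implemented
#   #endif
# which the compiler invocation below evaluates against the target
# architecture's <asm/unistd.h>.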
(ignore_list && syscall_list $(dirname $0)/../arch/x86/entry/syscalls/syscall_32.tbl) | \
$* -Wno-error -E -x c - > /dev/null
| tprrt/linux-stable | scripts/checksyscalls.sh | Shell | gpl-2.0 | 7,600 |
#!/bin/sh
################################################################################
## ##
## Copyright (c) International Business Machines Corp., 2001 ##
## ##
## This program is free software; you can redistribute it and#or modify ##
## it under the terms of the GNU General Public License as published by ##
## the Free Software Foundation; either version 2 of the License, or ##
## (at your option) any later version. ##
## ##
## This program is distributed in the hope that it will be useful, but ##
## WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY ##
## or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ##
## for more details. ##
## ##
## You should have received a copy of the GNU General Public License ##
## along with this program; if not, write to the Free Software ##
## Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA ##
## ##
################################################################################
#
# File : logrotate_tests.sh
#
# Description: Test Basic functionality of logrotate command.
# Test #1: Test that logrotate -f <file.conf> rotates the logfile
# as per the specifications in the conf file. Create a file
# tst_logfile in /var/log/. Create a conf file such that this
# logfile is set for rotation every week. Execute the command
# logrotate -f <file.conf>, check to see if it forced rotation.
# Test #2: Check if logrotate running as a cronjob will rotate a
# logfile when it exceeds a specific size. Create two cronjobs
# 1. runs a command to log a string to a logfile. 2. runs
# logrotate <file.conf> every minute. The conf file specifies
# that the rotation happen only if the log file exceeds 2k file
# size.
#
# Author: Manoj Iyer, [email protected]
#
# History: Dec 23 2002 - Created - Manoj Iyer.
# Dec 24 2002 - Added - Test #2 - Test to run logrotate as a
# cron job.
# Feb 28 2003 - Fixed - Modified testcase to use functions.
#
# Function: chk_ifexists
#
# Description: - Check if the command required for this test exists.
#
# Input: - $1 - calling test case.
# - $2 - command that needs to be checked.
#
# Return: - zero on success.
# - non-zero on failure.
chk_ifexists()
{
RC=0
which $2 > $LTPTMP/tst_logrotate.err 2>&1 || RC=$?
if [ $RC -ne 0 ]
then
tst_brkm TBROK NULL "$1: command $2 not found."
fi
return $RC
}
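# Example (as used by init below): chk_ifexists INIT logrotate || return $RC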
# Function: init
#
# Description: - Check if the commands required for this test exist.
# - Create temporary directories required for this test.
# - Initialize global variables.
#
# Return: - zero on success.
# - non-zero on failure.
init()
{
# Initialize global variables.
export RC=0
export TST_TOTAL=2
export TCID="logrotate"
export TST_COUNT=0
 # Initialize cleanup function.
trap "cleanup" 0
# create the temporary directory used by this testcase
if [ -z $TMP ]
then
LTPTMP=/tmp/tst_logrotate.$$
else
LTPTMP=$TMP/tst_logrotate.$$
fi
mkdir -p $LTPTMP > /dev/null 2>&1 || RC=$?
if [ $RC -ne 0 ]
then
tst_brkm TBROK "INIT: Unable to create temporary directory"
return $RC
fi
 # check if the commands tst_*, logrotate, and awk exist.
chk_ifexists INIT tst_resm || return $RC
chk_ifexists INIT logrotate || return $RC
chk_ifexists INIT awk || return $RC
return $RC
}
# Function: cleanup
#
# Description: - remove temporary files and directories. Stop all jobs started
# by this testcase.
#
# Return: - zero on success.
# - non-zero on failure.
cleanup()
{
#remove all cronjobs that were installed.
tst_resm TINFO "CLEAN: removing all cron jobs."
crontab -r > /dev/null 2>&1
# remove all the temporary files created by this test.
tst_resm TINFO "CLEAN: removing $LTPTMP"
rm -fr $LTPTMP
}
# Function: test01
#
# Description: - Test that logrotate will rotate the logfile
# according to the specifications in the config file.
# - create a config file that will rotate the /var/log/tst_logfile
# file.
# - use force option to force logrotate to cause the log file to
# be rotated.
# - compress the file after rotation.
#
# Return: - zero on success.
# - non-zero on failure.
test01()
{
count=0
files=" "
filesize=0
TCID=logrotate01
TST_COUNT=1
tst_resm TINFO "Test #1: create a configfile $LTPTMP/var_mesg.config"
tst_resm TINFO "Test #1: use logrotate -f <config> to force rotation"
tst_resm TINFO "Test #1: this will rotate the log file according to"
tst_resm TINFO "Test #1: the specification in the configfile."
tst_resm TINFO "Test #1: 1. rotate /var/log/tst_logfile file."
tst_resm TINFO "Test #1: 2. compresses it."
# create config file.
cat >$LTPTMP/tst_logrotate.conf <<-EOF
#****** Begin Config file *******
# create new (empty) log files after rotating old ones
create
# compress the log files
compress
/var/log/tst_logfile {
rotate 5
weekly
}
#****** End Config file *******
EOF
# create a log file in /var/log/
cat >/var/log/tst_logfile <<-EOF
#****** Begin Log File ********
# This is a dummy log file.
#****** End Log File ********
EOF
while [ $count -lt 10 ]
do
echo "This a dummy log file used to test logrotate command." >> \
/var/log/tst_logfile
count=$(( $count+1 ))
done
# remove all old-n-stale logfiles.
for files in /var/log/tst_logfile.*
do
rm -f $files > /dev/null 2>&1
done
chmod 644 $LTPTMP/tst_logrotate.conf
logrotate -fv $LTPTMP/tst_logrotate.conf > $LTPTMP/tst_logrotate.out 2>&1 \
|| RC=$?
if [ $RC -eq 0 ]
then
# check if config file $LTPTMP/tst_logrotate.conf is read
# check if /etc/logrotate.d is included/
# check if 5 rotations are forced.
# check if compression is done.
grep "reading config file $LTPTMP/tst_logrotate.conf" \
$LTPTMP/tst_logrotate.out > $LTPTMP/tst_logrotate.err 2>&1 || RC=$?
grep "forced from command line (5 rotations)" \
$LTPTMP/tst_logrotate.out > $LTPTMP/tst_logrotate.err 2>&1 || RC=$?
egrep "compressing new|log with" \
$LTPTMP/tst_logrotate.out > $LTPTMP/tst_logrotate.err 2>&1 || RC=$?
if [ $RC -ne 0 ]
then
   tst_res TFAIL $LTPTMP/tst_logrotate.err \
    "Test #1: logrotate command failed. Reason:"
else
# Check if compressed log file is created.
if [ -f /var/log/tst_logfile.1.gz ]
then
file /var/log/tst_logfile.1.gz | grep "gzip compressed data" \
> $LTPTMP/tst_logrotate.out 2>&1 || RC=$?
if [ $RC -eq 0 ]
then
tst_resm TPASS \
"Test #1: logrotate created a compressed file."
else
tst_res TFAIL $LTPTMP/tst_logrotate.out \
"Test #1: Failed to create a compressed file. Reason:"
fi
return $RC
else
tst_res TFAIL $LTPTMP/tst_logrotate.out \
"Test #1: Failed create /var/log/tst_logfile.1.gz. Reason:"
return $RC
fi
fi
else
tst_res TFAIL $LTPTMP/tst_logrotate.out \
"Test #1: logrotate command exited with $RC return code. Output:"
fi
return $RC
}
test02()
{
# Test #2
# Test that logrotate will rotate the logfile if the logfile
# exceeds a certain size.
# - create a config file that will rotate the /var/log/tst_largelogfile.
# - run logrotate in a cron job that runs every minute.
# - add messages to the logfile until it gets rotated when a pre-determined
# size is reached.
export TCID=logrotate02
export TST_COUNT=2
RC=0
tst_resm TINFO "Test #2: create a configfile $LTPTMP/tst_largelog.conf"
tst_resm TINFO "Test #2: logrotate $LTPTMP/tst_largelog.conf - cronjob"
tst_resm TINFO "Test #2: set to rotate tst_largelogfile when size > 2K"
# create config file.
cat >$LTPTMP/tst_largelog.conf <<EOF
# create new (empty) log files after rotating old ones
create
# compress the log files
compress
# RPM packages drop log rotation information into this directory
include /etc/logrotate.d
/var/log/tst_largelogfile {
rotate 5
size=2k
}
EOF
# create the pseudo-log file.
cat >/var/log/tst_largelogfile <<EOF
# This is a pseudo-log file. This file will grow to a 2k size before
# getting rotated.
EOF
# create logrotate cron job.
cat >$LTPTMP/tst_logrotate.cron <<EOF
* * * * * logrotate $LTPTMP/tst_largelog.conf
EOF
chmod 777 $LTPTMP/tst_logrotate.cron > /dev/null 2>&1
tst_resm TINFO "Test #2: Installing cron job to run logrotate"
crontab $LTPTMP/tst_logrotate.cron > $LTPTMP/tst_logrotate.out 2>&1 || RC=$?
if [ $RC -ne 0 ]
then
echo "Exit status of crontab command: $RC" >> tst_logrotate.out 2>/dev/null
tst_brk TBROK $LTPTMP/tst_logrotate.out NULL \
"Test #2: crontab Broke while installing cronjob. Reason:"
 TFAILCNT=$(( $TFAILCNT+1 ))
else
tst_resm TINFO "Test #2: Cronjob installed successfully"
fi
# cron job to increase the log file size.
cat >$LTPTMP/tst_addtolog.cron <<EOF
* * * * * echo "To Err Is Human, To Really Screw Up You Need A Computer." >>/var/log/tst_largelogfile 2>/dev/null
EOF
tst_resm TINFO "Test #2: Installing cron job to increase logsize"
crontab $LTPTMP/tst_addtolog.cron > $LTPTMP/tst_logrotate.out 2>&1 || RC=$?
if [ $RC -ne 0 ]
then
echo "Exit status of crontab command: $RC" >> tst_logrotate.out 2>/dev/null
tst_brk TBROK $LTPTMP/tst_logrotate.out NULL \
"Test #2: crontab Broke while installing cronjob. Reason:"
 TFAILCNT=$(( $TFAILCNT+1 ))
else
tst_resm TINFO "Test #2: Cronjob installed successfully"
fi
# let cron jobs get started.
sleep 10s
# increase the log file size.
# wait for the /var/log/tst_largelogfile to be filled to a size greater than 2k
tst_resm TINFO "Test #2: Checking if file size is > 2k"
tst_resm TINFO "Test #2: Pls be patient this will take some time."
tst_resm TINFO "Test #2: or killall -9 logrotate02 to skip.."
if [ -f `which awk` ]
then
while [ $filesize -lt 2046 ]
do
filesize=`ls -l /var/log/tst_largelogfile | awk '{print $5}'`
done
# wait for 1m and check if logrotate has rotated the logfile. The cron job
# that does a logrotate runs every 1 minute so give the cron a minute...
sleep 1m
else
tst_resm TINFO "Test #2: No AWK installed ... sleeping for 10mts"
sleep 10m
fi
if [ -f /var/log/tst_largelogfile.1.gz ]
then
file /var/log/tst_largelogfile.1.gz | grep "gzip compressed data" \
> $LTPTMP/tst_logrotate.out 2>&1 || RC=$?
if [ $RC -eq 0 ]
then
tst_resm TPASS \
"Test #1: logrotate worked as cron, created a compressed file."
else
tst_res TFAIL $LTPTMP/tst_logrotate.out \
"Test #1: Failed to create a compressed file. Reason:"
fi
else
tst_res TFAIL $LTPTMP/tst_logrotate.out \
"Test #1: Failed to create /var/log/tst_largelogfile.1.gz. Reason:"
TFAILCNT=$(( $TFAILCNT+1 ))
fi
}
# Function: main
#
# Description: - Execute all tests and report results.
#
# Exit: - zero on success
# - non-zero on failure.
RC=0
init || exit $?
test01 || RC=$?
exit $RC
| richiejp/ltp | testcases/commands/logrotate/logrotate_tests.sh | Shell | gpl-2.0 | 11,743 |
#!/bin/bash
make
mkdir -p $PREFIX/bin
cp pbgzip $PREFIX/bin
| JenCabral/bioconda-recipes | recipes/pbgzip/build.sh | Shell | mit | 60 |
#!/bin/sh
test_description='previous branch syntax @{-n}'
GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
. ./test-lib.sh
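# @{-N} denotes the N-th branch (or detached commit) checked out before the
# current one, e.g. after "git checkout topic && git checkout main",
# @{-1} names "topic".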
test_expect_success 'branch -d @{-1}' '
test_commit A &&
git checkout -b junk &&
git checkout - &&
test "$(git symbolic-ref HEAD)" = refs/heads/main &&
git branch -d @{-1} &&
test_must_fail git rev-parse --verify refs/heads/junk
'
test_expect_success 'branch -d @{-12} when there are not enough switches yet' '
git reflog expire --expire=now &&
git checkout -b junk2 &&
git checkout - &&
test "$(git symbolic-ref HEAD)" = refs/heads/main &&
test_must_fail git branch -d @{-12} &&
git rev-parse --verify refs/heads/main
'
test_expect_success 'merge @{-1}' '
git checkout A &&
test_commit B &&
git checkout A &&
test_commit C &&
test_commit D &&
git branch -f main B &&
git branch -f other &&
git checkout other &&
git checkout main &&
git merge @{-1} &&
git cat-file commit HEAD | grep "Merge branch '\''other'\''"
'
test_expect_success 'merge @{-1}~1' '
git checkout main &&
git reset --hard B &&
git checkout other &&
git checkout main &&
git merge @{-1}~1 &&
git cat-file commit HEAD >actual &&
grep "Merge branch '\''other'\''" actual
'
test_expect_success 'merge @{-100} before checking out that many branches yet' '
git reflog expire --expire=now &&
git checkout -f main &&
git reset --hard B &&
git branch -f other C &&
git checkout other &&
git checkout main &&
test_must_fail git merge @{-100}
'
test_expect_success 'log -g @{-1}' '
git checkout -b last_branch &&
git checkout -b new_branch &&
echo "last_branch@{0}" >expect &&
git log -g --format=%gd @{-1} >actual &&
test_cmp expect actual
'
test_done
| Osse/git | t/t0100-previous.sh | Shell | gpl-2.0 | 1,724 |
#!/bin/sh
# Tests for lp_load() via testparm.
#
# The main purpose (for now) is to test all the special handlers
# and the macro expansions.
TEMP_CONFFILE=${LIBDIR}/smb.conf.tmp
TESTPARM="$VALGRIND ${TESTPARM:-$BINDIR/testparm} --suppress-prompt --skip-logic-checks"
test x"$TEST_FUNCTIONS_SH" != x"INCLUDED" && {
incdir=`dirname $0`
. $incdir/test_functions.sh
}
failed=0
test_include_expand_macro()
{
MACRO=$1
rm -f ${TEMP_CONFFILE}
cat >${TEMP_CONFFILE}<<EOF
[global]
include = ${TEMP_CONFFILE}.%${MACRO}
EOF
${TESTPARM} ${TEMP_CONFFILE}
}
test_one_global_option()
{
OPTION="$@"
rm -f ${TEMP_CONFFILE}
cat > ${TEMP_CONFFILE}<<EOF
[global]
${OPTION}
EOF
${TESTPARM} ${TEMP_CONFFILE}
}
test_copy()
{
rm -f ${TEMP_CONFFILE}
cat > ${TEMP_CONFFILE}<<EOF
[share1]
path = /tmp
read only = no
[share2]
copy = share1
EOF
${TESTPARM} ${TEMP_CONFFILE}
}
testit "netbios name" \
test_one_global_option "netbios name = funky" || \
failed=`expr ${failed} + 1`
testit "netbios aliases" \
test_one_global_option "netbios aliases = funky1 funky2 funky3" || \
failed=`expr ${failed} + 1`
testit "netbios scope" \
test_one_global_option "netbios scope = abc" || \
failed=`expr ${failed} + 1`
testit "workgroup" \
test_one_global_option "workgroup = samba" || \
failed=`expr ${failed} + 1`
testit "display charset" \
test_one_global_option "display charset = UTF8" || \
failed=`expr ${failed} + 1`
testit "ldap debug level" \
test_one_global_option "ldap debug level = 7" || \
failed=`expr ${failed} + 1`
for LETTER in U G D I i L N M R T a d h m v w V ; do
testit "include with %${LETTER} macro expansion" \
test_include_expand_macro "${LETTER}" || \
failed=`expr ${failed} + 1`
done
testit "copy" \
test_copy || \
failed=`expr ${failed} + 1`
rm -f ${TEMP_CONFFILE}
testok $0 ${failed}
| opinkerfi/winexe | source3/script/tests/test_testparm_s3.sh | Shell | gpl-3.0 | 1,822 |
#!/bin/bash
# Copyright JS Foundation and other contributors, http://js.foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
find ./tools ./jerry-debugger -name "*.py" \
| xargs pylint --rcfile=tools/pylint/pylintrc
| grgustaf/jerryscript | tools/check-pylint.sh | Shell | apache-2.0 | 725 |
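# Run the xorm test suite against a file-backed SQLite database
# (shared cache, read-write-create mode).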
go test -db=sqlite3 -conn_str="./test.db?cache=shared&mode=rwc" | go-xorm/cmd | xorm/vendor/github.com/go-xorm/xorm/test_sqlite.sh | Shell | bsd-3-clause | 63 |
R CMD REMOVE --library=$PREFIX/lib/R/library/ mgu74bv2cdf
| ostrokach/bioconda-recipes | recipes/bioconductor-mgu74bv2cdf/pre-unlink.sh | Shell | mit | 58 |
#!/bin/bash
# Copyright 2014 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "${KUBE_ROOT}/hack/lib/init.sh"
kube::golang::setup_env
# Find binary
gendocs=$(kube::util::find-binary "gendocs")
genkubedocs=$(kube::util::find-binary "genkubedocs")
genman=$(kube::util::find-binary "genman")
genbashcomp=$(kube::util::find-binary "genbashcomp")
mungedocs=$(kube::util::find-binary "mungedocs")
DOCROOT="${KUBE_ROOT}/docs/"
EXAMPLEROOT="${KUBE_ROOT}/examples/"
# mungedocs --verify can (and should) be run on the real docs, otherwise their
# links will be distorted. --verify means that it will not make changes.
"${mungedocs}" "--verify=true" "--root-dir=${DOCROOT}" && ret=0 || ret=$?
if [[ $ret -eq 1 ]]; then
echo "${DOCROOT} is out of date. Please run hack/update-generated-docs.sh"
exit 1
fi
if [[ $ret -gt 1 ]]; then
echo "Error running mungedocs"
exit 1
fi
"${mungedocs}" "--verify=true" "--root-dir=${EXAMPLEROOT}" && ret=0 || ret=$?
if [[ $ret -eq 1 ]]; then
echo "${EXAMPLEROOT} is out of date. Please run hack/update-generated-docs.sh"
exit 1
fi
if [[ $ret -gt 1 ]]; then
echo "Error running mungedocs"
exit 1
fi
kube::util::ensure-temp-dir
kube::util::gen-docs "${KUBE_TEMP}"
diff -Naup "${KUBE_TEMP}/.generated_docs" "${KUBE_ROOT}/.generated_docs" || ret=1 || true
while read file; do
diff -Naup "${KUBE_TEMP}/${file}" "${KUBE_ROOT}/${file}" || ret=1 || true
done <"${KUBE_TEMP}/.generated_docs"
needsanalytics=($(kube::util::gen-analytics "${KUBE_ROOT}" 1))
if [[ ${#needsanalytics[@]} -ne 0 ]]; then
echo -e "Some md files are missing ga-beacon analytics link:"
printf '%s\n' "${needsanalytics[@]}"
ret=1
fi
if [[ $ret -eq 0 ]]
then
echo "Generated docs are up to date."
else
echo "Generated docs are out of date. Please run hack/update-generated-docs.sh"
exit 1
fi
# ex: ts=2 sw=2 et filetype=sh
| roofmonkey/kubernetes | hack/after-build/verify-generated-docs.sh | Shell | apache-2.0 | 2,489 |
#!/bin/bash
set -e
export CPATH=${PREFIX}/include
export LD_LIBRARY_PATH=${PREFIX}/lib
export LDFLAGS="-L${PREFIX}/lib"
./bootstrap
./configure --prefix=${PREFIX}
make
make install
| guowei-he/bioconda-recipes | recipes/pullseq/build.sh | Shell | mit | 176 |
#!/bin/sh -e
if ! /usr/bin/test -e config.guess; then
/usr/bin/wget -q -O config.guess 'http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess;hb=HEAD'
/bin/chmod a+x config.guess
fi
if ! /usr/bin/test -e config.sub; then
/usr/bin/wget -q -O config.sub 'http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub;hb=HEAD'
/bin/chmod a+x config.sub
fi
/usr/bin/aclocal --force
/usr/bin/autoconf -f
/bin/rm -rf autom4te.cache
res=0
for d in cygwin utils cygserver; do
(cd $d && exec ./autogen.sh) || res=1
done
exit $res
| xyzz/vita-newlib | winsup/autogen.sh | Shell | gpl-2.0 | 577 |
#!/bin/sh
###############################################################################
#
# Author: Lasse Collin
#
# This file has been put into the public domain.
# You can do whatever you want with this file.
#
###############################################################################
# If xz wasn't built, this test is skipped.
if test -x ../src/xz/xz ; then
:
else
(exit 77)
exit 77
fi
# Find out if our shell supports functions.
eval 'unset foo ; foo() { return 42; } ; foo'
if test $? != 42 ; then
echo "/bin/sh doesn't support functions, skipping this test."
(exit 77)
exit 77
fi
test_xz() {
if $XZ -c "$@" "$FILE" > tmp_compressed; then
:
else
echo "Compressing failed: $* $FILE"
(exit 1)
exit 1
fi
if $XZ -cd tmp_compressed > tmp_uncompressed ; then
:
else
echo "Decompressing failed: $* $FILE"
(exit 1)
exit 1
fi
if cmp tmp_uncompressed "$FILE" ; then
:
else
echo "Decompressed file does not match" \
"the original: $* $FILE"
(exit 1)
exit 1
fi
if test -n "$XZDEC" ; then
if $XZDEC tmp_compressed > tmp_uncompressed ; then
:
else
echo "Decompressing failed: $* $FILE"
(exit 1)
exit 1
fi
if cmp tmp_uncompressed "$FILE" ; then
:
else
echo "Decompressed file does not match" \
"the original: $* $FILE"
(exit 1)
exit 1
fi
fi
# Show progress:
echo . | tr -d '\n\r'
}
XZ="../src/xz/xz --memlimit-compress=48MiB --memlimit-decompress=5MiB \
--no-adjust --threads=1 --check=crc64"
XZDEC="../src/xzdec/xzdec" # No memory usage limiter available
test -x ../src/xzdec/xzdec || XZDEC=
# Create the required input files.
if ./create_compress_files ; then
:
else
rm -f compress_*
echo "Failed to create files to test compression."
(exit 1)
exit 1
fi
# Remove temporary now (in case they are something weird), and on exit.
rm -f tmp_compressed tmp_uncompressed
trap 'rm -f tmp_compressed tmp_uncompressed' 0
# Compress and decompress each file with various filter configurations.
# This takes quite a bit of time.
echo "test_compress.sh:"
for FILE in compress_generated_* "$srcdir"/compress_prepared_*
do
MSG=`echo "x$FILE" | sed 's,^x,,; s,^.*/,,; s,^compress_,,'`
echo " $MSG" | tr -d '\n\r'
# Don't test with empty arguments; it breaks some ancient
# proprietary /bin/sh versions due to $@ used in test_xz().
test_xz -1
test_xz -2
test_xz -3
test_xz -4
# Disabled until Subblock format is stable.
# --subblock \
# --subblock=size=1 \
# --subblock=size=1,rle=1 \
# --subblock=size=1,rle=4 \
# --subblock=size=4,rle=4 \
# --subblock=size=8,rle=4 \
# --subblock=size=8,rle=8 \
# --subblock=size=4096,rle=12 \
#
for ARGS in \
--delta=dist=1 \
--delta=dist=4 \
--delta=dist=256 \
--x86 \
--powerpc \
--ia64 \
--arm \
--armthumb \
--sparc
do
test_xz $ARGS --lzma2=dict=64KiB,nice=32,mode=fast
# Disabled until Subblock format is stable.
# test_xz --subblock $ARGS --lzma2=dict=64KiB,nice=32,mode=fast
done
echo
done
(exit 0)
exit 0
| KubaKaszycki/kubux | xz-utils/tests/test_compress.sh | Shell | gpl-3.0 | 2,992 |
#!/bin/bash
NAME=hook
. ../build.inc
| dtzWill/ipcopter | docker/ipc-test/build.sh | Shell | isc | 39 |
#!/bin/sh
set -e
ROOTDIR=dist
BUNDLE=${ROOTDIR}/Mercury-Qt.app
CODESIGN=codesign
TEMPDIR=sign.temp
TEMPLIST=${TEMPDIR}/signatures.txt
OUT=signature.tar.gz
if [ ! -n "$1" ]; then
echo "usage: $0 <codesign args>"
echo "example: $0 -s MyIdentity"
exit 1
fi
rm -rf ${TEMPDIR} ${TEMPLIST}
mkdir -p ${TEMPDIR}
${CODESIGN} -f --file-list ${TEMPLIST} "$@" "${BUNDLE}"
for i in `grep -v CodeResources ${TEMPLIST}`; do
TARGETFILE="${BUNDLE}/`echo ${i} | sed "s|.*${BUNDLE}/||"`"
SIZE=`pagestuff $i -p | tail -2 | grep size | sed 's/[^0-9]*//g'`
OFFSET=`pagestuff $i -p | tail -2 | grep offset | sed 's/[^0-9]*//g'`
SIGNFILE="${TEMPDIR}/${TARGETFILE}.sign"
DIRNAME="`dirname ${SIGNFILE}`"
mkdir -p "${DIRNAME}"
echo "Adding detached signature for: ${TARGETFILE}. Size: ${SIZE}. Offset: ${OFFSET}"
dd if=$i of=${SIGNFILE} bs=1 skip=${OFFSET} count=${SIZE} 2>/dev/null
done
for i in `grep CodeResources ${TEMPLIST}`; do
TARGETFILE="${BUNDLE}/`echo ${i} | sed "s|.*${BUNDLE}/||"`"
RESOURCE="${TEMPDIR}/${TARGETFILE}"
DIRNAME="`dirname "${RESOURCE}"`"
mkdir -p "${DIRNAME}"
echo "Adding resource for: "${TARGETFILE}""
cp "${i}" "${RESOURCE}"
done
rm ${TEMPLIST}
tar -C ${TEMPDIR} -czf ${OUT} .
rm -rf ${TEMPDIR}
echo "Created ${OUT}"
| Jheguy2/Mercury | contrib/macdeploy/detached-sig-create.sh | Shell | mit | 1,261 |
#!/bin/bash
# Simple bash script to build basic lnd tools for all the platforms
# we support with the golang cross-compiler.
#
# Copyright (c) 2016 Company 0, LLC.
# Use of this source code is governed by the ISC
# license.
set -e
LND_VERSION_REGEX="lnd version (.+) commit"
PKG="github.com/lightningnetwork/lnd"
PACKAGE=lnd
# green prints one line of green text (if the terminal supports it).
function green() {
echo -e "\e[0;32m${1}\e[0m"
}
# red prints one line of red text (if the terminal supports it).
function red() {
echo -e "\e[0;31m${1}\e[0m"
}
# check_tag_correct makes sure the given git tag is checked out and the git tree
# is not dirty.
# arguments: <version-tag>
function check_tag_correct() {
local tag=$1
# For automated builds we can skip this check as they will only be triggered
# on tags.
if [[ "$SKIP_VERSION_CHECK" -eq "1" ]]; then
green "skipping version check, assuming automated build"
exit 0
fi
# If a tag is specified, ensure that that tag is present and checked out.
if [[ $tag != $(git describe) ]]; then
red "tag $tag not checked out"
exit 1
fi
# Build lnd to extract version.
go build ${PKG}/cmd/lnd
# Extract version command output.
lnd_version_output=$(./lnd --version)
# Use a regex to isolate the version string.
if [[ $lnd_version_output =~ $LND_VERSION_REGEX ]]; then
# Prepend 'v' to match git tag naming scheme.
lnd_version="v${BASH_REMATCH[1]}"
green "version: $lnd_version"
# If tag contains a release candidate suffix, append this suffix to the
# lnd reported version before we compare.
RC_REGEX="-rc[0-9]+$"
if [[ $tag =~ $RC_REGEX ]]; then
lnd_version+=${BASH_REMATCH[0]}
fi
# Match git tag with lnd version.
if [[ $tag != "${lnd_version}" ]]; then
red "lnd version $lnd_version does not match tag $tag"
exit 1
fi
else
red "malformed lnd version output"
exit 1
fi
}
# build_release builds the actual release binaries.
# arguments: <version-tag> <build-system(s)> <build-tags> <ldflags>
function build_release() {
local tag=$1
local sys=$2
local buildtags=$3
local ldflags=$4
green " - Packaging vendor"
go mod vendor
tar -czf vendor.tar.gz vendor
maindir=$PACKAGE-$tag
mkdir -p $maindir
cp vendor.tar.gz $maindir/
rm vendor.tar.gz
rm -r vendor
package_source="${maindir}/${PACKAGE}-source-${tag}.tar"
git archive -o "${package_source}" HEAD
gzip -f "${package_source}" >"${package_source}.gz"
cd "${maindir}"
for i in $sys; do
os=$(echo $i | cut -f1 -d-)
arch=$(echo $i | cut -f2 -d-)
arm=
if [[ $arch == "armv6" ]]; then
arch=arm
arm=6
elif [[ $arch == "armv7" ]]; then
arch=arm
arm=7
fi
dir="${PACKAGE}-${i}-${tag}"
mkdir "${dir}"
pushd "${dir}"
green " - Building: ${os} ${arch} ${arm} with build tags '${buildtags}'"
env CGO_ENABLED=0 GOOS=$os GOARCH=$arch GOARM=$arm go build -v -trimpath -ldflags="${ldflags}" -tags="${buildtags}" ${PKG}/cmd/lnd
env CGO_ENABLED=0 GOOS=$os GOARCH=$arch GOARM=$arm go build -v -trimpath -ldflags="${ldflags}" -tags="${buildtags}" ${PKG}/cmd/lncli
popd
if [[ $os == "windows" ]]; then
zip -r "${dir}.zip" "${dir}"
else
tar -cvzf "${dir}.tar.gz" "${dir}"
fi
rm -r "${dir}"
done
shasum -a 256 * >manifest-$tag.txt
}
# usage prints the usage of the whole script.
function usage() {
red "Usage: "
red "release.sh check-tag <version-tag>"
red "release.sh build-release <version-tag> <build-system(s)> <build-tags> <ldflags>"
}
# Whatever sub command is passed in, we need at least 2 arguments.
if [ "$#" -lt 2 ]; then
usage
exit 1
fi
# Extract the sub command and remove it from the list of parameters by shifting
# them to the left.
SUBCOMMAND=$1
shift
# Call the function corresponding to the specified sub command or print the
# usage if the sub command was not found.
case $SUBCOMMAND in
check-tag)
green "Checking if version tag exists"
check_tag_correct "$@"
;;
build-release)
green "Building release"
build_release "$@"
;;
*)
usage
exit 1
;;
esac
| LightningNetwork/lnd | scripts/release.sh | Shell | mit | 4,158 |
#!/bin/bash
# askYN.sh - Ask the user a yes/no question on stdout/stdin.
#
# Usage: askYN.sh [ "Question?" ]
#
# Exits with code 0 for yes, 1 for no.
#
# E.B. Smith - October 2013
set -eu
set -o pipefail
question="${1:-""}"
read -n 1 -r -p "${question} [y/N] "
echo ""
if [[ "${REPLY}" =~ ^[Yy]$ ]]; then
exit 0
else
exit 1
fi
| BranchMetrics/ios-branch-deep-linking | scripts/askYN.sh | Shell | mit | 352 |
#!/bin/bash
# This is the initial script run for new nodes on the network to get them set up.
## Blender and utils install
sudo apt-get update
sudo apt-get upgrade -y
sudo apt-get install -y blender htop tmux ufw
## UFW
sudo ufw allow 22 # SSH
sudo ufw allow 80 # HTTP
sudo ufw allow 8140 # Puppet
sudo ufw allow 443 # HTTPS
sudo ufw enable
## PUPPET
# Remove old file:
rm -f puppetlabs-release-pc1-xenial.deb
# Get Puppet package
wget https://apt.puppetlabs.com/puppetlabs-release-pc1-xenial.deb
# Install puppet sources
sudo dpkg -i puppetlabs-release-pc1-xenial.deb
# Update for puppet repos
sudo apt-get update
# Install the agent
sudo apt-get install -y puppet-agent
# Set master server
sudo /opt/puppetlabs/bin/puppet config set server jabba-masta-00.knoxschools.ad
# Now let's try running the agent for the first time
# This will give us things like the cert, which need to be signed on the master.
sudo /opt/puppetlabs/bin/puppet agent --waitforcert 20 --test --debug
# When that's done, restart puppet:
sudo service puppet restart
| lnstem-renderfarm/puppet-blenderfarm | puppet/modules/blenderfarm/files/slave-setup.sh | Shell | mit | 1,038 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for CESA-2016:2006
#
# Security announcement date: 2016-10-05 14:03:13 UTC
# Script generation date: 2017-01-13 21:13:57 UTC
#
# Operating System: CentOS 6
# Architecture: x86_64
#
# Vulnerable packages fixed in version:
# - kernel-abi-whitelists.noarch:2.6.32-642.6.1.el6
# - kernel-debug-devel.i686:2.6.32-642.6.1.el6
# - kernel-doc.noarch:2.6.32-642.6.1.el6
# - kernel-firmware.noarch:2.6.32-642.6.1.el6
# - kernel.x86_64:2.6.32-642.6.1.el6
# - kernel-debug.x86_64:2.6.32-642.6.1.el6
# - kernel-debug-devel.x86_64:2.6.32-642.6.1.el6
# - kernel-devel.x86_64:2.6.32-642.6.1.el6
# - kernel-headers.x86_64:2.6.32-642.6.1.el6
# - perf.x86_64:2.6.32-642.6.1.el6
# - python-perf.x86_64:2.6.32-642.6.1.el6
#
# Last versions recommended by security team:
# - kernel-abi-whitelists.noarch:2.6.32-642.13.1.el6
# - kernel-debug-devel.i686:2.6.32-642.13.1.el6
# - kernel-doc.noarch:2.6.32-642.13.1.el6
# - kernel-firmware.noarch:2.6.32-642.13.1.el6
# - kernel.x86_64:2.6.32-642.13.1.el6
# - kernel-debug.x86_64:2.6.32-642.13.1.el6
# - kernel-debug-devel.x86_64:2.6.32-642.13.1.el6
# - kernel-devel.x86_64:2.6.32-642.13.1.el6
# - kernel-headers.x86_64:2.6.32-642.13.1.el6
# - perf.x86_64:2.6.32-642.13.1.el6
# - python-perf.x86_64:2.6.32-642.13.1.el6
#
# CVE List:
# - CVE-2016-4470
# - CVE-2016-5829
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo yum install kernel-abi-whitelists.noarch-2.6.32 -y
sudo yum install kernel-debug-devel.i686-2.6.32 -y
sudo yum install kernel-doc.noarch-2.6.32 -y
sudo yum install kernel-firmware.noarch-2.6.32 -y
sudo yum install kernel.x86_64-2.6.32 -y
sudo yum install kernel-debug.x86_64-2.6.32 -y
sudo yum install kernel-debug-devel.x86_64-2.6.32 -y
sudo yum install kernel-devel.x86_64-2.6.32 -y
sudo yum install kernel-headers.x86_64-2.6.32 -y
sudo yum install perf.x86_64-2.6.32 -y
sudo yum install python-perf.x86_64-2.6.32 -y
| Cyberwatch/cbw-security-fixes | CentOS_6/x86_64/2016/CESA-2016:2006.sh | Shell | mit | 2,059 |
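# Usage: tfidf.sh <stemmer> <gram-size> "<transcript> [<transcript> ...]"
# Example (values are illustrative): ./tfidf.sh porter 2 "ep01.txt ep02.txt"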
stemmer=$1
gramSize=$2
transcript_list=$3
for transcript in $transcript_list; do
python audio_features/tfidf.py $stemmer $gramSize $transcript
done
| amudalab/concept-graphs | keyphrase/tfidf.sh | Shell | mit | 151 |
#!/bin/bash
### Volatility semi-automated memory image processing, for Windows images
### bsk for dfirnotes.org, Copyleft MIT License : https://github.com/DFIRnotes/rules/blob/master/LICENSE
### Requirements: SIFT 3 or volatility 2.5.x ; pictures needs PIL and dot available
### Run Volatility framework analysis plugins against a provided image in an opinionated order:
### 0) Quick pass to list processes and connections, 1) long run of many plugins with simple args,
### 2) long run of complex plugins, 3) make pictures
### Ref: FOR508, Art of Memory Forensics, Malware Analyst Cookbook
### Win7 and up version, set your profile appropriately
## always check the profile if things aren't working right!
VOLATILITY_PROFILE=Win7SP1x86
#define $VOLATILITY_FILEIN for your memory image via export or here
#VOLATILITY_FILEIN=xp-tdungan-memory-raw.001
#define VOLATILITY_LOCATION here or export, module -h for a hint
#VOLATILITY_LOCATION=file:///cases/xp-tdungan-memory/xp-tdungan-memory-raw.001
## redefine this to use another vol binary or include more plugins path
VOLATILITY_COMM=vol.py
## Redirect STDERR for the whole script, comment out to debug a thing
exec 2>/dev/null
## create and define OUT_FOLDER; . is fine
STARS="***Volatility batch***"
### TODO
## BUGFIX: Set out location to . if env var not set
## FEATURE: document how to branch -2
## FEATURE: Pull volatility version rather than static string
## FEATURE: if file exists and is greater than sizeof(vol usage error), skip the plugin ?
## FEATURE: tidy malsysproc extra linefeeds ?
## WISHLIST: find a way to use the ssdeep, baseline community plugins
## WISHLIST: duplicate image file to run plugins in parallel for faster results / test this more
## WISHLIST: port to BAT for Windows or BETTER python for crossplatform
###
## Volatility banner only needed once per run :)
echo $STARS using Volatility Foundation Volatility Framework 2.5 + Community plugins on `uname`
## get some tables upfront to look for interesting processes
echo "$STARS 0) First, quick tables upfront to look for interesting processes"
for p in pstree malsysproc malprocfind netscan imageinfo; do
echo -n "$p "
$VOLATILITY_COMM $p --output-file=$OUT_FOLDER/$VOLATILITY_FILEIN-vol25c-$p.txt ; done
echo; echo "$STARS 0)Quick tables completed. 1)Starting batch plugin processing ..."
## use imageinfo to get KDBG, add to our vol cmd for speedup
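## imageinfo output contains a line like "KDBG : 0x82944ca8L" (address varies per image)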
KDBG=$(grep KDBG *imageinfo* | awk -F':' '{print $2}' | sed -e 's/L//')
VOLATILITY_COMM="$VOLATILITY_COMM -g $KDBG"
## do the whole batch of data processing, simple arguments
for q in apihooks callbacks cmdline cmdscan clipboard consoles dlllist driverirp drivermodule driverscan editbox getsids idt iehistory handles hivelist hivescan modscan modules prefetchparser psxview schtasks shellbags ssdt; do
echo -n " $q, "
$VOLATILITY_COMM $q > $OUT_FOLDER/$VOLATILITY_FILEIN-vol25c-$q.txt; done
echo; echo "$STARS 1) Batch processing, simple plugin arguments done"
echo "$STARS 2) Starting complex plugins: autoruns V T all Table, pstree -V greptext, ldrmodules V, pstotal DOT, svcscan V, malfind D, mutantscan N, mftparser BODY, and timeliner BODY"
$VOLATILITY_COMM autoruns -v -t all --output=table > $OUT_FOLDER/$VOLATILITY_FILEIN-vol25c-autoruns.txt
$VOLATILITY_COMM pstree -v --output=greptext > $OUT_FOLDER/$VOLATILITY_FILEIN-vol25c-pstreeV-grep.txt
$VOLATILITY_COMM ldrmodules -v > $OUT_FOLDER/$VOLATILITY_FILEIN-vol25c-ldrmodulesv.txt
$VOLATILITY_COMM pstree --output=dot > $OUT_FOLDER/$VOLATILITY_FILEIN-vol25c-pstree.dot
$VOLATILITY_COMM svcscan -v > $OUT_FOLDER/$VOLATILITY_FILEIN-vol25c-svcscanv.txt
$VOLATILITY_COMM malfind -D $OUT_FOLDER > $OUT_FOLDER/$VOLATILITY_FILEIN-vol25c-malfindD.txt
$VOLATILITY_COMM mutantscan -s > $OUT_FOLDER/$VOLATILITY_FILEIN-vol25c-mutantsv.txt
$VOLATILITY_COMM mftparser --output=body > $OUT_FOLDER/$VOLATILITY_FILEIN-vol25c-mftparser-body.txt
$VOLATILITY_COMM timeliner --output=body > $OUT_FOLDER/$VOLATILITY_FILEIN-vol25c-tl.body
echo "$STARS 3) Make pictures!"
dot -T png $OUT_FOLDER/$VOLATILITY_FILEIN-vol25c-pstree.dot > $OUT_FOLDER/$VOLATILITY_FILEIN-vol25c-pstree.png
$VOLATILITY_COMM screenshot -D $OUT_FOLDER
echo "$STARS Volatility batch run on $VOLATILITY_FILEIN completed!"
### and then something like this
#VOLATILITY_FILEIN=xp-tdungan-memory-raw.001 VOLATILITY_PROFILE=WinXPSP3x86 VOLATILITY_COMM=vol.py; for pid in 3296 11640 12244 ; do echo -n "PID $pid :"; for p in dlllist ldrmodules malfind handles; do echo -n "$p "; $VOLATILITY_COMM $p -p $pid > $VOLATILITY_FILEIN-vol25c-$pid-$p.txt 2>/dev/null; done; echo; done
###
| DFIRnotes/rules | vol7.sh | Shell | mit | 4,576 |
#!/usr/bin/env bash
#$ -cwd
#$ -l h_vmem=2G
#$ -sync yes
######################################################################
## job script "generic_submit_to_cluster.sh"
## This is a generic shell script that calls a given Perl script
## with the supplied parameters.
## This is useful if you need to submit several similar jobs to the cluster
######################################################################
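## Example invocation (paths and parameters are hypothetical) -- submit as an
## SGE array job so each task receives its own SGE_TASK_ID:
##   qsub -t 1-10 generic_submit_to_cluster.sh /scratch/workdir run_fold.pl "--input data.fa"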
#standard path variable for any programs that are used
export PATH=/usr/local/vrna/1.8.5/bin/:$HOME/bin/:$PATH
# all parameters for the shell script
working_dir=$1
perlscript=$2
params=$3
echo working_dir: $working_dir
echo perlscript: $perlscript
echo params: $params
if [ -z "$SGE_TASK_ID" ]; then
echo "No SGE environment found! Set SGE_TASK_ID to 0!"
let SGE_TASK_ID=0
fi
echo call: "/usr/local/perl/bin/perl $perlscript $params --jobid $SGE_TASK_ID"
cd $working_dir
/usr/local/perl/bin/perl $perlscript $params --jobid $SGE_TASK_ID
| dmaticzka/GraphProt | bin/generic_submit_to_cluster.sh | Shell | mit | 969 |
nice python ./../topsy-crawler.py 1000 5 True negative/#prohibit '#prohibit' > logs/console_log_#prohibit.txt
nice python ./../topsy-crawler.py 1000 5 True negative/#forbid '#forbid' > logs/console_log_#forbid.txt
nice python ./../topsy-crawler.py 1000 5 True negative/#blackball '#blackball' > logs/console_log_#blackball.txt
nice python ./../topsy-crawler.py 1000 5 True negative/#damage '#damage' > logs/console_log_#damage.txt
nice python ./../topsy-crawler.py 1000 5 True negative/#invalidating '#invalidating' > logs/console_log_#invalidating.txt
nice python ./../topsy-crawler.py 1000 5 True negative/#ostracise '#ostracise' > logs/console_log_#ostracise.txt
nice python ./../topsy-crawler.py 1000 5 True negative/#ostracize '#ostracize' > logs/console_log_#ostracize.txt
nice python ./../topsy-crawler.py 1000 5 True negative/#negative '#negative' > logs/console_log_#negative.txt
nice python ./../topsy-crawler.py 1000 5 True negative/#negatively_charged '#negatively_charged' > logs/console_log_#negatively_charged.txt
nice python ./../topsy-crawler.py 1000 5 True negative/#prejudicial '#prejudicial' > logs/console_log_#prejudicial.txt
nice python ./../topsy-crawler.py 1000 5 True negative/#detrimental '#detrimental' > logs/console_log_#detrimental.txt
nice python ./../topsy-crawler.py 1000 5 True negative/#prejudicious '#prejudicious' > logs/console_log_#prejudicious.txt
nice python ./../topsy-crawler.py 1000 5 True negative/#electronegative '#electronegative' > logs/console_log_#electronegative.txt
nice python ./../topsy-crawler.py 1000 5 True negative/#veto '#veto' > logs/console_log_#veto.txt
nice python ./../topsy-crawler.py 1000 5 True negative/#proscribe '#proscribe' > logs/console_log_#proscribe.txt
nice python ./../topsy-crawler.py 1000 5 True negative/#disallow '#disallow' > logs/console_log_#disallow.txt
nice python ./../topsy-crawler.py 1000 5 True negative/#nix '#nix' > logs/console_log_#nix.txt
nice python ./../topsy-crawler.py 1000 5 True negative/#interdict '#interdict' > logs/console_log_#interdict.txt
nice python ./../topsy-crawler.py 1000 5 True negative/#cast_out '#cast_out' > logs/console_log_#cast_out.txt
nice python ./../topsy-crawler.py 1000 5 True negative/#subtraction '#subtraction' > logs/console_log_#subtraction.txt
nice python ./../topsy-crawler.py 1000 5 True negative/#shun '#shun' > logs/console_log_#shun.txt
nice python ./../topsy-crawler.py 1000 5 True negative/#banish '#banish' > logs/console_log_#banish.txt
nice python ./../topsy-crawler.py 1000 5 True negative/#barring '#barring' > logs/console_log_#barring.txt
nice python ./../topsy-crawler.py 1000 5 True negative/#damaging '#damaging' > logs/console_log_#damaging.txt
nice python ./../topsy-crawler.py 1000 5 True negative/#ban '#ban' > logs/console_log_#ban.txt
nice python ./../topsy-crawler.py 1000 5 True negative/#disconfirming '#disconfirming' > logs/console_log_#disconfirming.txt
nice python ./../topsy-crawler.py 1000 5 True negative/#minus '#minus' > logs/console_log_#minus.txt
| nakul225/SMM-II | TwitterCrawl/Wordnet/negative.sh | Shell | mit | 3,019 |
#!/bin/bash
# working directory
cd /
# git clone
rm -rf /data/app \
&& mkdir -p /data/app \
&& git clone $GIT_REPO --branch ${GIT_BRANCH:-"master"} /data/app \
&& chown -R www-data:www-data /data/app
# document root
sed -e "s|^DocumentRoot .*|DocumentRoot \"/data/app$DOC_ROOT\"|g" \
-i /etc/apache2/apache2.conf
# start apache
exec apache2-foreground | dortort/continuous-deployment | php/docker-entrypoint.sh | Shell | mit | 387 |
git clone -b 0.4.0-pami https://github.com/pami-inssjp/notifier.git notifier/app
| pami-inssjp/dos-debatics | docker/bootstrap.sh | Shell | mit | 81 |
set -e
if [ ! -n "$ZSH" ]; then
ZSH=~/.oh-my-zsh
fi
if [ -d "$ZSH" ]; then
exit
fi
hash git >/dev/null 2>&1 && env git clone --depth=1 --quiet https://github.com/IsmailM/oh-my-zsh.git $ZSH || {
  echo 'Error: git is not installed or the clone of oh-my-zsh failed'
  exit 1
}
if [ -f ~/.zshrc ] || [ -h ~/.zshrc ]; then
mv ~/.zshrc ~/.zshrc.pre-oh-my-zsh;
fi
cp $ZSH/templates/zshrc.zsh-template ~/.zshrc
sed -i -e "/^export ZSH=/ c\\
export ZSH=$ZSH
" ~/.zshrc
sed -i -e "/export PATH=/ c\\
export PATH=\"$PATH\"
" ~/.zshrc
echo 'Oh-My-ZSH has been installed'
| IsmailM/oh-my-zsh | tools/install.sh | Shell | mit | 501 |
#!/bin/sh
# by: "John Hazelwood" <[email protected]>
#
# build_users.sh - Do a docker build 1 or more user Dockerfiles.
# Extracts the image tag and username from the Dockerfile itself and uses those to name the user image.
#
# Without args: build all Dockerfiles found in jumper/users/ recursively
# args can be Dockerfiles or directories
#
# Usage Examples:
# ./build_users.sh
# ./build_users.sh users/corp/
# ./build_users.sh users/corp/*
# ./build_users.sh users/corp/Dockerfile-c*
# ./build_users.sh users/Dockerfile-example
# ./build_users.sh users/Dockerfile-bob* users/devel/Dockerfile-stan users/sales/
#
oops(){ echo "${@}"; exit 1; }
build_context=$(dirname $0)/users/
. $(dirname $0)/cfg/settings.sh
if [ $# -eq 0 ]; then
targets="`find $build_context -type f -name Dockerfile-\*`"
else
for this in $@; do
if [ -f $this ]; then
targets="${targets} ${this}"
elif [ -d $this ]; then
targets="${targets} `find ${this} -type f -name Dockerfile-\*`"
else
oops "$this is not a file or directory"
fi
done
fi
for dockerfile in $targets; do
PERSON=`egrep 'ENV PERSON ' $dockerfile|awk '{print $3}'|egrep "^[a-z0-9]+$"` || \
oops "Failed to get PERSON from ${dockerfile}. Username must be numbers and/or lowercase letters only."
docker build \
--force-rm=true \
--tag="${image_repo_name}/${container_name_prefix}-${PERSON}:${image_tag}" \
--file="${dockerfile}" \
$build_context || exit $?
done
| jhazelwo/docker-jumper | jumper/build_users.sh | Shell | mit | 1,541 |
rpm -ivh http://pkgs.repoforge.org/rpmforge-release/rpmforge-release-0.5.3-1.el7.rf.x86_64.rpm
rpm -ivh http://dl.fedoraproject.org/pub/epel/7/x86_64/e/epel-release-7-5.noarch.rpm
rpm -ivh http://repo.webtatic.com/yum/el7/webtatic-release.rpm
rpm -ivh http://dl.atrpms.net/all/atrpms-repo-7-7.el7.x86_64.rpm
rpm -ivh http://rpms.famillecollet.com/enterprise/remi-release-7.rpm
rpm -ivh http://yum.puppetlabs.com/puppetlabs-release-el-7.noarch.rpm
yum -y install puppet
systemctl start puppet
systemctl enable puppet | JATool/JATool | manifests/script.sh | Shell | mit | 515 |
#!/usr/bin/env bash
cd ./api
forever start ./bin/www
forever start ./algorithm/hot.js
forever start ./algorithm/index.js
cd ../frontend/
forever start build/build.js | fengyuanzemin/graduation | run.sh | Shell | mit | 166 |
#!/bin/sh
set -eo pipefail -o nounset
## Get the hg19.genome file to sort
genome=https://raw.githubusercontent.com/gogetdata/ggd-recipes/master/genomes/Homo_sapiens/hg19/hg19.genome
wget --quiet $genome
## Get the hg19 cpg island file and process it
## unzip it,
## remove any lines that do not have a scaffolding in the hg19.genome file. (If scaffolding in hg19.genome, grep exists with 0)
## Get all columns but the first (bin column)
## add header to the file
## sort it based on the genome file
## bgzip it
wget --quiet -O - http://hgdownload.cse.ucsc.edu/goldenpath/hg19/database/cpgIslandExt.txt.gz \
| gzip -dc \
| awk '{ if (system("grep -Fq " $2 " hg19.genome") == 0) print $0}' \
| cut -f 2- \
| awk -v OFS="\t" 'BEGIN {print "#Table Info: https://genome.ucsc.edu/cgi-bin/hgTables?db=hg19&hgta_group=regulation&hgta_track=cpgIslandExt&hgta_table=cpgIslandExt&hgta_doSchema=describe+table+schema\n#chrom\tstart\tend\tname\tlength\tcpgNum\tgcNum\tperCpG\tperGC\tobsExp"} {print $0}' \
| gsort /dev/stdin $genome \
| bgzip -c > hg19-cpg-islands-ucsc-v1.bed.gz
## Tabix the processed cpg file
tabix hg19-cpg-islands-ucsc-v1.bed.gz
rm hg19.genome
| gogetdata/ggd-recipes | recipes/genomics/Homo_sapiens/hg19/hg19-cpg-islands-ucsc-v1/recipe.sh | Shell | mit | 1,186 |
#!/bin/sh
. @LEDCTRL@/ledctrl
silence_wifi
if test `cat /sys/class/net/eth0/carrier` -eq 0
then
logger -s "eth0 got no-carrier ..."
led_error
fi
| tobw/meta-jens | recipes-core/init-ifupdown/init-ifupdown/wifi/post_down.sh | Shell | mit | 149 |
#!/usr/bin/env bash
set -eo pipefail
# shellcheck disable=SC2120
setup_circle() {
echo "=====> setup_circle on CIRCLE_NODE_INDEX: $CIRCLE_NODE_INDEX"
sudo -E CI=true make -e sshcommand
# need to add the dokku user to the docker group
sudo usermod -G docker dokku
[[ "$1" == "buildstack" ]] && BUILD_STACK=true make -e stack
sudo -E CI=true make -e install
sudo -E make -e setup-deploy-tests
bash --version
docker version
lsb_release -a
# setup .dokkurc
sudo -E mkdir -p /home/dokku/.dokkurc
sudo -E chown dokku:ubuntu /home/dokku/.dokkurc
sudo -E chmod 775 /home/dokku/.dokkurc
# pull node:4 image for testing
sudo docker pull node:4
}
# shellcheck disable=SC2119
setup_circle
exit $?
| elia/dokku | tests/ci/setup.sh | Shell | mit | 720 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for USN-2876-1
#
# Security announcement date: 2016-01-20 00:00:00 UTC
# Script generation date: 2017-01-01 21:05:08 UTC
#
# Operating System: Ubuntu 12.04 LTS
# Architecture: i386
#
# Vulnerable packages fixed in version:
# - ecryptfs-utils:96-0ubuntu3.5
#
# Last versions recommanded by security team:
# - ecryptfs-utils:96-0ubuntu3.5
#
# CVE List:
# - CVE-2016-1572
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo apt-get install --only-upgrade ecryptfs-utils=96-0ubuntu3.5 -y
| Cyberwatch/cbw-security-fixes | Ubuntu_12.04_LTS/i386/2016/USN-2876-1.sh | Shell | mit | 629 |
go_path=$(which go)
if [[ -x ${go_path} ]]; then
export GOPATH=$PROJECTS/go
export PATH=$PATH:$(go env GOPATH)/bin
fi
| Veraticus/dotfiles | go/path.zsh | Shell | mit | 122 |
#!/bin/bash
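# When the data volume is mounted, `mount` prints a line such as
# "/dev/xvdb on /var/lib/pgsql/data type ext4 (rw)" (device name is illustrative).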
if [[ ! "$(mount)" =~ \ /var/lib/pgsql/data\ type ]]
then
echo "There is no mounted volume"
else
if [ -z "$(ls -A /var/lib/pgsql/data)" ]; then
/usr/pgsql-9.1/bin/initdb -D /var/lib/pgsql/data
echo "host all all 172.17.42.1/32 trust" >> /var/lib/pgsql/data/pg_hba.conf
echo "listen_addresses='*'" >> /var/lib/pgsql/data/postgresql.conf
fi
/usr/pgsql-9.1/bin/postgres -D /var/lib/pgsql/data
fi
| cdelaitre/ci | dockerimages/postgres91/postgres-start.sh | Shell | mit | 440 |
# Parses and retrieves a remote branch given a branch name.
#
# If the branch name contains '*' it will retrieve remote branches
# and try to match against tags and heads, returning the latest matching.
#
# Usage
# -antigen-parse-branch https://github.com/user/repo.git x.y.z
#
# Returns
# Branch name
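#
# Example
#   -antigen-parse-branch https://github.com/user/repo.git 'v1.*'
#   # -> prints the latest matching tag, e.g. v1.9.2 (illustrative)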
-antigen-parse-branch () {
local url="$1" branch="$2" branches
local match mbegin mend MATCH MBEGIN MEND
if [[ "$branch" =~ '\*' ]]; then
branches=$(git ls-remote --tags -q "$url" "$branch"|cut -d'/' -f3|sort -n|tail -1)
# There is no --refs flag in git 1.8 and below, this way we
# emulate this flag -- also git 1.8 ref order is undefined.
    branch=${${branches#*/*/}%^*} # Why are you like this?
fi
echo $branch
}
| typosquats/dotfiles | .antigen/src/helpers/parse-branch.zsh | Shell | mit | 755 |
#!/bin/sh
set -e
echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
SWIFT_STDLIB_PATH="${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}"
install_framework()
{
if [ -r "${BUILT_PRODUCTS_DIR}/$1" ]; then
local source="${BUILT_PRODUCTS_DIR}/$1"
elif [ -r "${BUILT_PRODUCTS_DIR}/$(basename "$1")" ]; then
local source="${BUILT_PRODUCTS_DIR}/$(basename "$1")"
elif [ -r "$1" ]; then
local source="$1"
fi
local destination="${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
if [ -L "${source}" ]; then
echo "Symlinked..."
source="$(readlink "${source}")"
fi
  # Use filter instead of exclude so missing patterns don't throw errors
echo "rsync -av --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${destination}\""
rsync -av --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${destination}"
local basename
basename="$(basename -s .framework "$1")"
binary="${destination}/${basename}.framework/${basename}"
if ! [ -r "$binary" ]; then
binary="${destination}/${basename}"
fi
# Strip invalid architectures so "fat" simulator / device frameworks work on device
if [[ "$(file "$binary")" == *"dynamically linked shared library"* ]]; then
strip_invalid_archs "$binary"
fi
# Resign the code if required by the build settings to avoid unstable apps
code_sign_if_enabled "${destination}/$(basename "$1")"
# Embed linked Swift runtime libraries. No longer necessary as of Xcode 7.
if [ "${XCODE_VERSION_MAJOR}" -lt 7 ]; then
local swift_runtime_libs
swift_runtime_libs=$(xcrun otool -LX "$binary" | grep --color=never @rpath/libswift | sed -E s/@rpath\\/\(.+dylib\).*/\\1/g | uniq -u && exit ${PIPESTATUS[0]})
for lib in $swift_runtime_libs; do
echo "rsync -auv \"${SWIFT_STDLIB_PATH}/${lib}\" \"${destination}\""
rsync -auv "${SWIFT_STDLIB_PATH}/${lib}" "${destination}"
code_sign_if_enabled "${destination}/${lib}"
done
fi
}
# Signs a framework with the provided identity
code_sign_if_enabled() {
if [ -n "${EXPANDED_CODE_SIGN_IDENTITY}" -a "${CODE_SIGNING_REQUIRED}" != "NO" -a "${CODE_SIGNING_ALLOWED}" != "NO" ]; then
# Use the current code_sign_identitiy
echo "Code Signing $1 with Identity ${EXPANDED_CODE_SIGN_IDENTITY_NAME}"
local code_sign_cmd="/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} ${OTHER_CODE_SIGN_FLAGS} --preserve-metadata=identifier,entitlements '$1'"
if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
code_sign_cmd="$code_sign_cmd &"
fi
echo "$code_sign_cmd"
eval "$code_sign_cmd"
fi
}
# Strip invalid architectures
strip_invalid_archs() {
binary="$1"
# Get architectures for current file
archs="$(lipo -info "$binary" | rev | cut -d ':' -f1 | rev)"
stripped=""
for arch in $archs; do
if ! [[ "${VALID_ARCHS}" == *"$arch"* ]]; then
# Strip non-valid architectures in-place
lipo -remove "$arch" -output "$binary" "$binary" || exit 1
stripped="$stripped $arch"
fi
done
if [[ "$stripped" ]]; then
echo "Stripped $binary of architectures:$stripped"
fi
}
if [[ "$CONFIGURATION" == "Debug" ]]; then
install_framework "$BUILT_PRODUCTS_DIR/MMPlayerView/MMPlayerView.framework"
fi
if [[ "$CONFIGURATION" == "Release" ]]; then
install_framework "$BUILT_PRODUCTS_DIR/MMPlayerView/MMPlayerView.framework"
fi
if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
wait
fi
| MillmanY/MMPlayerView | Example/Pods/Target Support Files/Pods-MMPlayerView_Example/Pods-MMPlayerView_Example-frameworks.sh | Shell | mit | 3,723 |
#!/bin/bash
# release gem to rubygems
# exit on any error
set -e
#############################################################################
## defaults
readonly progname=$(basename $0)
readonly script_dir=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
readonly main_dir=$( cd "${script_dir}" && cd .. && pwd )
readonly application=$(basename ${main_dir})
readonly module_version=$( cd "${main_dir}" && grep spec.version *.gemspec | grep -o '[=].*[^ ]' | tr -d "= ")
#############################################################################
## functions
## write green message with time stamp to stdout
function puts () {
echo -e "\033[0;32m"$(date +"%Y-%m-%d %T") $*"\033[0m"
}
## bump version, must be in main git directory
function bump_version () {
puts "bump ${application} gem version"
old_version=$( ruby -I lib/${application} -e "require 'version'; puts Gem::Version.new(${module_version})" )
puts "gem version currently:" ${old_version}
new_version=$( ruby -I lib/${application} -e "require 'version'; puts Gem::Version.new(${module_version} + '.1').bump" )
puts "we will change it into:" ${new_version}
cat lib/${application}/version.rb | sed "s/$old_version/$new_version/" > lib/${application}/version.rb.new
mv lib/${application}/version.rb.new lib/${application}/version.rb
}
## commit and push version, must be in main git directory
function commit_and_push_version () {
puts "commit and push ${application} gem version"
git add lib/${application}/version.rb
git commit -m "m31's version bumper"
git push
}
#############################################################################
## main processing
cd ${main_dir}
puts "trying to release ${application} gem to rubygems"
puts
puts "updating git repository"
git pull -r
puts "build new Gemfile.lock with bundle install"
rm Gemfile.lock
bundle install
puts "check and test"
bundle exec rake spec
bump_version
commit_and_push_version
puts "build gem"
result=$( gem build ${application}.gemspec )
puts " ${result}"
# get gem file name
set +e
gem_file=$( echo $result | grep -o "${application}-[.0-9]*\.gem$" )
set -e
puts " gem file:" $gem_file
## check if group was specified
if [ -z "${gem_file}" ]; then
echo "generated gem file not found" >&2
exit 1
fi
puts "push gem to rubygems"
gem push ${gem_file} --host https://rubygems.org
echo -e "\033[0;34mThe lioness has rejoined her cub, and all is right in the jungle...\033[0m"
| m-31/vcenter_lib_mongodb | scripts/deploy_gem.sh | Shell | mit | 2,460 |
#!/bin/sh
# this tests whether all required args are listed as
# missing when no arguments are specified
# failure
../build/examples/test2 > tmp.out 2>&1
if cmp -s tmp.out $srcdir/test62.out; then
exit 0
else
exit 1
fi
| mjkoo/tclap | tests/test62.sh | Shell | mit | 227 |
#!/usr/bin/env bash
# NOTE: Piping curl output through tac circumvents "failed writing body"
# https://stackoverflow.com/questions/16703647/why-curl-return-and-error-23-failed-writing-body
set -e
set -u
set -o pipefail
SCRIPT_PATH="$( cd "$(dirname "$0")" && pwd -P )"
DVLBOX_PATH="$( cd "${SCRIPT_PATH}/../.." && pwd -P )"
# shellcheck disable=SC1090
. "${SCRIPT_PATH}/../scripts/.lib.sh"
RETRIES=20
DISABLED_VERSIONS=("8.0" "8.1" "8.2")
echo
echo "# --------------------------------------------------------------------------------------------------"
echo "# [Framework] Wordpress"
echo "# --------------------------------------------------------------------------------------------------"
echo
# -------------------------------------------------------------------------------------------------
# Pre-check
# -------------------------------------------------------------------------------------------------
PHP_VERSION="$( get_php_version "${DVLBOX_PATH}" )"
if [[ ${DISABLED_VERSIONS[*]} =~ ${PHP_VERSION} ]]; then
printf "[SKIP] Skipping all checks for PHP %s\\n" "${PHP_VERSION}"
exit 0
fi
# -------------------------------------------------------------------------------------------------
# ENTRYPOINT
# -------------------------------------------------------------------------------------------------
###
### Get required env values
###
MYSQL_ROOT_PASSWORD="$( "${SCRIPT_PATH}/../scripts/env-getvar.sh" "MYSQL_ROOT_PASSWORD" )"
HOST_PORT_HTTPD="$( "${SCRIPT_PATH}/../scripts/env-getvar.sh" "HOST_PORT_HTTPD" )"
TLD_SUFFIX="$( "${SCRIPT_PATH}/../scripts/env-getvar.sh" "TLD_SUFFIX" )"
###
### Custom variables
###
DB_NAME="my_wp"
PROJECT_NAME="this-is-my-grepable-project-name"
VHOST="my-wordpress"
# Create vhost dir
create_vhost_dir "${VHOST}"
# Download Wordpress
run "docker-compose exec --user devilbox -T php bash -c ' \
git clone https://github.com/WordPress/WordPress /shared/httpd/${VHOST}/wordpress \
&& ln -sf wordpress /shared/httpd/${VHOST}/htdocs'" \
"${RETRIES}" "${DVLBOX_PATH}"
# Switch to an earlier Wordpress version for older PHP versions
if [ "${PHP_VERSION}" = "5.3" ] || [ "${PHP_VERSION}" = "5.4" ] || [ "${PHP_VERSION}" = "5.5" ]; then
run "docker-compose exec --user devilbox -T php bash -c ' \
cd /shared/httpd/${VHOST}/wordpress \
&& git checkout 5.1.3'" \
"${RETRIES}" "${DVLBOX_PATH}"
# Checkout latest git tag
else
run "docker-compose exec --user devilbox -T php bash -c ' \
cd /shared/httpd/${VHOST}/wordpress \
&& git checkout \"\$(git tag | sort -V | tail -1)\"'" \
"${RETRIES}" "${DVLBOX_PATH}"
fi
# Setup Database
run "docker-compose exec --user devilbox -T php mysql -u root -h mysql --password=\"${MYSQL_ROOT_PASSWORD}\" -e \"DROP DATABASE IF EXISTS ${DB_NAME}; CREATE DATABASE ${DB_NAME};\"" "${RETRIES}" "${DVLBOX_PATH}"
# Configure Wordpress database settings
run "docker-compose exec --user devilbox -T php bash -c \"perl -pe 's/\\r$//' < /shared/httpd/${VHOST}/wordpress/wp-config-sample.php > /shared/httpd/${VHOST}/wordpress/wp-config.php\"" "${RETRIES}" "${DVLBOX_PATH}"
run "docker-compose exec --user devilbox -T php sed -i\"\" \"s/define(\\s*'DB_NAME.*/define('DB_NAME', '${DB_NAME}');/g\" /shared/httpd/${VHOST}/wordpress/wp-config.php" "${RETRIES}" "${DVLBOX_PATH}"
run "docker-compose exec --user devilbox -T php sed -i\"\" \"s/define(\\s*'DB_USER.*/define('DB_USER', 'root');/g\" /shared/httpd/${VHOST}/wordpress/wp-config.php" "${RETRIES}" "${DVLBOX_PATH}"
run "docker-compose exec --user devilbox -T php sed -i\"\" \"s/define(\\s*'DB_PASSWORD.*/define('DB_PASSWORD', '${MYSQL_ROOT_PASSWORD}');/g\" /shared/httpd/${VHOST}/wordpress/wp-config.php" "${RETRIES}" "${DVLBOX_PATH}"
run "docker-compose exec --user devilbox -T php sed -i\"\" \"s/define(\\s*'DB_HOST.*/define('DB_HOST', 'mysql');/g\" /shared/httpd/${VHOST}/wordpress/wp-config.php" "${RETRIES}" "${DVLBOX_PATH}"
run "docker-compose exec --user devilbox -T php sed -i\"\" \"s/define(\\s*'WP_DEBUG.*/define('WP_DEBUG', true);/g\" /shared/httpd/${VHOST}/wordpress/wp-config.php" "${RETRIES}" "${DVLBOX_PATH}"
run "docker-compose exec --user devilbox -T php php -l /shared/httpd/${VHOST}/wordpress/wp-config.php" "${RETRIES}" "${DVLBOX_PATH}"
# Install Wordpress
if ! run "docker-compose exec --user devilbox -T php curl -sS --fail -L -XPOST -c cookie.txt -b cookie.txt \
'http://${VHOST}.${TLD_SUFFIX}/wp-admin/install.php?step=1'\
--data 'language=1' >/dev/null" "${RETRIES}" "${DVLBOX_PATH}"; then
run "docker-compose exec --user devilbox -T php curl -sS --fail -L -XPOST -c cookie.txt -b cookie.txt \
'http://${VHOST}.${TLD_SUFFIX}/wp-admin/install.php?step=1'\
--data 'language=1' >/dev/null" "1" "${DVLBOX_PATH}" || true
run "docker-compose exec --user devilbox -T php curl -sS --fail -L -I \
'http://${VHOST}.${TLD_SUFFIX}/wp-admin/install.php?step=1'" "1" "${DVLBOX_PATH}" || true
run "docker-compose exec --user devilbox -T php curl -sS --fail -L \
'http://${VHOST}.${TLD_SUFFIX}/'" "1" "${DVLBOX_PATH}" || true
run "docker-compose logs php" || true
run "docker-compose logs httpd" || true
exit 1
fi
if ! run "docker-compose exec --user devilbox -T php curl -sS --fail -L -XPOST -c cookie.txt -b cookie.txt \
'http://${VHOST}.${TLD_SUFFIX}/wp-admin/install.php?step=2' \
--data 'weblog_title=${PROJECT_NAME}' \
--data 'user_name=admin' \
--data 'admin_password=password' \
--data 'admin_password2=password' \
--data 'pw_weak=on' \
--data 'admin_email=test%40test.com' \
--data 'blog_public=0' \
--data 'Submit=Install+WordPress&language=' >/dev/null" "${RETRIES}" "${DVLBOX_PATH}"; then
run "docker-compose exec --user devilbox -T php curl -sS --fail -L -XPOST -c cookie.txt -b cookie.txt \
'http://${VHOST}.${TLD_SUFFIX}/wp-admin/install.php?step=2' \
--data 'weblog_title=${PROJECT_NAME}' \
--data 'user_name=admin' \
--data 'admin_password=password' \
--data 'admin_password2=password' \
--data 'pw_weak=on' \
--data 'admin_email=test%40test.com' \
--data 'blog_public=0' \
--data 'Submit=Install+WordPress' \
--data 'language='" "1" "${DVLBOX_PATH}" || true
run "docker-compose logs php" || true
run "docker-compose logs httpd" || true
exit 1
fi
# Test Wordpress
if ! run "docker-compose exec --user devilbox -T php curl -sS --fail -L 'http://${VHOST}.${TLD_SUFFIX}/' | grep '${PROJECT_NAME}' >/dev/null" "${RETRIES}" "${DVLBOX_PATH}"; then
run "docker-compose exec --user devilbox -T php curl -sS -L 'http://${VHOST}.${TLD_SUFFIX}/'" "1" "${DVLBOX_PATH}" || true
exit 1
fi
if ! run "curl -sS --fail -L --header 'host: ${VHOST}.${TLD_SUFFIX}' 'http://localhost:${HOST_PORT_HTTPD}/' | grep '${PROJECT_NAME}' >/dev/null" "${RETRIES}" "${DVLBOX_PATH}"; then
run "curl -sS -L --header 'host: ${VHOST}.${TLD_SUFFIX}' 'http://localhost:${HOST_PORT_HTTPD}/'" "1" "${DVLBOX_PATH}" || true
exit 1
fi
# Check for Exceptions, Errors or Warnings
if ! run_fail "docker-compose exec --user devilbox -T php curl -sS --fail -L 'http://${VHOST}.${TLD_SUFFIX}/' | grep -Ei 'fatal|error|warn' >/dev/null" "${RETRIES}" "${DVLBOX_PATH}"; then
run "docker-compose exec --user devilbox -T php curl -sS -L 'http://${VHOST}.${TLD_SUFFIX}/' | grep -Ei 'fatal|error|warn'" "1" "${DVLBOX_PATH}"
exit 1
fi
| cytopia/devilbox | .tests/tests/framework-wordpress.sh | Shell | mit | 7,197 |
# Path to your oh-my-zsh installation.
export ZSH=$HOME/.oh-my-zsh
# Set name of the theme to load.
# Look in ~/.oh-my-zsh/themes/
# Optionally, if you set this to "random", it'll load a random theme each
# time that oh-my-zsh is loaded.
ZSH_THEME="mh"
# Uncomment the following line to use case-sensitive completion.
# CASE_SENSITIVE="true"
# Uncomment the following line to disable bi-weekly auto-update checks.
# DISABLE_AUTO_UPDATE="true"
# Uncomment the following line to change how often to auto-update (in days).
# export UPDATE_ZSH_DAYS=13
# Uncomment the following line to disable colors in ls.
# DISABLE_LS_COLORS="true"
# Uncomment the following line to disable auto-setting terminal title.
DISABLE_AUTO_TITLE="true"
# Uncomment the following line to enable command auto-correction.
# ENABLE_CORRECTION="true"
# Uncomment the following line to display red dots whilst waiting for completion.
# COMPLETION_WAITING_DOTS="true"
# Uncomment the following line if you want to disable marking untracked files
# under VCS as dirty. This makes repository status check for large repositories
# much, much faster.
# DISABLE_UNTRACKED_FILES_DIRTY="true"
# Uncomment the following line if you want to change the command execution time
# stamp shown in the history command output.
# The optional three formats: "mm/dd/yyyy"|"dd.mm.yyyy"|"yyyy-mm-dd"
# HIST_STAMPS="mm/dd/yyyy"
# Would you like to use another custom folder than $ZSH/custom?
# ZSH_CUSTOM=/path/to/new-custom-folder
# Which plugins would you like to load? (plugins can be found in ~/.oh-my-zsh/plugins/*)
# Custom plugins may be added to ~/.oh-my-zsh/custom/plugins/
# Example format: plugins=(rails git textmate ruby lighthouse)
# Add wisely, as too many plugins slow down shell startup.
plugins=(web-search colored-man-pages colorize docker docker-compose)
# User configuration
# export PATH="/Users/matt/.rvm/gems/ruby-2.1.3/bin:/Users/matt/.rvm/gems/ruby-2.1.3@global/bin:/Users/matt/.rvm/rubies/ruby-2.1.3/bin:/Library/Frameworks/Python.framework/Versions/Current/bin:/opt/local/bin:/opt/local/sbin:/Users/matt/code/nio-scripts:/Users/matt/.nvm/v0.10.33/bin:/Users/matt/code/go_appengine:/usr/local/heroku/bin:/Users/matt/android/android-sdk/sdk/tools:/Users/matt/android/android-sdk/sdk/platform-tools:/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin:/usr/local/go/bin:/Users/matt/.rvm/bin"
# export MANPATH="/usr/local/man:$MANPATH"
source $ZSH/oh-my-zsh.sh
# You may need to manually set your language environment
# export LANG=en_US.UTF-8
# Preferred editor for local and remote sessions
# if [[ -n $SSH_CONNECTION ]]; then
# export EDITOR='vim'
# else
# export EDITOR='mvim'
# fi
# Compilation flags
# export ARCHFLAGS="-arch x86_64"
# ssh
# export SSH_KEY_PATH="~/.ssh/dsa_id"
# Set personal aliases, overriding those provided by oh-my-zsh libs,
# plugins, and themes. Aliases can be placed here, though oh-my-zsh
# users are encouraged to define aliases within the ZSH_CUSTOM folder.
# For a full list of active aliases, run `alias`.
#
# Example aliases
# alias zshconfig="mate ~/.zshrc"
# alias ohmyzsh="mate ~/.oh-my-zsh"
| mattdodge/dotfiles | zsh/ohmy.zsh | Shell | mit | 3,119 |
#!/bin/bash
set -eux -o pipefail
# Gather a list of all top-level non-module subdirectories.
non_module_dirs=()
while IFS= read -r -d $'\0'; do
non_module_dirs+=("$REPLY")
done < <(find . -maxdepth 1 -type d -and -not -name "modules" -print0)
# Initialize all top-level non-module subdirectories.
for dir in "${non_module_dirs[@]}"; do
pushd "${dir}" || exit 1
if [[ "${CI}" = "true" ]]; then
terraform init \
-backend-config="bucket=carterjones-terraform-state-ci"
else
terraform init
fi
popd
done
# Run tflint and terraform validate against all top-level non-module
# subdirectories.
for dir in "${non_module_dirs[@]}"; do
pushd "${dir}" || exit 1
tflint --module
terraform validate
popd
done
| carterjones/infrastructure | terraform/lint.sh | Shell | mit | 767 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for USN-3052-1
#
# Security announcement date: 2016-08-10 00:00:00 UTC
# Script generation date: 2017-01-01 21:05:34 UTC
#
# Operating System: Ubuntu 14.04 LTS
# Architecture: i686
#
# Vulnerable packages fixed in version:
# - linux-image-3.13.0-93-lowlatency:3.13.0-93.140
# - linux-image-3.13.0-93-generic:3.13.0-93.140
#
# Last versions recommanded by security team:
# - linux-image-3.13.0-93-lowlatency:3.13.0-93.140
# - linux-image-3.13.0-93-generic:3.13.0-93.140
#
# CVE List:
# - CVE-2016-4470
# - CVE-2016-5243
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo apt-get install --only-upgrade linux-image-3.13.0-93-lowlatency=3.13.0-93.140 -y
sudo apt-get install --only-upgrade linux-image-3.13.0-93-generic=3.13.0-93.140 -y
| Cyberwatch/cbw-security-fixes | Ubuntu_14.04_LTS/i686/2016/USN-3052-1.sh | Shell | mit | 886 |
#!/bin/bash
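# Run clang-tidy with automatic fixes over every .cpp/.hpp/.h file, skipping
# ConceptsOrigin* and any file that contains a "NO CLANG FORMAT" marker.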
for var in $(find . ! -path "./ConceptsOrigin*" -iname "*.cpp" -exec grep -L -e "NO CLANG FORMAT" {} \; );
do clang-tidy -fix $var -- -std=c++14 -stdlib=libc++ -I.;
done
for var in $(find . ! -path "./ConceptsOrigin*" -iname "*.hpp" -exec grep -L -e "NO CLANG FORMAT" {} \; );
do clang-tidy -fix $var -- -std=c++14 -stdlib=libc++ -I.;
done
for var in $(find . ! -path "./ConceptsOrigin*" -iname "*.h" -exec grep -L -e "NO CLANG FORMAT" {} \; );
do clang-tidy -fix $var -- -std=c++14 -stdlib=libc++ -I.;
done
| jbcoe/CppSandbox | tidy.sh | Shell | mit | 527 |
#!/bin/bash
timeout -k 5 $3 dOp $1
| keram88/gelpia_tests | src/dOp_wrapper.sh | Shell | mit | 36 |
#!/bin/sh
set -e
set -u
set -o pipefail
function on_error {
echo "$(realpath -mq "${0}"):$1: error: Unexpected failure"
}
trap 'on_error $LINENO' ERR
if [ -z ${FRAMEWORKS_FOLDER_PATH+x} ]; then
# If FRAMEWORKS_FOLDER_PATH is not set, then there's nowhere for us to copy
# frameworks to, so exit 0 (signalling the script phase was successful).
exit 0
fi
echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
COCOAPODS_PARALLEL_CODE_SIGN="${COCOAPODS_PARALLEL_CODE_SIGN:-false}"
SWIFT_STDLIB_PATH="${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}"
# Used as a return value for each invocation of `strip_invalid_archs` function.
STRIP_BINARY_RETVAL=0
# This protects against multiple targets copying the same framework dependency at the same time. The solution
# was originally proposed here: https://lists.samba.org/archive/rsync/2008-February/020158.html
RSYNC_PROTECT_TMP_FILES=(--filter "P .*.??????")
# Copies and strips a vendored framework
install_framework()
{
if [ -r "${BUILT_PRODUCTS_DIR}/$1" ]; then
local source="${BUILT_PRODUCTS_DIR}/$1"
elif [ -r "${BUILT_PRODUCTS_DIR}/$(basename "$1")" ]; then
local source="${BUILT_PRODUCTS_DIR}/$(basename "$1")"
elif [ -r "$1" ]; then
local source="$1"
fi
local destination="${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
if [ -L "${source}" ]; then
echo "Symlinked..."
source="$(readlink "${source}")"
fi
# Use filter instead of exclude so missing patterns don't throw errors.
echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --links --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${destination}\""
rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --links --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${destination}"
local basename
basename="$(basename -s .framework "$1")"
binary="${destination}/${basename}.framework/${basename}"
if ! [ -r "$binary" ]; then
binary="${destination}/${basename}"
elif [ -L "${binary}" ]; then
echo "Destination binary is symlinked..."
dirname="$(dirname "${binary}")"
binary="${dirname}/$(readlink "${binary}")"
fi
# Strip invalid architectures so "fat" simulator / device frameworks work on device
if [[ "$(file "$binary")" == *"dynamically linked shared library"* ]]; then
strip_invalid_archs "$binary"
fi
# Resign the code if required by the build settings to avoid unstable apps
code_sign_if_enabled "${destination}/$(basename "$1")"
# Embed linked Swift runtime libraries. No longer necessary as of Xcode 7.
if [ "${XCODE_VERSION_MAJOR}" -lt 7 ]; then
local swift_runtime_libs
swift_runtime_libs=$(xcrun otool -LX "$binary" | grep --color=never @rpath/libswift | sed -E s/@rpath\\/\(.+dylib\).*/\\1/g | uniq -u)
for lib in $swift_runtime_libs; do
echo "rsync -auv \"${SWIFT_STDLIB_PATH}/${lib}\" \"${destination}\""
rsync -auv "${SWIFT_STDLIB_PATH}/${lib}" "${destination}"
code_sign_if_enabled "${destination}/${lib}"
done
fi
}
# Copies and strips a vendored dSYM
install_dsym() {
local source="$1"
warn_missing_arch=${2:-true}
if [ -r "$source" ]; then
# Copy the dSYM into the targets temp dir.
echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${DERIVED_FILES_DIR}\""
rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${DERIVED_FILES_DIR}"
local basename
basename="$(basename -s .dSYM "$source")"
binary_name="$(ls "$source/Contents/Resources/DWARF")"
binary="${DERIVED_FILES_DIR}/${basename}.dSYM/Contents/Resources/DWARF/${binary_name}"
# Strip invalid architectures so "fat" simulator / device frameworks work on device
if [[ "$(file "$binary")" == *"Mach-O "*"dSYM companion"* ]]; then
strip_invalid_archs "$binary" "$warn_missing_arch"
fi
if [[ $STRIP_BINARY_RETVAL == 1 ]]; then
# Move the stripped file into its final destination.
echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --links --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${DERIVED_FILES_DIR}/${basename}.framework.dSYM\" \"${DWARF_DSYM_FOLDER_PATH}\""
rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --links --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${DERIVED_FILES_DIR}/${basename}.dSYM" "${DWARF_DSYM_FOLDER_PATH}"
else
# The dSYM was not stripped at all, in this case touch a fake folder so the input/output paths from Xcode do not reexecute this script because the file is missing.
touch "${DWARF_DSYM_FOLDER_PATH}/${basename}.dSYM"
fi
fi
}
# Copies the bcsymbolmap files of a vendored framework
install_bcsymbolmap() {
local bcsymbolmap_path="$1"
local destination="${BUILT_PRODUCTS_DIR}"
echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${bcsymbolmap_path}" "${destination}""
rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${bcsymbolmap_path}" "${destination}"
}
# Signs a framework with the provided identity
code_sign_if_enabled() {
if [ -n "${EXPANDED_CODE_SIGN_IDENTITY:-}" -a "${CODE_SIGNING_REQUIRED:-}" != "NO" -a "${CODE_SIGNING_ALLOWED}" != "NO" ]; then
# Use the current code_sign_identity
echo "Code Signing $1 with Identity ${EXPANDED_CODE_SIGN_IDENTITY_NAME}"
local code_sign_cmd="/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} ${OTHER_CODE_SIGN_FLAGS:-} --preserve-metadata=identifier,entitlements '$1'"
if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
code_sign_cmd="$code_sign_cmd &"
fi
echo "$code_sign_cmd"
eval "$code_sign_cmd"
fi
}
# Strip invalid architectures
strip_invalid_archs() {
binary="$1"
warn_missing_arch=${2:-true}
# Get architectures for current target binary
binary_archs="$(lipo -info "$binary" | rev | cut -d ':' -f1 | awk '{$1=$1;print}' | rev)"
# Intersect them with the architectures we are building for
intersected_archs="$(echo ${ARCHS[@]} ${binary_archs[@]} | tr ' ' '\n' | sort | uniq -d)"
# If there are no archs supported by this binary then warn the user
if [[ -z "$intersected_archs" ]]; then
if [[ "$warn_missing_arch" == "true" ]]; then
echo "warning: [CP] Vendored binary '$binary' contains architectures ($binary_archs) none of which match the current build architectures ($ARCHS)."
fi
STRIP_BINARY_RETVAL=0
return
fi
stripped=""
for arch in $binary_archs; do
if ! [[ "${ARCHS}" == *"$arch"* ]]; then
# Strip non-valid architectures in-place
lipo -remove "$arch" -output "$binary" "$binary"
stripped="$stripped $arch"
fi
done
if [[ "$stripped" ]]; then
echo "Stripped $binary of architectures:$stripped"
fi
STRIP_BINARY_RETVAL=1
}
install_artifact() {
artifact="$1"
base="$(basename "$artifact")"
case $base in
*.framework)
install_framework "$artifact"
;;
*.dSYM)
# Suppress arch warnings since XCFrameworks will include many dSYM files
install_dsym "$artifact" "false"
;;
*.bcsymbolmap)
install_bcsymbolmap "$artifact"
;;
*)
echo "error: Unrecognized artifact "$artifact""
;;
esac
}
copy_artifacts() {
file_list="$1"
while read artifact; do
install_artifact "$artifact"
done <$file_list
}
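# CocoaPods writes one artifact path per line into this per-configuration list;
# each entry is dispatched by extension in install_artifact above.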
ARTIFACT_LIST_FILE="${BUILT_PRODUCTS_DIR}/cocoapods-artifacts-${CONFIGURATION}.txt"
if [ -r "${ARTIFACT_LIST_FILE}" ]; then
copy_artifacts "${ARTIFACT_LIST_FILE}"
fi
if [[ "$CONFIGURATION" == "Debug" ]]; then
install_framework "${BUILT_PRODUCTS_DIR}/Lighty/Lighty.framework"
fi
if [[ "$CONFIGURATION" == "Release" ]]; then
install_framework "${BUILT_PRODUCTS_DIR}/Lighty/Lighty.framework"
fi
if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
wait
fi
| abdullahselek/Lighty | Pods/Target Support Files/Pods-iOS Sample/Pods-iOS Sample-frameworks.sh | Shell | mit | 8,751 |
#! /bin/bash
set -ue
# This script solves the Jenkins auth problem on macOS.
# Since we use automation to deal with deployment and packaging in Jenkins,
# we hit a lot of problems caused by user auth, such as docker, mvn build ...
# With this script, Jenkins will run under the login user instead.
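# Usage: run this script as the logged-in user that Jenkins should run as; it
# patches the LaunchDaemon plist, fixes ownership and restarts the service.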
runInitCheck(){
    sys_info=$(uname -s)
    if [ "$sys_info" != "Darwin" ]; then
        echo "This script only runs on macOS!"
        exit 1
    fi
    # Under "set -e" a failing id(1) would abort the script silently,
    # so test for the jenkins user explicitly.
    if ! id -u jenkins >/dev/null 2>&1; then
        echo 'Install Jenkins before running this script.'
        exit 1
    fi
}
chJenkinsRunner(){
    jenkins_runner_path='/Library/Application Support/Jenkins/jenkins-runner.sh'
    env_path='PATH=/usr/bin:/bin:/usr/sbin:/sbin:/usr/local/bin'
    echo 'Adding env variable path for jenkins ...'
    # With "set -e" a failed grep would abort the script, so run it inside the if.
    if grep -q 'export PATH=/usr/bin' "$jenkins_runner_path"; then
        echo 'Env variable path for jenkins has already been added!'
    else
        sudo sed -i '.bak' '12i\'$'\n'"export $env_path"$'\n' "$jenkins_runner_path"
    fi
}
chConfigs(){
    jenkins_conf_path=/Library/LaunchDaemons/org.jenkins-ci.plist
    echo "Replacing [Jj]enkins user with $1"
    if grep -q "<string>$1</string>" "$jenkins_conf_path"; then
        echo "[Jj]enkins user has already been replaced with $1"
    else
        sudo sed -i '' "s#<string>[Jj]enkins</string>#<string>$1</string>#" "$jenkins_conf_path"
    fi
}
chJenkinsOwner(){
echo "Chown jenkins & log foler owner to $1:staff"
sudo chown -R $1:staff /Users/Shared/Jenkins/
sudo chown -R $1:staff /var/log/jenkins
}
restartJenkins(){
    echo "Restarting Jenkins"
    sudo launchctl unload /Library/LaunchDaemons/org.jenkins-ci.plist
    sudo launchctl load /Library/LaunchDaemons/org.jenkins-ci.plist
    sleep 3
    # Run the status pipeline inside the if so "set -e" doesn't abort on no match.
    if ps aux | grep jenkins.war | grep -v grep; then
        echo "Done!"
    else
        echo "Please check jenkins status!"
    fi
}
main(){
runInitCheck
chJenkinsRunner
user=$(whoami)
echo "Current user:$user"
chConfigs $user
chJenkinsOwner $user
restartJenkins
}
main
exit 0 | blueroc2003/opsscripts | jenkins/make_jenkins_use_current_user.sh | Shell | mit | 1,986 |
#!/bin/bash
export APP_INCLUDE=./require.php
export QUEUE=*
export COUNT=1
export VVERBOSE=1 # for debugging
export REDIS_BACKEND=localhost:6379
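# php-resque reads QUEUE, COUNT, VVERBOSE, APP_INCLUDE and REDIS_BACKEND from
# the environment when resque.php boots its workers.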
. /etc/rc.d/init.d/functions
start() {
/usr/bin/php ./resque.php
}
stop() {
ps -ef | grep resque | grep -v grep | grep -v resque-web | awk '{print $2}' | xargs kill -15
}
kill() {
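    # NB: this function shadows the shell builtin "kill"; the "xargs kill -9"
    # below still runs /bin/kill, because xargs executes the external binary.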
ps -ef | grep resque | grep -v grep | grep -v resque-web | awk '{print $2}' | xargs kill -9
}
case "$1" in
start)
number=$(ps aux | grep php-resque/resque.php | grep -v grep | wc -l)
if [ $number -gt 0 ]
then
echo "php-resque is running. ($number workers)"
echo "You may wanna stop them before you start."
else
start
fi
;;
stop)
stop
;;
kill)
kill
;;
status)
number=$(ps aux | grep php-resque/resque.php | grep -v grep | wc -l)
if [ $number -gt 0 ]
then
echo "php-resque is running. ($number workers)"
else
echo "php-resque is not running."
fi
;;
*)
echo -n "Usage: $0 {start|stop|status}"
esac
| firmy/resque | demo/php-resque.sh | Shell | mit | 1,135 |
#!/usr/bin/env bash
# This file is part of The RetroPie Project
#
# The RetroPie Project is the legal property of its developers, whose names are
# too numerous to list here. Please refer to the COPYRIGHT.md file distributed with this source.
#
# See the LICENSE.md file at the top-level directory of this distribution and
# at https://raw.githubusercontent.com/RetroPie/RetroPie-Setup/master/LICENSE.md
#
rp_module_id="firefox-esr"
rp_module_desc="FireFox-ESR - Formally known as IceWeasel, the Rebranded Firefox Web Browser"
rp_module_licence="MPL2 https://www.mozilla.org/media/MPL/2.0/index.815ca599c9df.txt"
rp_module_section="exp"
rp_module_flags="!mali !x86"
function depends_firefox-esr() {
getDepends xorg matchbox
}
function install_bin_firefox-esr() {
aptInstall firefox-esr
}
function configure_firefox-esr() {
mkRomDir "ports"
mkdir -p "$md_inst"
moveConfigDir "$home/.mozilla" "$md_conf_root/$md_id"
cat >"$md_inst/firefox-esr.sh" << _EOF_
#!/bin/bash
xset -dpms s off s noblank
matchbox-window-manager -use_titlebar no &
/usr/bin/firefox-esr
_EOF_
chmod +x "$md_inst/firefox-esr.sh"
addPort "$md_id" "firefox-esr" "FireFox-ESR - Formally known as IceWeasel, the Rebranded Firefox Web Browser" "xinit $md_inst/firefox-esr.sh"
}
| zerojay/RetroPie-Extra | scriptmodules/ports/firefox-esr.sh | Shell | mit | 1,285 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for DSA-2688-1
#
# Security announcement date: 2013-05-23 00:00:00 UTC
# Script generation date: 2017-01-01 21:06:34 UTC
#
# Operating System: Debian 6 (Squeeze)
# Architecture: x86_64
#
# Vulnerable packages fix on version:
# - libxres:2:1.0.4-1+squeeze
#
# Last versions recommended by security team:
# - libxres:2:1.0.4-1+squeeze
#
# CVE List:
# - CVE-2013-1988
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo apt-get install --only-upgrade libxres=2:1.0.4-1+squeeze -y
| Cyberwatch/cbw-security-fixes | Debian_6_(Squeeze)/x86_64/2013/DSA-2688-1.sh | Shell | mit | 624 |
#!/bin/bash
ULTRAWIDE=0
WIDE=0
NORMAL=0
TELE=0
ULTRATELE=0
COUNT=0
SUBCOUNT=0
COUNTNOEXIF=0
HEREIAM=`pwd`
# Function showhelp: shows a little help message
function showhelp {
echo -e "\n"
echo "Usage: stats_focal.sh [-d DIR] [-s STRING] [-f FORMAT]"
echo " -d DIR: root directory where the photos directories live, WITHOUT trailing slash; by default it's the current directory."
echo " -s STRING: sets the STRING to look for in the EXIF data; default: 'Focal Length'."
echo " -f FORMAT: sets the FORMAT of the camera; available values: 'm43', 'apsc', 'ff', 'mf'; default is 'm43'."
echo ""
echo "Examples:"
echo " ./stats_focal.sh -d /media/Fotos/2014 -> will proccess the *.jpg and *.JPG inside all the subdirectories under /media/Fotos/2014/."
echo " ./stats_focal.sh -d /media/Fotos/2014 -s \"Focal size\" -> will proccess the *.jpg and *.JPG inside all the subdirectories under /media/Fotos/2014/ looking for string 'Focal size' in the EXIF data."
echo " ./stats_focal.sh -f \"apsc\" -> will proccess the *.jpg and *.JPG inside all the subdirectories under the current directory, using format Full Frame to calculate the focal lenght."
echo ""
echo ""
}
# Function countfiles: recursive function to loop through all the subdirectories, and all the *.jpg and *.JPG files inside them
function countfiles {
    echo -e "\nReading `pwd` ..."
    # Handle lower- and upper-case extensions with a single loop instead of
    # two duplicated blocks; glob expansion happens after the IFS=\n word split.
    for PATTERN in '*.jpg' '*.JPG'; do
        EXISTS=`ls -1 $PATTERN 2>/dev/null | wc -l`
        if [ $EXISTS != 0 ]; then
            for FILE in $PATTERN; do
                VAL=`exiftool $FILE | grep -i "$EXIFSTRING" | grep -v equivalent | grep -v "35mm Format" | grep mm | cut -d":" -f 2 | cut -d" " -f 2 | cut -d"." -f 1 | uniq`
                if [ "$VAL" != "" ]; then
                    if [ $VAL -lt $ULTRAWIDELIMIT ]; then
                        ULTRAWIDE=$((ULTRAWIDE+1))
                    elif [ $VAL -ge $ULTRAWIDELIMIT ] && [ $VAL -le $WIDELIMIT ]; then
                        WIDE=$((WIDE+1))
                    elif [ $VAL -gt $WIDELIMIT ] && [ $VAL -le $NORMALLIMIT ]; then
                        NORMAL=$((NORMAL+1))
                    elif [ $VAL -gt $NORMALLIMIT ] && [ $VAL -le $TELELIMIT ]; then
                        TELE=$((TELE+1))
                    elif [ $VAL -gt $TELELIMIT ]; then
                        ULTRATELE=$((ULTRATELE+1))
                    fi
                    COUNT=$((COUNT+1))
                    SUBCOUNT=$((SUBCOUNT+1))
                else
                    COUNTNOEXIF=$((COUNTNOEXIF+1))
                fi
            done
        fi
    done
    # If there are any subdirectories, go inside them recursively
    EXISTS=`ls -1 */ 2>/dev/null | wc -l`
    if [ $EXISTS != 0 ]; then
        for SUBFOLDER in `ls -d */`; do
            cd `pwd`/$SUBFOLDER
            countfiles
            cd ..
        done
    fi
    #echo "Processed $SUBCOUNT"
    SUBCOUNT=0
}
command -v exiftool >/dev/null 2>&1 || { echo >&2 "I require 'exiftool', but it's not installed. Aborting."; exit 1; }
if [ "$1" == "-h" ]; then
showhelp
exit 0
fi
# Vars to loop through directories
OLDIFS=$IFS
IFS=$'\n' # This is to use only \n as spacer, and not other spaces or tabs (useful for the 'for' loop)
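# e.g. a directory named "My Photos 2014" would otherwise be split into three
# words by the default space/tab/newline IFS.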
# Default values
WORKDIR=`pwd`
EXIFSTRING="Focal Length"
CAMFORMAT="m43"
# Analyze params
ARGG=""
while test $# -gt 0
do
case "$1" in
-d) ARGG="WORKDIR"
;;
-s) ARGG="EXIFSTRING"
;;
-f) ARGG="CAMFORMAT"
;;
-*) echo "Bad option $1"
showhelp
exit 1
;;
--*) echo "Bad option $1"
showhelp
exit 1
;;
*) eval $ARGG='$1'
;;
esac
shift
done
# Set focal lengths depending on camera format. The reference is the 35mm format (Full Frame)
# UltraWide: < 28mm equiv
# Wide: 29-40mm equiv
# Normal: 41-60mm equiv
# Tele: 61-130mm equiv
# UltraTele: > 131mm equiv
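# The per-format limits below are the Full Frame limits divided by the crop
# factor (rounded), e.g. for m43 (x2): 28/2=14, 40/2=20, 60/2=30, 130/2=65.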
case "$CAMFORMAT" in
m43)
# Factor = x2
ULTRAWIDELIMIT=14
WIDELIMIT=20
NORMALLIMIT=30
TELELIMIT=65
;;
apsc)
# Factor = x1.5
ULTRAWIDELIMIT=18
WIDELIMIT=27
NORMALLIMIT=40
TELELIMIT=86
;;
ff)
# Factor = x1
ULTRAWIDELIMIT=28
WIDELIMIT=40
NORMALLIMIT=60
TELELIMIT=130
;;
mf)
# Factor = x0.651
ULTRAWIDELIMIT=43
WIDELIMIT=62
NORMALLIMIT=92
TELELIMIT=200
;;
esac
# Loop through directories
for FOLDER in `ls -d $WORKDIR/*/`; do
# Delete quotation marks, if exists
PROCESSDIR=`echo $FOLDER | sed "s/\"//g"`
cd $PROCESSDIR
countfiles
#echo "Processsed $COUNT files"
done
IFS=$OLDIFS # Undo this change
# Show final report
echo -e "\n======================================\n"
echo "Total de archivos: `echo $COUNT+$COUNTNOEXIF | bc`"
echo "Total de archivos procesados: $COUNT"
echo "Total de archivos sin datos EXIF: $COUNTNOEXIF"
echo -e "\n======================================\n"
echo "Ultra angular: $ULTRAWIDE"
echo "Angular: $WIDE"
echo "Normal: $NORMAL"
echo "Tele: $TELE"
echo "Ultra tele: $ULTRATELE"
echo ""
# Go back to the former path
cd $HEREIAM
exit 0
| aremesal/photo-focal-stats-script | stats_focal.sh | Shell | mit | 6,067 |
#!/bin/sh
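# Travis CI exposes $encrypted_*_key and $encrypted_*_iv for files encrypted
# with "travis encrypt-file"; decrypt the deploy key and install it as the
# default SSH identity so later git/ssh operations can authenticate.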
openssl aes-256-cbc -K $encrypted_4e8862369e3e_key -iv $encrypted_4e8862369e3e_iv -in travis-deploy-key.enc -out travis-deploy-key -d;
chmod 600 travis-deploy-key;
cp travis-deploy-key ~/.ssh/id_rsa; | Katrix-/AckCord | scripts/decrypt-keys.sh | Shell | mit | 210 |
#!/bin/bash
if [ "$TRAVIS_REPO_SLUG" == "JakobOvrum/hexchatd" ] && [ "$TRAVIS_PULL_REQUEST" == "false" ] && [ "$TRAVIS_BRANCH" == "master" ]; then
git clone --recursive --branch=gh-pages https://github.com/${TRAVIS_REPO_SLUG}.git gh-pages
cd gh-pages
git config credential.helper "store --file=.git/credentials"
echo "https://${TOKEN}:@github.com" > .git/credentials
git config --global user.name "travis-ci"
git config --global user.email "[email protected]"
git config --global push.default simple
echo -e "Generating DDoc...\n"
sh ./generate.sh
git add -f *.html
git commit -m "Lastest documentation on successful travis build $TRAVIS_BUILD_NUMBER auto-pushed to gh-pages"
git push
echo -e "Published DDoc to gh-pages.\n"
fi
| JakobOvrum/hexchatd | push-ddoc.sh | Shell | mit | 747 |
#!/bin/bash
cd "$( dirname "${BASH_SOURCE[0]}" )"
cd -P ..
P=src/main/resources/examples
for F in src/main/resources/examples/*.trig; do
F=${F%.*}; F=${F##*/}
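    # Strip the extension and any leading path to get the bare example name.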
echo "ASCII plots for $F.trig" > $P/$F.txt
echo >> $P/$F.txt
for Q in src/main/resources/queries/*.sparql; do
Q=${Q%.*}; Q=${Q##*/}
echo "$Q plot:" >> $P/$F.txt
echo >> $P/$F.txt
scripts/Run.sh -p $Q $P/$F.trig >> $P/$F.txt
echo "" >> $P/$F.txt
done
done
| tkuhn/nanolytics | scripts/run-examples.sh | Shell | mit | 449 |
#!/bin/bash
# Download and extract phpMemcachedAdmin to provide a dashboard view and
# admin interface to the goings on of memcached when running
if [[ ! -d "/srv/www/default/memcached-admin" ]]; then
echo -e "\nDownloading phpMemcachedAdmin, see https://github.com/wp-cloud/phpmemcacheadmin"
cd /srv/www/default
wget -q -O phpmemcachedadmin.tar.gz "https://github.com/wp-cloud/phpmemcacheadmin/archive/1.2.2.1.tar.gz"
tar -xf phpmemcachedadmin.tar.gz
mv phpmemcacheadmin* memcached-admin
rm phpmemcachedadmin.tar.gz
else
echo "phpMemcachedAdmin already installed."
fi
| ezekg/theme-juice-vvv | scripts/provision/memcached.sh | Shell | mit | 584 |
# Backups, swaps and undos are stored here.
mkdir -p $DOTFILES/caches/vim
# Download Vim plugins.
if [[ "$(type -P vim)" ]]; then
vim +PlugUpdate +qall
fi
| unicell/dotfiles | init/50_vim.sh | Shell | mit | 158 |
#!/bin/sh
# Helper script to create feature branch (always branches from dev)
git checkout -b $1 dev | phtrivier/ube | scripts/git-start-feature.sh | Shell | mit | 100 |