code
stringlengths 2
1.05M
| repo_name
stringlengths 5
110
| path
stringlengths 3
922
| language
stringclasses 1
value | license
stringclasses 15
values | size
int64 2
1.05M
|
---|---|---|---|---|---|
#!/bin/sh
#tommi 24.10.2015
#Syyua.sh version 0.5.0
#Pacman and yaourt inspired tool
#
# Updates a TrueOS or vanilla FreeBSD system: base system, binary
# packages and the ports tree, then reports ports with newer versions
# and any locked packages.
#
#feature? another switch to check for CURRENT?
#add support for printing ports UPDATING lines for AFFECTING.
# limit these to show only the last 2-4 weeks?
#add these to Version/Verbose output
# echo ''
#echo 'Syyua update script'
#echo 'inspired by pacman and yaourt'
echo ''

# pkg(8) bootstrap: on FreeBSD, running "pkg" with no arguments when the
# real tool is missing triggers the bootstrap installer.
if [ -e "/usr/sbin/pkg" ]
then
echo ''
echo 'found pkg'
echo ''
else
echo ''
echo ' install pkg?'
echo ''
pkg
fi

# Make sure the ports tree has been fetched and extracted at least once.
if [ -e "/var/db/portsnap/INDEX" ]
then
echo ''
echo 'ports found and they are extracted'
echo ''
else
echo ''
echo 'install portsnap and ports'
portsnap fetch extract
echo ''
fi

# portmaster is needed below to list outdated ports.
if [ -e "/usr/local/sbin/portmaster" ]
then
echo ''
echo 'portmaster installed'
echo ''
else
echo ''
echo 'install portmaster'
pkg install portmaster
echo ''
fi

# TrueOS systems update through pc-updatemanager, plain FreeBSD through
# freebsd-update.
if [ -e "/etc/defaults/trueos" ]
then
echo ''
echo 'running TrueOS'
echo ''
echo '/usr/local/bin/pc-updatemanager check'
/usr/local/bin/pc-updatemanager check
echo ''
echo '/usr/local/bin/pc-updatemanager pkgcheck'
/usr/local/bin/pc-updatemanager pkgcheck
echo ''
echo ''
echo 'pkg update'
pkg update
echo ''
echo 'pkg upgrade'
pkg upgrade
echo ''
echo ''
echo 'portsnap fetch update'
portsnap fetch update
echo ''
echo 'Ports has newer versions of the following software:'
portmaster -L | grep New
echo ''
else
echo ''
echo 'running FreeBSD'
echo ''
echo 'freebsd-update fetch'
freebsd-update fetch
echo ''
echo 'freebsd-update install'
freebsd-update install
echo ''
echo ''
echo ''
# The banners previously advertised "pkg update -q" / "pkg upgrade -q"
# although the commands below never passed -q; messages now match the
# commands that actually run.
echo 'pkg update'
pkg update
echo ''
echo 'pkg upgrade'
pkg upgrade
echo ''
echo ''
echo 'portsnap fetch update'
portsnap fetch update
echo ''
echo 'Ports has newer versions of the following software:'
portmaster -L | grep New |awk '{print $5}'
echo ''
fi

echo ''
#List of locked packages
pkg lock -l
echo ''
echo 'Syyua finished'
echo ''
|
pernila/scripts
|
dev/Syyua.sh
|
Shell
|
bsd-2-clause
| 1,939 |
#!/bin/bash
## Edit these two lines according to your server setup
## NB: HOST deliberately bundles ssh options with the user@host string,
## so it must stay *unquoted* where expanded below (word-splitting wanted).
HOST="[email protected] -pxxxx"
DEPLOY_DIR="/home/xxxx/deploy/"
## Probably don't edit any of the below unless you want to modify functionality

## Install?
## The unquoted here-doc delimiter means $DEPLOY_DIR is expanded locally
## before the script is sent to the remote shell.
ssh $HOST << EOF
cd ~
#Clone in deployment scripts if they don't already exist
if [ ! -d "$DEPLOY_DIR" ]; then
echo "Setting up remote deployment scripts in $DEPLOY_DIR"
git clone "https://github.com/sheadawson/silverstripe-bash-deploy.git" $DEPLOY_DIR
chmod +x "${DEPLOY_DIR}deploy.sh"
chmod +x "${DEPLOY_DIR}configure.sh"
fi
EOF

## Configure?
if [ "$1" == "configure" ]; then
ssh $HOST -t "${DEPLOY_DIR}configure.sh; bash --login"
exit
fi

## Deploy
## Default to the "prod" environment when no argument is given.
## (Fixed: the original tested `[ ! $1 ]`, which misbehaves for empty or
## multi-word arguments; -z on a quoted expansion is the reliable form.)
if [ -z "$1" ]; then
ENVIRONMENT='prod'
else
ENVIRONMENT=${1}
fi
CONFIG_FILE="${DEPLOY_DIR}config/config-${ENVIRONMENT}"

ssh $HOST << EOF
cd $DEPLOY_DIR
if [ ! -f "$CONFIG_FILE" ]; then
echo "No config file $CONFIG_FILE found. Please run configure"
exit
fi
./deploy.sh $ENVIRONMENT
exit
EOF
|
sheadawson/silverstripe-bash-deploy
|
deploy-local.sh
|
Shell
|
bsd-2-clause
| 995 |
#!/bin/bash
# GrapeFS re-encoding benchmark: repeatedly renders a PNG through a
# GrapeFS computation directory and collects per-operation perf logs.
#
# NOTE: the original declared #!/bin/sh but used {1..5} brace ranges,
# which POSIX sh does not expand (each loop would run exactly once with
# the literal string "{1..5}"); bash is required for the loops to work.

GRAPEFS="/tmp/GrapeFS"
RESULT_SUBDIR="GrapeFS.Reencoding"

EVAL_DIR=$(readlink -f "$(dirname "$0")")
TESTING_DIR="$EVAL_DIR/.."
RESULT_DIR="$EVAL_DIR/Results/$RESULT_SUBDIR"

# perf counter names harvested from $GRAPEFS/.perf below
OPERATIONS="parse dump updateAssembly executeAssembly mapPath"

# Start from a clean result directory.
if [ -d "$RESULT_DIR" ]
then
cd "$RESULT_DIR"
cd ..
rm -rf -- "$RESULT_SUBDIR"
fi

cd "$TESTING_DIR"
mkdir -p "$RESULT_DIR"
mkdir "$GRAPEFS/test.c"

# -f: the logs do not exist on the first run (the original rm errored).
rm -f "$RESULT_DIR.stdout"
rm -f "$RESULT_DIR.stderr"

for i in {1..5}
do
cat "$TESTING_DIR/Kernel/Passthrough.c" > "$GRAPEFS/test.c/Kernel.c"
cat "$TESTING_DIR/Data/1024x1024.png" > "$GRAPEFS/test.c/data"
setfattr -n grapefs.encoding -v image "$GRAPEFS/test.c/data"
setfattr -n grapefs.encoding -v image "$GRAPEFS/test.c/Output"
for j in {1..10}
do
touch "$GRAPEFS/test.c"
ls -lisa "$GRAPEFS/test.c/Output/Output.png" 1>>"$RESULT_DIR.stdout" 2>>"$RESULT_DIR.stderr"
done
done

# Concatenate the per-operation perf samples into one file per operation.
for op in $OPERATIONS
do
if [ -d "$GRAPEFS/.perf" ]
then
oplist=$(find "$GRAPEFS/.perf" -name "$op")
for f in $oplist
do
cat "$f" >> "$RESULT_DIR/$op"
done
fi
done

# Prepend the matching header (when one exists) to each result file.
for f in "$RESULT_DIR"/*
do
f=$(basename "$f")
if [ -f "$EVAL_DIR/Header/$f" ]
then
cat "$EVAL_DIR/Header/$f" "$RESULT_DIR/$f" > "$RESULT_DIR/$f.new"
mv "$RESULT_DIR/$f.new" "$RESULT_DIR/$f"
fi
done
|
crurik/GrapeFS
|
Testing/Evaluation/Reencoding.sh
|
Shell
|
bsd-2-clause
| 1,282 |
#@IgnoreInspection BashAddShebang
# shellcheck shell=bash disable=SC2034,SC1090,SC2164
#
# THIS IS NOT IMMUTABLE DEFINITION FILE, don't use it to enforce fixed version.
#
# See setenv-java.sh and tool-download.sh for the details how this is used.
# We use Zulu JDK from: https://www.azul.com/downloads/zulu-community
# For each platform you choose proper format (plain archive, not installer),
# copy the download link, change the final dir (for Zulu it's archive name without extension),
# change expected checksum (and check its format) and you're done.
#
# msys for Windows+Git, linux for Linux or darwin for OSX
# Normalise OSTYPE (strip digits, dots and dashes, e.g. "linux-gnu" ->
# "linux", "msys" stays "msys") and pick the matching Zulu JDK 17.0.0
# download, final directory name, checksum and unpack command.
if [ "${OSTYPE//[0-9.-]*/}" = "msys" ]
then
ARCHIVE_URL=https://cdn.azul.com/zulu/bin/zulu17.28.13-ca-jdk17.0.0-win_x64.zip
FINAL_DIR=zulu17.28.13-ca-jdk17.0.0-win_x64
ARCHIVE_SUM=f4437011239f3f0031c794bb91c02a6350bc941d4196bdd19c9f157b491815a3
ARCHIVE_SUM_APP=sha256sum
UNPACK_APP=unzip
elif [ "${OSTYPE//[0-9.-]*/}" = "linux" ]
then
ARCHIVE_URL=https://cdn.azul.com/zulu/bin/zulu17.28.13-ca-jdk17.0.0-linux_x64.tar.gz
FINAL_DIR=zulu17.28.13-ca-jdk17.0.0-linux_x64
ARCHIVE_SUM=37c4f8e48536cceae8c6c20250d6c385e176972532fd35759fa7d6015c965f56
ARCHIVE_SUM_APP=sha256sum
UNPACK_APP="tar xzvf"
fi
|
virgo47/litterbin
|
automation/project-scripts/global-tools/java/defs/zulujdk-17.0.0-def.sh
|
Shell
|
bsd-2-clause
| 1,199 |
#!/bin/sh
# Bundle and compress the application's frontend assets for production.

# printf instead of echo: echo's backslash-escape handling differs
# between /bin/sh implementations (dash interprets \n, bash does not),
# so the original banner rendered differently depending on the shell.
printf '\n\n[task] Bundling assets\n\n'

# Clean and prepare build
# see https://github.com/yiisoft/yii2/issues/7414, https://github.com/yiisoft/yii2/issues/7473
rm -rf web/assets-prod
mkdir -p web/assets-prod/js
mkdir -p web/assets-prod/css
touch web/assets-prod/js/backend-temp.js
touch web/assets-prod/css/all-temp.css

# Compress asset bundles in Docker container
docker-compose run web ./yii asset config/assets.php config/assets-prod.php

# Report the resulting bundle sizes.
du -h web/assets-prod/css/*
du -h web/assets-prod/js/*
|
davidfang/playground
|
build/assets.sh
|
Shell
|
bsd-3-clause
| 506 |
# $FreeBSD$
# Import helper functions
. $(atf_get_srcdir)/helper_functions.shin
# Test add user
atf_test_case user_add
user_add_body() {
populate_etc_skel
atf_check -s exit:0 ${PW} useradd test
# The new entry must have been written to the test master.passwd.
atf_check -s exit:0 -o match:"^test:.*" \
grep "^test:.*" $HOME/master.passwd
}
# Test add user with option -N
# With -N the entry is printed on stdout but the password database is
# not updated, hence grep is expected to find nothing (exit 1).
atf_test_case user_add_noupdate
user_add_noupdate_body() {
populate_etc_skel
atf_check -s exit:0 -o match:"^test:.*" ${PW} useradd test -N
atf_check -s exit:1 -o empty grep "^test:.*" $HOME/master.passwd
}
# Test add user with comments
atf_test_case user_add_comments
user_add_comments_body() {
populate_etc_skel
atf_check -s exit:0 ${PW} useradd test -c "Test User,work,123,456"
atf_check -s exit:0 -o match:"^test:.*:Test User,work,123,456:" \
grep "^test:.*:Test User,work,123,456:" $HOME/master.passwd
}
# Test add user with comments and option -N
# -N: entry printed but database untouched (see user_add_noupdate).
atf_test_case user_add_comments_noupdate
user_add_comments_noupdate_body() {
populate_etc_skel
atf_check -s exit:0 -o match:"^test:.*:Test User,work,123,456:" \
${PW} useradd test -c "Test User,work,123,456" -N
atf_check -s exit:1 -o empty grep "^test:.*" $HOME/master.passwd
}
# Test add user with invalid comments
# A ":" inside the GECOS field would corrupt the colon-separated passwd
# format; pw must reject it with exit 65 (EX_DATAERR, sysexits(3)) and
# leave the database untouched.
atf_test_case user_add_comments_invalid
user_add_comments_invalid_body() {
populate_etc_skel
atf_check -s exit:65 -e match:"invalid character" \
${PW} useradd test -c "Test User,work,123:456,456"
atf_check -s exit:1 -o empty \
grep "^test:.*:Test User,work,123:456,456:" $HOME/master.passwd
}
# Test add user with invalid comments and option -N
atf_test_case user_add_comments_invalid_noupdate
user_add_comments_invalid_noupdate_body() {
populate_etc_skel
atf_check -s exit:65 -e match:"invalid character" \
${PW} useradd test -c "Test User,work,123:456,456" -N
atf_check -s exit:1 -o empty grep "^test:.*" $HOME/master.passwd
}
# Test add user with alternate homedir
atf_test_case user_add_homedir
user_add_homedir_body() {
populate_etc_skel
atf_check -s exit:0 ${PW} useradd test -d /foo/bar
atf_check -s exit:0 -o match:"^test:\*:.*::0:0:User &:/foo/bar:.*" \
${PW} usershow test
}
# Test add user with account expiration as an epoch date
atf_test_case user_add_account_expiration_epoch
user_add_account_expiration_epoch_body() {
populate_etc_skel
# One day in the future, in epoch seconds (BSD date -j -v+1d).
DATE=`date -j -v+1d "+%s"`
atf_check -s exit:0 ${PW} useradd test -e ${DATE}
atf_check -s exit:0 -o match:"^test:\*:.*::0:${DATE}:.*" \
${PW} usershow test
}
# Test add user with account expiration as a DD-MM-YYYY date
atf_test_case user_add_account_expiration_date_numeric
user_add_account_expiration_date_numeric_body() {
populate_etc_skel
DATE=`date -j -v+1d "+%d-%m-%Y"`
# Same date at midnight converted to epoch seconds for the field match.
EPOCH=`date -j -f "%d-%m-%Y %H:%M:%S" "${DATE} 00:00:00" "+%s"`
atf_check -s exit:0 ${PW} useradd test -e ${DATE}
atf_check -s exit:0 -o match:"^test:\*:.*::0:${EPOCH}:User &:.*" \
${PW} usershow test
}
# Test add user with account expiration as a DD-MMM-YYYY date (%b month name)
atf_test_case user_add_account_expiration_date_month
user_add_account_expiration_date_month_body() {
populate_etc_skel
DATE=`date -j -v+1d "+%d-%b-%Y"`
EPOCH=`date -j -f "%d-%b-%Y %H:%M:%S" "${DATE} 00:00:00" "+%s"`
atf_check -s exit:0 ${PW} useradd test -e ${DATE}
atf_check -s exit:0 -o match:"^test:\*:.*::0:${EPOCH}:User &:.*" \
${PW} usershow test
}
# Test add user with account expiration as a relative date
atf_test_case user_add_account_expiration_date_relative
user_add_account_expiration_date_relative_body() {
populate_etc_skel
# +13o = thirteen months from now; allow a 5-second window for the
# time that elapses between computing EPOCH and running pw(8).
EPOCH=`date -j -v+13m "+%s"`
BUF=`expr $EPOCH + 5`
atf_check -s exit:0 ${PW} useradd test -e +13o
# Account-expiry lives in field 7 of the usershow output.
TIME=`${PW} usershow test | awk -F ':' '{print $7}'`
# Quoted, chained tests: the original's unquoted `[ ! -z $TIME -a ... ]`
# breaks when TIME is empty and relies on the deprecated -a operator.
[ -n "$TIME" ] && [ "$TIME" -ge "$EPOCH" ] && [ "$TIME" -lt "$BUF" ] || \
atf_fail "Expiration time($TIME) was not within $EPOCH - $BUF seconds."
}
# Test add user with password expiration as an epoch date
# (password-change time is field 6 of the entry; the account-expiry
# tests above match field 7 — note the swapped positions of ${DATE}/0.)
atf_test_case user_add_password_expiration_epoch
user_add_password_expiration_epoch_body() {
populate_etc_skel
DATE=`date -j -v+1d "+%s"`
atf_check -s exit:0 ${PW} useradd test -p ${DATE}
atf_check -s exit:0 -o match:"^test:\*:.*::${DATE}:0:.*" \
${PW} usershow test
}
# Test add user with password expiration as a DD-MM-YYYY date
atf_test_case user_add_password_expiration_date_numeric
user_add_password_expiration_date_numeric_body() {
populate_etc_skel
DATE=`date -j -v+1d "+%d-%m-%Y"`
# Same date at midnight converted to epoch seconds for the field match.
EPOCH=`date -j -f "%d-%m-%Y %H:%M:%S" "${DATE} 00:00:00" "+%s"`
atf_check -s exit:0 ${PW} useradd test -p ${DATE}
atf_check -s exit:0 -o match:"^test:\*:.*::${EPOCH}:0:User &:.*" \
${PW} usershow test
}
# Test add user with password expiration as a DD-MMM-YYYY date
atf_test_case user_add_password_expiration_date_month
user_add_password_expiration_date_month_body() {
populate_etc_skel
DATE=`date -j -v+1d "+%d-%b-%Y"`
EPOCH=`date -j -f "%d-%b-%Y %H:%M:%S" "${DATE} 00:00:00" "+%s"`
atf_check -s exit:0 ${PW} useradd test -p ${DATE}
atf_check -s exit:0 -o match:"^test:\*:.*::${EPOCH}:0:User &:.*" \
${PW} usershow test
}
# Test add user with password expiration as a relative date
atf_test_case user_add_password_expiration_date_relative
user_add_password_expiration_date_relative_body() {
populate_etc_skel
# +13o = thirteen months from now; allow a 5-second window for the
# time that elapses between computing EPOCH and running pw(8).
EPOCH=`date -j -v+13m "+%s"`
BUF=`expr $EPOCH + 5`
atf_check -s exit:0 ${PW} useradd test -p +13o
# Password-change time lives in field 6 of the usershow output.
TIME=`${PW} usershow test | awk -F ':' '{print $6}'`
# Quoted, chained tests: the original's unquoted `[ ! -z $TIME -a ... ]`
# breaks when TIME is empty and relies on the deprecated -a operator.
[ -n "$TIME" ] && [ "$TIME" -ge "$EPOCH" ] && [ "$TIME" -lt "$BUF" ] || \
atf_fail "Expiration time($TIME) was not within $EPOCH - $BUF seconds."
}
# Overlong login names must be rejected ("too long" on stderr, exit 64,
# i.e. EX_USAGE from sysexits(3)).
atf_test_case user_add_name_too_long
user_add_name_too_long_body() {
populate_etc_skel
atf_check -e match:"too long" -s exit:64 \
${PW} useradd name_very_vert_very_very_very_long
}
# Register every test case defined above with the ATF runner.
atf_init_test_cases() {
atf_add_test_case user_add
atf_add_test_case user_add_noupdate
atf_add_test_case user_add_comments
atf_add_test_case user_add_comments_noupdate
atf_add_test_case user_add_comments_invalid
atf_add_test_case user_add_comments_invalid_noupdate
atf_add_test_case user_add_homedir
atf_add_test_case user_add_account_expiration_epoch
atf_add_test_case user_add_account_expiration_date_numeric
atf_add_test_case user_add_account_expiration_date_month
atf_add_test_case user_add_account_expiration_date_relative
atf_add_test_case user_add_password_expiration_epoch
atf_add_test_case user_add_password_expiration_date_numeric
atf_add_test_case user_add_password_expiration_date_month
atf_add_test_case user_add_password_expiration_date_relative
atf_add_test_case user_add_name_too_long
}
|
jrobhoward/SCADAbase
|
usr.sbin/pw/tests/pw_useradd.sh
|
Shell
|
bsd-3-clause
| 6,453 |
#!/bin/bash
# Run the egomotion estimator on the KITTI sequence 07 configuration.
# Guard the cd: without it a missing build directory would silently run
# the binary from whatever directory the script was started in.
cd /home/kivan/Projects/cv-stereo/build/egomotion/release/ || exit 1

dataset_config="../../../config_files/config_kitti_07.txt"
experiment_config="../../../config_files/experiments/kitti/ncc_validation_wgt.txt"

# the best 2 processes x 12 threads
OMP_NUM_THREADS=12 ./egomotion -c "$dataset_config" -e "$experiment_config"

# Other thread counts tried while benchmarking:
#OMP_NUM_THREADS=24 ./egomotion -c $dataset_config -e $experiment_config
#OMP_NUM_THREADS=23 ./egomotion -c $dataset_config -e $experiment_config
#OMP_NUM_THREADS=22 ./egomotion -c $dataset_config -e $experiment_config
#OMP_NUM_THREADS=21 ./egomotion -c $dataset_config -e $experiment_config
#OMP_NUM_THREADS=20 ./egomotion -c $dataset_config -e $experiment_config
#OMP_NUM_THREADS=16 ./egomotion -c $dataset_config -e $experiment_config
#OMP_NUM_THREADS=14 ./egomotion -c $dataset_config -e $experiment_config
#OMP_NUM_THREADS=12 ./egomotion -c $dataset_config -e $experiment_config
#OMP_NUM_THREADS=10 ./egomotion -c $dataset_config -e $experiment_config
#OMP_NUM_THREADS=8 ./egomotion -c $dataset_config -e $experiment_config
#OMP_NUM_THREADS=4 ./egomotion -c $dataset_config -e $experiment_config
#OMP_NUM_THREADS=2 ./egomotion -c $dataset_config -e $experiment_config
#OMP_NUM_THREADS=1 ./egomotion -c $dataset_config -e $experiment_config
|
ivankreso/stereo-vision
|
scripts/egomotion_kitti_eval/benchmark_odometry.sh
|
Shell
|
bsd-3-clause
| 1,270 |
#!/usr/bin/env bash
# Re-run the Yii application init script for the Development environment,
# answering "no" to every overwrite prompt.

# Resolve the directory containing this script so it can be invoked from
# anywhere; all expansions quoted so paths with spaces survive.
SCRIPT_PATH=$(dirname "$(readlink -f "$0")")
"$SCRIPT_PATH/init" --env=Development --overwrite=n
|
leerais/yii2-demo
|
deploy.sh
|
Shell
|
bsd-3-clause
| 113 |
#!/bin/sh
# Mirror each syndication feed to a local XML file.

# fetch <local-file> <feed-url>: save one feed under the given name.
fetch() {
wget -O "$1" "$2"
}

fetch remakeremodel.xml http://chompy.net/blogs/jacob/feeds/index.atom
fetch darkentries.xml http://www.chompy.net/blogs/jacob/feeds/darkentries.atom
fetch peenywally.xml http://chompy.net/blogs/pogo/atom.xml
fetch tigerpounces.xml http://chompy.net/blogs/nathalie/atom.xml
fetch greencandle.xml http://palcontent.blogspot.com/feeds/posts/default
fetch garbage.xml http://hoolifan.blogspot.com/feeds/posts/default
fetch bubblegumdamage.xml http://bubblegumdamage.blogspot.com/feeds/posts/default
fetch charlieletters.xml http://savagepencil.typepad.com/charlieletters/index.rdf
fetch confeessions.xml http://savagepencil.typepad.com/confessions/index.rdf
fetch headcrab.xml http://headcrab.org/feed/atom/
fetch arabic.xml http://benkerishan.blogspot.com/feeds/posts/default
fetch ongoing.xml http://www.tbray.org/ongoing/ongoing.atom
fetch newsofthedead.xml http://wileywiggins.blogspot.com/feeds/posts/default
|
braveulysses/backwater
|
scripts/backwaterfeeds.sh
|
Shell
|
bsd-3-clause
| 947 |
#!/bin/bash
# Run the functional tests listed (whitespace-separated) in ./tests.
#
# Usage: test_all.sh [TESTNAME[-]]
#   TESTNAME   run only that test
#   TESTNAME-  start at that test and run it plus everything after it
set -e

TESTNAME=
CONTINUE_AFTER=0
ENABLED=1

if [[ $# == 1 ]]; then
    TESTNAME=$1
    LAST_INDEX=$((${#TESTNAME}-1))
    LAST_CHAR="${TESTNAME:$LAST_INDEX:1}"
    # A trailing "-" selects the named test and all following ones.
    if [[ $LAST_CHAR == "-" ]]; then
        TESTNAME="${TESTNAME:0:$LAST_INDEX}"
        CONTINUE_AFTER=1
    fi
    ENABLED=0
fi

echo "TESTNAME=${TESTNAME} CONTINUE_AFTER=${CONTINUE_AFTER}"
echo

# NOTE: word-splitting of $(cat tests) is relied upon; test names must
# not contain whitespace.
for t in $(cat tests); do
    echo $t
    if [[ ! -z $TESTNAME && $TESTNAME == $t ]]; then
        ENABLED=1
    fi
    if [[ $ENABLED == 0 ]]; then
        continue
    fi

    cd "$t"
    mvn clean &> ../clean.log
    mvn package &> ../package.log
    ./generate.sh &> ../generate.log
    ./convert.sh &> ../convert.log
    ./run.sh check &> ../check.log
    cd ..

    # grep's status says whether anything PASSED; tolerate a non-zero
    # status under "set -e" just long enough to capture it.
    set +e
    grep PASSED check.log
    ERR=$?
    set -e

    if [[ $ERR != 0 ]]; then
        echo FAILED
        exit 1
    fi

    # Single-test mode (no trailing "-"): stop after the selected test.
    if [[ ! -z $TESTNAME && $ENABLED == 1 && $CONTINUE_AFTER == 0 ]]; then
        ENABLED=0
    fi
done
|
agrippa/spark-swat
|
functional-tests/test_all.sh
|
Shell
|
bsd-3-clause
| 985 |
#!/bin/bash
# Install Leiningen for the framework-benchmarks toolset.
# NOTE(review): this file uses `return` at top level and the fw_exists /
# fw_get helpers, so it is presumably *sourced* by the toolset rather
# than executed directly — confirm against the caller.
RETCODE=$(fw_exists ${IROOT}/lein.installed)
# Skip the install when the marker file from a previous run exists.
[ ! "$RETCODE" == 0 ] || { return 0; }
mkdir -p lein/bin
# Fetch the lein bootstrap script and install it as an executable.
fw_get https://raw.github.com/technomancy/leiningen/stable/bin/lein -O leinbin
mv leinbin lein/bin/lein
chmod +x lein/bin/lein
# Marker file: tells future runs that lein is already installed.
touch ${IROOT}/lein.installed
|
kellabyte/FrameworkBenchmarks
|
toolset/setup/linux/systools/leiningen.sh
|
Shell
|
bsd-3-clause
| 273 |
# Enpass Debian repo
# http://enpass.io/
# FIX: the original used `sudo echo ... > file`, but the redirection is
# performed by the *unprivileged* shell, so writing into /etc fails with
# "Permission denied".  Route the write through `sudo tee` instead, the
# same pattern the Java entries below already use.
echo "deb http://repo.sinew.in/ stable main" | sudo tee /etc/apt/sources.list.d/enpass.list

# Oracle Java 8 repo (or 9 ;-)
# http://www.webupd8.org/2015/02/install-oracle-java-9-in-ubuntu-linux.html
echo "deb http://ppa.launchpad.net/webupd8team/java/ubuntu trusty main" | sudo tee /etc/apt/sources.list.d/webupd8team-java.list
echo "deb-src http://ppa.launchpad.net/webupd8team/java/ubuntu trusty main" | sudo tee -a /etc/apt/sources.list.d/webupd8team-java.list

# Import the WebUpd8 signing key so apt trusts the repository.
sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys EEA14886
|
jscappini/fresh_os_install
|
setup_debian_repo.sh
|
Shell
|
mit
| 583 |
#!/bin/bash -x
# CI build: install gems into a per-job bundle path, run the test suite
# (default rake task), then publish the gem.  -x traces every command.
set -e
# Drop the lockfile so each build resolves dependencies afresh.
rm -f Gemfile.lock
bundle install --path "${HOME}/bundles/${JOB_NAME}"
bundle exec rake
bundle exec rake publish_gem
|
alphagov/govuk_mirrorer
|
jenkins.sh
|
Shell
|
mit
| 139 |
#!/bin/bash
# Build the m2x AMD bundles with r.js: a plain, a minified and a
# "current" build.
VERSION="3.3.0"

# -f: do not error when no previous build artefacts exist.
rm -f m2x-*.js

r.js -o build.js out=m2x-${VERSION}.js optimize=none
r.js -o build.js out=m2x-${VERSION}.min.js
r.js -o build.js out=current.js
|
kkng88/mdd2017
|
www/lib/m2x/dist/build.sh
|
Shell
|
mit
| 170 |
#!/bin/bash
# This file is part of cc-oci-runtime.
#
# Copyright (C) 2016 Intel Corporation
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
# Strict mode: abort on any error and trace every command.
set -e -x
# Repository root; this script lives one directory below it.
root=$(cd `dirname "$0"`/..; pwd -P)
# versions.txt supplies the *_version variables used throughout.
source "$root/versions.txt"
if [ "$SEMAPHORE" = true ]
then
# SemaphoreCI has different environments that builds can run in. The
# default environment does not have docker enabled so it is
# necessary to specify a docker-enabled build environment on the
# semaphoreci.com web site.
#
# However, currently, the docker-enabled environment does not
# provide nested KVM (whereas the default environment does), so
# manually enable nested kvm for the time being.
sudo rmmod kvm-intel || :
sudo sh -c "echo 'options kvm-intel nested=y' >> /etc/modprobe.d/dist.conf" || :
sudo modprobe kvm-intel || :
fi
# ci-common.sh presumably defines $nested, $prefix_dir and $deps_dir
# used below — confirm against that file.
source $(dirname "$0")/ci-common.sh
if [ "$nested" = "Y" ]
then
# Ensure the user can access the kvm device
sudo chmod g+rw /dev/kvm
sudo chgrp "$USER" /dev/kvm
fi
#
# Install go
#
go_tarball="go${go_version}.linux-amd64.tar.gz"
curl -L -O "https://storage.googleapis.com/golang/$go_tarball"
tar xvf $go_tarball 1>/dev/null
mv go $GOROOT
# Unfortunately, go doesn't support vendoring outside of GOPATH (maybe in 1.8?)
# So, we setup a GOPATH tree with our vendored dependencies.
# See: https://github.com/golang/go/issues/14566
mkdir -p "$GOPATH/src"
cp -r vendor/* "$GOPATH/src"
# We also need to put the runtime into its right place in the GOPATH so we can
# self-import internal packages
mkdir -p "$GOPATH/src/github.com/01org/"
ln -s $PWD "$GOPATH/src/github.com/01org/"
# Static-analysis helpers used by the CI checks.
go get github.com/fzipp/gocyclo
go get github.com/client9/misspell/cmd/misspell
go get github.com/golang/lint/golint
#
# Install cc-oci-runtime dependencies
#
# Ensure "make install" as root can find clang
#
# See: https://github.com/travis-ci/travis-ci/issues/2607
export CC=$(which "$CC")
gnome_dl=https://download.gnome.org/sources
# Install required dependencies to build
# glib, json-glib, libmnl-dev, check, gcc, cc-oci-runtime and qemu-lite
pkgs=""
# general
pkgs+=" pkg-config"
pkgs+=" gettext"
pkgs+=" rpm2cpio"
pkgs+=" valgrind"
# runtime dependencies
pkgs+=" uuid-dev"
pkgs+=" cppcheck"
pkgs+=" libmnl-dev"
pkgs+=" libffi-dev"
pkgs+=" libpcre3-dev"
# runtime + qemu-lite
pkgs+=" zlib1g-dev"
# qemu-lite
pkgs+=" libpixman-1-dev"
# gcc
pkgs+=" libcap-ng-dev"
pkgs+=" libgmp-dev"
pkgs+=" libmpfr-dev"
pkgs+=" libmpc-dev"
# code coverage
pkgs+=" lcov"
# chronic(1)
pkgs+=" moreutils"
# qemu-lite won't be built
# some unit tests need qemu-system
if [ "$nested" != "Y" ]
then
pkgs+=" qemu-system-x86"
fi
eval sudo apt-get -qq install "$pkgs"
# compile <name> <tarball> <directory> [configure_opts]
#
# Unpack <tarball>, configure inside <directory> (with <configure_opts>
# when given, otherwise the default --disable-silent-rules/--prefix
# options), then build and "sudo make install".  Output is silenced via
# chronic(1) unless a step fails.
# NB: name/tarball/directory/configure_opts/args are assigned without
# `local`, so they leak into the caller's scope.
function compile {
name="$1"
tarball="$2"
directory="$3"
configure_opts="$4"
chronic tar -xvf ${tarball}
pushd ${directory}
if [ -n "$configure_opts" ]
then
args="$configure_opts"
else
args=""
args+=" --disable-silent-rules"
args+=" --prefix=\"${prefix_dir}\""
fi
# eval so the quoted options embedded in $args are re-parsed as words.
eval CC=${CC:-cc} chronic ./configure "$args"
chronic make -j5
chronic sudo make install
popd
}
# Check whether the given command was already installed into the tools
# prefix, i.e. whether "${prefix_dir}/bin/<cmd>" exists.
function cmd_installed {
local tool="$1"

test -e "${prefix_dir}/bin/${tool}"
}
# Report success when pkg-config provides exactly the requested version
# of the given library (any pkg-config error counts as "not installed").
function lib_installed {
local lib="$1"
local wanted="$2"

# NB: "version" is deliberately left global, matching the original.
version="$(pkg-config --print-provides "$lib" 2>/dev/null | awk '{print $3}')"
if [ "$version" = "$wanted" ]
then
return 0
fi
return 1
}
# All third-party dependencies are downloaded and built under $deps_dir.
pushd "$deps_dir"
# Build glib
glib_major=`echo $glib_version | cut -d. -f1`
glib_minor=`echo $glib_version | cut -d. -f2`
file="glib-${glib_version}.tar.xz"
if ! lib_installed "glib-2.0" "$glib_version"
then
if [ ! -e "$file" ]
then
# GNOME mirrors organise downloads by <major>.<minor> directory.
curl -L -O "$gnome_dl/glib/${glib_major}.${glib_minor}/$file"
fi
compile glib glib-${glib_version}.tar.xz glib-${glib_version}
fi
# Build json-glib
json_major=`echo $json_glib_version | cut -d. -f1`
json_minor=`echo $json_glib_version | cut -d. -f2`
file="json-glib-${json_glib_version}.tar.xz"
if ! lib_installed "json-glib-1.0" "$json_glib_version"
then
if [ ! -e "$file" ]
then
curl -L -O "$gnome_dl/json-glib/${json_major}.${json_minor}/$file"
fi
compile json-glib json-glib-${json_glib_version}.tar.xz json-glib-${json_glib_version}
fi
# Build check
# We need to build check as the check version in the OS used by travis isn't
# -pedantic safe.
if ! lib_installed "check" "${check_version}"
then
file="check-${check_version}.tar.gz"
if [ ! -e "$file" ]
then
curl -L -O "https://github.com/libcheck/check/releases/download/${check_version}/$file"
fi
compile check check-${check_version}.tar.gz check-${check_version}
fi
cmd="bats"
if ! cmd_installed "$cmd"
then
# Install bats
[ ! -d bats ] && git clone https://github.com/sstephenson/bats.git
pushd bats
sudo ./install.sh "$prefix_dir"
popd
fi
# Without nested KVM the remaining steps (gcc, qemu-lite, kernel and
# Clear Containers image) are skipped entirely.
if [ "$nested" != "Y" ]
then
popd
exit 0
fi
# Build a local gcc, then qemu-lite, unless already present in the prefix.
cmd="gcc"
if ! cmd_installed "$cmd"
then
# build gcc (required for qemu-lite)
gcc_dir="gcc-${gcc_version}"
gcc_site="http://mirrors.kernel.org/gnu/gcc/${gcc_dir}"
gcc_file="gcc-${gcc_version}.tar.bz2"
gcc_url="${gcc_site}/${gcc_file}"
if [ ! -e "$gcc_file" ]
then
curl -L -O "$gcc_url"
fi
# Minimal C-only, non-bootstrap build to keep CI time down.
gcc_opts=""
gcc_opts+=" --enable-languages=c"
gcc_opts+=" --disable-multilib"
gcc_opts+=" --disable-libstdcxx"
gcc_opts+=" --disable-bootstrap"
gcc_opts+=" --disable-nls"
gcc_opts+=" --prefix=\"${prefix_dir}\""
compile gcc "$gcc_file" "$gcc_dir" "$gcc_opts"
fi
# Use built version of gcc
export CC="${prefix_dir}/bin/gcc"
# build qemu-lite
cmd="qemu-system-x86_64"
if ! cmd_installed "$cmd"
then
qemu_lite_site="https://github.com/01org/qemu-lite/archive/"
qemu_lite_file="${qemu_lite_version}.tar.gz"
qemu_lite_url="${qemu_lite_site}/${qemu_lite_file}"
qemu_lite_dir="qemu-lite-${qemu_lite_version}"
# Disable every optional subsystem; only the KVM x86_64 target with
# virtfs/vhost-net support is wanted.
qemu_lite_opts=""
qemu_lite_opts+=" --disable-bluez"
qemu_lite_opts+=" --disable-brlapi"
qemu_lite_opts+=" --disable-bzip2"
qemu_lite_opts+=" --disable-curl"
qemu_lite_opts+=" --disable-curses"
qemu_lite_opts+=" --disable-debug-tcg"
qemu_lite_opts+=" --disable-fdt"
qemu_lite_opts+=" --disable-glusterfs"
qemu_lite_opts+=" --disable-gtk"
qemu_lite_opts+=" --disable-libiscsi"
qemu_lite_opts+=" --disable-libnfs"
qemu_lite_opts+=" --disable-libssh2"
qemu_lite_opts+=" --disable-libusb"
qemu_lite_opts+=" --disable-linux-aio"
qemu_lite_opts+=" --disable-lzo"
qemu_lite_opts+=" --disable-opengl"
qemu_lite_opts+=" --disable-qom-cast-debug"
qemu_lite_opts+=" --disable-rbd"
qemu_lite_opts+=" --disable-rdma"
qemu_lite_opts+=" --disable-sdl"
qemu_lite_opts+=" --disable-seccomp"
qemu_lite_opts+=" --disable-slirp"
qemu_lite_opts+=" --disable-snappy"
qemu_lite_opts+=" --disable-spice"
qemu_lite_opts+=" --disable-strip"
qemu_lite_opts+=" --disable-tcg-interpreter"
qemu_lite_opts+=" --disable-tcmalloc"
qemu_lite_opts+=" --disable-tools"
qemu_lite_opts+=" --disable-tpm"
qemu_lite_opts+=" --disable-usb-redir"
qemu_lite_opts+=" --disable-uuid"
qemu_lite_opts+=" --disable-vnc"
qemu_lite_opts+=" --disable-vnc-{jpeg,png,sasl}"
qemu_lite_opts+=" --disable-vte"
qemu_lite_opts+=" --disable-xen"
qemu_lite_opts+=" --enable-attr"
qemu_lite_opts+=" --enable-cap-ng"
qemu_lite_opts+=" --enable-kvm"
qemu_lite_opts+=" --enable-virtfs"
qemu_lite_opts+=" --enable-vhost-net"
qemu_lite_opts+=" --target-list=x86_64-softmmu"
qemu_lite_opts+=" --extra-cflags=\"-fno-semantic-interposition -O3 -falign-functions=32\""
qemu_lite_opts+=" --prefix=\"${prefix_dir}\""
qemu_lite_opts+=" --datadir=\"${prefix_dir}/share/qemu-lite\""
qemu_lite_opts+=" --libdir=\"${prefix_dir}/lib64/qemu-lite\""
qemu_lite_opts+=" --libexecdir=\"${prefix_dir}/libexec/qemu-lite\""
if [ ! -e "$qemu_lite_file" ]
then
curl -L -O "${qemu_lite_url}"
fi
compile qemu-lite "$qemu_lite_file" \
"$qemu_lite_dir" "$qemu_lite_opts"
fi
# install kernel + Clear Containers image
mkdir -p assets
pushd assets
clr_dl_site="https://download.clearlinux.org"
# "latest" returns the current Clear Linux release number.
clr_release=$(curl -L "${clr_dl_site}/latest")
clr_kernel_base_url="${clr_dl_site}/releases/${clr_release}/clear/x86_64/os/Packages"
sudo mkdir -p "$clr_assets_dir"
# find newest containers kernel
clr_kernel=$(curl -l -s -L "${clr_kernel_base_url}" |\
grep -o "linux-container-[0-9][0-9.-]*\.x86_64.rpm" |\
sort -u)
# download kernel
if [ ! -e "${clr_assets_dir}/${clr_kernel}" ]
then
if [ ! -e "$clr_kernel" ]
then
curl -L -O "${clr_kernel_base_url}/${clr_kernel}"
fi
# install kernel
# (note: cpio on trusty does not support "-D")
rpm2cpio "${clr_kernel}"| (cd / && sudo cpio -idv)
fi
clr_image_url="${clr_dl_site}/current/clear-${clr_release}-containers.img.xz"
clr_image_compressed=$(basename "$clr_image_url")
# uncompressed image name
clr_image=${clr_image_compressed/.xz/}
# download image
if [ ! -e "${clr_assets_dir}/${clr_image}" ]
then
for file in "${clr_image_url}-SHA512SUMS" "${clr_image_url}"
do
[ ! -e "$file" ] && curl -L -O "$file"
done
# verify image
checksum_file="${clr_image_compressed}-SHA512SUMS"
sha512sum -c "${checksum_file}"
# unpack image
unxz --force "${clr_image_compressed}"
# install image
sudo install "${clr_image}" "${clr_assets_dir}"
rm -f "${checksum_file}" "${clr_image}" "${clr_image_compressed}"
fi
# change kernel+image ownership
sudo chown -R "$USER" "${clr_assets_dir}"
# create image symlink (kernel will already have one)
clr_image_link=clear-containers.img
sudo rm -f "${clr_assets_dir}/${clr_image_link}"
(cd "${clr_assets_dir}" && sudo ln -s "${clr_image}" "${clr_image_link}")
popd
popd
# On SemaphoreCI (trusty only), reconfigure docker to use the runtime
# under test as its default runtime.
if [ "$SEMAPHORE" = true ]
then
distro=$(lsb_release -c|awk '{print $2}' || :)
if [ "$distro" = trusty ]
then
# Configure docker to use the runtime
docker_opts=""
docker_opts+=" --add-runtime cor=cc-oci-runtime"
docker_opts+=" --default-runtime=cor"
sudo initctl stop docker
# Remove first as apt-get doesn't like downgrading this package
# on trusty.
sudo apt-get -qq purge docker-engine
sudo apt-get -qq install \
docker-engine="$docker_engine_semaphoreci_ubuntu_version"
echo "DOCKER_OPTS=\"$docker_opts\"" |\
sudo tee -a /etc/default/docker
sudo initctl restart docker
else
echo "ERROR: unhandled Semaphore distro: $distro"
exit 1
fi
fi
|
sameo/cc-oci-runtime
|
.ci/ci-setup.sh
|
Shell
|
gpl-2.0
| 11,422 |
#!/bin/sh
#
# Cloud Hook: Reinstall Headless Lightning
#
# Run `drush site-install headless_lightning` in the target environment.
# The hook runner passes the site name and the target environment as the
# first two arguments.
site="$1"
target_env="$2"
# Fresh install of Headless Lightning.
drush @$site.$target_env site-install headless_lightning --yes --account-pass=admin --site-name='Headless Lightning - Nightly Build'
# Enable the API test module on the freshly installed site.
drush @$site.$target_env pm-enable api_test --yes
# Point simple_oauth at the pre-generated key pair.
# NOTE(review): the key paths are hardcoded to one particular home
# directory — confirm they exist in every environment this hook targets.
drush @$site.$target_env config-set simple_oauth.settings public_key /home/headlessnightly/5b5cbb3034b52b0208eb5055624de07a64e2bbfca5b61d33f074d8d2074fb4fa.key --yes
drush @$site.$target_env config-set simple_oauth.settings private_key /home/headlessnightly/57050ee7319509e25c53e3954e119abe654a2c0519634e7b19e4b7cfdf8e25c5.key --yes
|
balsama/headless-lightning
|
hooks/common/post-code-update/reinstall.sh
|
Shell
|
gpl-2.0
| 713 |
#!/bin/sh
#
# Copyright (c) 2016 Fujitsu Ltd.
# Author: Xiao Yang <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
# the GNU General Public License for more details.
#
# Test wc command with some basic options.
#
# LTP shell-test configuration: 12 sub-tests, fixture written by
# setup(), each case dispatched through do_test(); a temporary working
# directory and the wc binary are required.
TST_CNT=12
TST_SETUP=setup
TST_TESTFUNC=do_test
TST_NEEDS_TMPDIR=1
TST_NEEDS_CMDS="wc"
# Load the LTP shell-test framework (provides tst_res, tst_run, ...).
. tst_test.sh
# Create the two-line fixture file consumed by every wc check below.
setup()
{
printf '%s\n' "hello world" > ltp_wc
printf '%s\n' "This is a test" >> ltp_wc
}
# Run "wc <opt> [<file>]" and compare the first numeric field of its
# output against the expected count; report TPASS/TFAIL/TCONF via the
# LTP framework.  $1=wc option, $2=input file, $3=expected count.
wc_test()
{
local wc_opt=$1
local wc_file=$2
local std_out=$3
local wc_cmd="wc $wc_opt $wc_file"
# Capture stdout+stderr: a failing wc may merely mean this wc build
# does not implement the option, which is TCONF rather than TFAIL.
eval $wc_cmd > temp 2>&1
if [ $? -ne 0 ]; then
grep -q -E "unknown option|invalid option" temp
if [ $? -eq 0 ]; then
tst_res TCONF "$wc_cmd not supported."
else
tst_res TFAIL "$wc_cmd failed."
fi
return
fi
# When a file/expected count were given, verify the first field of
# wc's output (the count) against the expected value.
if [ $# -gt 1 ]; then
local act_out=`cat temp | awk '{printf $1}'`
if [ $act_out -ne $std_out ]; then
tst_res TFAIL "$wc_cmd got mismatched data."
return
fi
fi
tst_res TPASS "wc passed with $wc_opt option."
}
# Dispatch table: one wc invocation per ATF sub-test number (TST_CNT=12).
# The expected counts refer to the fixture written by setup():
# "hello world\n" + "This is a test\n" -> 27 bytes/chars, 2 lines,
# 6 words, longest line 14 characters.
do_test()
{
case $1 in
1) wc_test "-c" ltp_wc 27;;
2) wc_test "--bytes" ltp_wc 27;;
3) wc_test "-l" ltp_wc 2;;
4) wc_test "--lines" ltp_wc 2;;
5) wc_test "-L" ltp_wc 14;;
6) wc_test "--max-line-length" ltp_wc 14;;
7) wc_test "-w" ltp_wc 6;;
8) wc_test "--words" ltp_wc 6;;
9) wc_test "-m" ltp_wc 27;;
10) wc_test "--chars" ltp_wc 27;;
11) wc_test "--help";;
12) wc_test "--version";;
esac
}
tst_run
|
richiejp/ltp
|
testcases/commands/wc/wc01.sh
|
Shell
|
gpl-2.0
| 1,780 |
#!/usr/bin/env bash
########## this script runs ccs_anat_preproc ###########
# Registers a subject's FreeSurfer-processed anatomy to MNI152 standard
# space with FSL (FLIRT affine + FNIRT nonlinear).
# The first step of this script is to run on bash, will move to python eventually.
# Inputs:
#   1. CCS_DIR      - root of the CCS working directory
#   2. SUBJECTS_DIR - FreeSurfer SUBJECTS_DIR
#   3. subject      - subject identifier
######################################################

# Validate arguments BEFORE using them (the original created directories
# from empty variables when arguments were missing) and exit non-zero on
# a usage error so callers can detect it.
if [ $# -lt 3 ]; then
    echo -e "\033[47;35m Usage: $0 CCS_DIR SUBJECTS_DIR subject \033[0m"
    exit 1
fi

#set dirs
CCS_DIR=$1
SUBJECTS_DIR=$2
subject=$3
anat_dir=${CCS_DIR}/${subject}/anat
reg_dir=${anat_dir}/reg
seg_dir=${anat_dir}/segment
cwd=$(pwd)  # remember the caller's directory (was never set before, so the final cd went to $HOME)
mkdir -p ${reg_dir} ${seg_dir}

# Convert FreeSurfer volumes (orig.mgz, brainmask.mgz) to NIfTI once:
if [ ! -f ${anat_dir}/T1_crop_sanlm_fs.nii.gz ]; then
    mri_convert -it mgz ${SUBJECTS_DIR}/${subject}/mri/orig.mgz -ot nii ${anat_dir}/T1_crop_sanlm_fs.nii.gz
fi
if [ ! -f ${seg_dir}/brainmask.nii.gz ]; then
    mri_convert -it mgz ${SUBJECTS_DIR}/${subject}/mri/brainmask.mgz -ot nii ${seg_dir}/brainmask.nii.gz
fi

## 1. Prepare anatomical images
if [ -f ${reg_dir}/highres_head.nii.gz ]; then
    rm -v ${reg_dir}/highres_head.nii.gz
fi
mv ${anat_dir}/T1_crop_sanlm_fs.nii.gz ${reg_dir}/highres_head.nii.gz
fslmaths ${seg_dir}/brainmask.nii.gz -thr 2 ${seg_dir}/brainmask.nii.gz #clean voxels manually edited in freesurfer (assigned value 1)
fslmaths ${reg_dir}/highres_head.nii.gz -mas ${seg_dir}/brainmask.nii.gz ${reg_dir}/highres.nii.gz
cd ${reg_dir}

## 2. Copy standard templates (We provide two reg pipelines: FSL and Freesurfer, the latter was done in Recon-all automatically)
standard_head=${FSLDIR}/data/standard/MNI152_T1_2mm.nii.gz
standard=${FSLDIR}/data/standard/MNI152_T1_2mm_brain.nii.gz
standard_mask=${FSLDIR}/data/standard/MNI152_T1_2mm_brain_mask_dil.nii.gz

## 3. FLIRT T1->STANDARD (affine, 12 dof)
echo "########################## Performing FLIRT T1 -> STANDARD #################################"
fslreorient2std highres.nii.gz highres_rpi.nii.gz
fslreorient2std highres_head.nii.gz highres_head_rpi.nii.gz # not used, just test for future use
flirt -ref ${standard} -in highres_rpi -out highres_rpi2standard -omat highres_rpi2standard.mat -cost corratio -searchcost corratio -dof 12 -interp trilinear

## 4. Create mat file for conversion from standard to high res
fslreorient2std highres.nii.gz > reorient2rpi.mat
convert_xfm -omat highres2standard.mat -concat highres_rpi2standard.mat reorient2rpi.mat
convert_xfm -inverse -omat standard2highres.mat highres2standard.mat

## 5. FNIRT nonlinear registration; if the 10mm warp run produced warnings,
## keep its output aside and retry with a coarser 20mm warp resolution.
echo "########################## Performing nolinear registration ... #################################"
fnirt --in=highres_head --aff=highres2standard.mat --cout=highres2standard_warp --iout=fnirt_highres2standard --jout=highres2standard_jac --config=T1_2_MNI152_2mm --ref=${standard_head} --refmask=${standard_mask} --warpres=10,10,10 > warnings.fnirt
if [ -s ${reg_dir}/warnings.fnirt ]; then
    mv fnirt_highres2standard.nii.gz fnirt_highres2standard_wres10.nii.gz
    fnirt --in=highres_head --aff=highres2standard.mat --cout=highres2standard_warp --iout=fnirt_highres2standard --jout=highres2standard_jac --config=T1_2_MNI152_2mm --ref=${standard_head} --refmask=${standard_mask} --warpres=20,20,20
else
    rm -v warnings.fnirt
fi
cd ${cwd}
|
zuoxinian/CCS
|
samplesScripts/ccs_anat_03_postfs.sh
|
Shell
|
gpl-2.0
| 3,119 |
#!/bin/bash
# Package the Chrome app and the Chrome extension from the shared www build
# and copy the resulting zips into Dropbox.
# Required env: INTERMEDIATE_PATH, LOWERCASE_APP_NAME, DROPBOX_PATH
# Optional env: BUILD_PATH (defaults to $IONIC_PATH/build)
if [ -z "$BUILD_PATH" ]
then
    # BUG FIX: was `$BUILD_PATH="$IONIC_PATH"/build`, which expands to a
    # command name instead of performing an assignment.
    BUILD_PATH="$IONIC_PATH"/build
    echo "No BUILD_PATH given. Using $BUILD_PATH..."
fi
if [ -z "$INTERMEDIATE_PATH" ]
then
    echo "No INTERMEDIATE_PATH given..."
    exit 1
fi
if [ -d "${INTERMEDIATE_PATH}/apps" ];
then
    echo "${INTERMEDIATE_PATH}/apps path exists";
else
    echo "${INTERMEDIATE_PATH}/apps path not found!";
    exit 1
fi
# Generate the icon sizes needed by the Chrome packages.
echo "Generating images for ${LOWERCASE_APP_NAME} Chrome at ${PWD}..."
convert resources/icon_transparent.png -resize 700x700 www/img/icons/icon_700.png
convert resources/icon_transparent.png -resize 16x16 www/img/icons/icon_16.png
convert resources/icon_transparent.png -resize 48x48 www/img/icons/icon_48.png
convert resources/icon_transparent.png -resize 128x128 www/img/icons/icon_128.png
echo -e "${GREEN}Copying www folder into app and extension${NC}"
mkdir -p "${BUILD_PATH}/${LOWERCASE_APP_NAME}/chrome_app/www"
mkdir -p "${BUILD_PATH}/${LOWERCASE_APP_NAME}/chrome_extension/www"
cp -R ${INTERMEDIATE_PATH}/resources/chrome_app/* "${BUILD_PATH}/${LOWERCASE_APP_NAME}/chrome_app/"
cp -R ${INTERMEDIATE_PATH}/resources/chrome_extension/* "${BUILD_PATH}/${LOWERCASE_APP_NAME}/chrome_extension/"
cp -R ${INTERMEDIATE_PATH}/www/* "${BUILD_PATH}/${LOWERCASE_APP_NAME}/chrome_app/www/"
cp -R ${INTERMEDIATE_PATH}/www/* "${BUILD_PATH}/${LOWERCASE_APP_NAME}/chrome_extension/www/"
# The android/ios payloads of the facebook plugin are not needed in browser builds.
rm -rf "${BUILD_PATH}/${LOWERCASE_APP_NAME}/chrome_extension/www/lib/phonegap-facebook-plugin/platforms/android"
rm -rf "${BUILD_PATH}/${LOWERCASE_APP_NAME}/chrome_extension/www/lib/phonegap-facebook-plugin/platforms/ios"
cd "${BUILD_PATH}/${LOWERCASE_APP_NAME}/chrome_extension" && zip -r "${BUILD_PATH}/${LOWERCASE_APP_NAME}/${LOWERCASE_APP_NAME}-Chrome-Extension.zip" * >/dev/null
rm -rf "${BUILD_PATH}/${LOWERCASE_APP_NAME}/chrome_extension"
cp "${BUILD_PATH}/${LOWERCASE_APP_NAME}/${LOWERCASE_APP_NAME}-Chrome-Extension.zip" "$DROPBOX_PATH/QuantiModo/apps/"
echo "${LOWERCASE_APP_NAME} Chrome extension is ready"
rm -rf "${BUILD_PATH}/${LOWERCASE_APP_NAME}/chrome_app/www/lib/phonegap-facebook-plugin/platforms/android"
rm -rf "${BUILD_PATH}/${LOWERCASE_APP_NAME}/chrome_app/www/lib/phonegap-facebook-plugin/platforms/ios"
cd "${BUILD_PATH}/${LOWERCASE_APP_NAME}/chrome_app" && zip -r "${BUILD_PATH}/${LOWERCASE_APP_NAME}/${LOWERCASE_APP_NAME}-Chrome-App.zip" * >/dev/null
rm -rf "${BUILD_PATH}/${LOWERCASE_APP_NAME}/chrome_app"
cp "${BUILD_PATH}/${LOWERCASE_APP_NAME}/${LOWERCASE_APP_NAME}-Chrome-App.zip" "$DROPBOX_PATH/QuantiModo/apps/${LOWERCASE_APP_NAME}/"
echo "${LOWERCASE_APP_NAME} Chrome app is ready"
|
lVBY/quanti
|
scripts/build_scripts/02_build_chrome.sh
|
Shell
|
gpl-2.0
| 3,266 |
#!/bin/bash
# Initialise the Alchemy database for development:
# precompile assets, run migrations and seed data.
# Abort on the first failing step — in particular, never run rake
# from the wrong directory if the cd fails.
set -e
source /usr/local/rvm/scripts/rvm
cd /var/alchemy
bundle exec rake assets:precompile db:migrate db:seed RAILS_ENV=development
|
kurtmc/alchemy-internal-website
|
initialise-database-dev.sh
|
Shell
|
gpl-2.0
| 138 |
# Copyright (c) 2019, Google Inc. All rights reserved.
# Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#!/bin/sh
#
# @test testtls.sh
# @summary Test with extra TLS size.
# @requires os.family == "linux"
# @compile T.java
# @run shell testtls.sh
#
# Fall back to the current directory when the jtreg harness did not set TESTSRC:
if [ "${TESTSRC}" = "" ]
then
TESTSRC=${PWD}
echo "TESTSRC not set. Using "${TESTSRC}" as default"
fi
echo "TESTSRC=${TESTSRC}"
## Adding common setup Variables for running shell tests.
. ${TESTSRC}/../../test_env.sh
# Make the JVM libraries and system libraries resolvable for the native test binary:
LD_LIBRARY_PATH=.:${TESTJAVA}/lib/${VM_TYPE}:/usr/lib:$LD_LIBRARY_PATH
export LD_LIBRARY_PATH
# Test 1) Run with stack size adjusted for TLS
${TESTNATIVEPATH}/stack-tls -add_tls || exit $?
# Test 2) Run with no stack size adjustment and expect failure.
#
# Potential failures include StackOverflowError, thread creation failures,
# crashes, and etc. The test case can be used to demonstrate the TLS issue
# but is excluded from running in regular testing.
#${TESTNATIVEPATH}/stack-tls || exit $?
|
md-5/jdk10
|
test/hotspot/jtreg/runtime/TLS/testtls.sh
|
Shell
|
gpl-2.0
| 1,947 |
# shell-script-functions.sh
#
# shell script functions for Relax-and-Recover
#
# This file is part of Relax-and-Recover, licensed under the GNU General
# Public License. Refer to the included COPYING for full text of license.
#
# convert tabs into 4 spaces with: expand --tabs=4 file >new-file
# Source a (ReaR) script file given in $1 and return its exit code.
# Skips empty or missing files, honors SIMULATE mode, and supports
# STEPBYSTEP/BREAKPOINT pauses as well as DEBUGSCRIPTS tracing.
function Source () {
local source_file="$1"
local source_return_code=0
# Skip if source file name is empty:
if test -z "$source_file" ; then
Debug "Skipping Source() because it was called with empty source file name"
return
fi
# Ensure source file is not a directory:
test -d "$source_file" && Error "Source file '$source_file' is a directory, cannot source"
# Skip if source file does not exist of if its content is empty:
if ! test -s "$source_file" ; then
Debug "Skipping Source() because source file '$source_file' not found or empty"
return
fi
# Clip leading standard path to rear files (usually /usr/share/rear/):
local relname="${source_file##$SHARE_DIR/}"
# Simulate sourcing the scripts in $SHARE_DIR
if test "$SIMULATE" && expr "$source_file" : "$SHARE_DIR" >/dev/null; then
LogPrint "Source $relname"
return
fi
# Step-by-step mode or breakpoint if needed
# Usage of the external variable BREAKPOINT: sudo BREAKPOINT="*foo*" rear mkrescue
# an empty default value is set to avoid 'set -eu' error exit if BREAKPOINT is unset:
: ${BREAKPOINT:=}
if [[ "$STEPBYSTEP" || ( "$BREAKPOINT" && "$relname" == "$BREAKPOINT" ) ]] ; then
# Use the original STDIN STDOUT and STDERR when 'rear' was launched by the user
# to get input from the user and to show output to the user (cf. _input-output-functions.sh):
read -p "Press ENTER to include '$source_file' ... " 0<&6 1>&7 2>&8
fi
Log "Including $relname"
# DEBUGSCRIPTS mode settings:
if test "$DEBUGSCRIPTS" ; then
Debug "Entering debugscripts mode via 'set -$DEBUGSCRIPTS_ARGUMENT'."
local saved_bash_flags_and_options_commands="$( get_bash_flags_and_options_commands )"
set -$DEBUGSCRIPTS_ARGUMENT
fi
# The actual work (source the source file):
# Do not error out here when 'source' fails (i.e. when 'source' returns a non-zero exit code)
# because scripts usually return the exit code of their last command
# cf. https://github.com/rear/rear/issues/1965#issuecomment-439330017
# and in general ReaR should not error out in a (helper) function but instead
# a function should return an error code so that its caller can decide what to do
# cf. https://github.com/rear/rear/pull/1418#issuecomment-316004608
source "$source_file"
source_return_code=$?
test "0" -eq "$source_return_code" || Debug "Source function: 'source $source_file' returns $source_return_code"
# Undo DEBUGSCRIPTS mode settings:
if test "$DEBUGSCRIPTS" ; then
Debug "Leaving debugscripts mode (back to previous bash flags and options settings)."
# The only known way how to do 'set +x' after 'set -x' without 'set -x' output for the 'set +x' call
# is a current shell environment where stderr is redirected to /dev/null before 'set +x' is run via
# { set +x ; } 2>/dev/null
# here we avoid much useless 'set -x' debug output for the apply_bash_flags_and_options_commands call:
{ apply_bash_flags_and_options_commands "$saved_bash_flags_and_options_commands" ; } 2>/dev/null
fi
# Breakpoint if needed:
if [[ "$BREAKPOINT" && "$relname" == "$BREAKPOINT" ]] ; then
# Use the original STDIN STDOUT and STDERR when 'rear' was launched by the user
# to get input from the user and to show output to the user (cf. _input-output-functions.sh):
read -p "Press ENTER to continue ... " 0<&6 1>&7 2>&8
fi
# Return the return value of the actual work (source the source file):
return $source_return_code
}
# Collect the scripts of stage directory $SHARE_DIR/$1
# from the standard OS/vendor/backup/output subdirectories,
# sort them by their leading 3-digit script number and
# Source() them one by one, logging the total stage runtime.
function SourceStage () {
local stage="$1"
local start_SourceStage=$SECONDS
Log "======================"
Log "Running '$stage' stage"
Log "======================"
# We always source scripts in the same subdirectory structure.
# The {...,...,...} way of writing it is a shell shortcut that expands as intended.
# The sed pipe is used to sort the scripts by their 3-digit number independent of the directory depth of the script.
# Basically sed inserts a ! before and after the number which makes the number field nr. 2
# when dividing lines into fields by ! so that the subsequent sort can sort by that field.
# The final tr removes the ! to restore the original script name.
# That code would break if ! is used in a directory name of the ReaR subdirectory structure
# but those directories below ReaR's $SHARE_DIR/$stage directory are not named by the user
# so that it even works when a user runs a git clone in his .../ReaRtest!/ directory.
local scripts=( $( cd $SHARE_DIR/$stage
ls -d {default,"$ARCH","$OS","$OS_MASTER_VENDOR","$OS_MASTER_VENDOR_ARCH","$OS_MASTER_VENDOR_VERSION","$OS_VENDOR","$OS_VENDOR_ARCH","$OS_VENDOR_VERSION"}/*.sh \
"$BACKUP"/{default,"$ARCH","$OS","$OS_MASTER_VENDOR","$OS_MASTER_VENDOR_ARCH","$OS_MASTER_VENDOR_VERSION","$OS_VENDOR","$OS_VENDOR_ARCH","$OS_VENDOR_VERSION"}/*.sh \
"$OUTPUT"/{default,"$ARCH","$OS","$OS_MASTER_VENDOR","$OS_MASTER_VENDOR_ARCH","$OS_MASTER_VENDOR_VERSION","$OS_VENDOR","$OS_VENDOR_ARCH","$OS_VENDOR_VERSION"}/*.sh \
"$OUTPUT"/"$BACKUP"/{default,"$ARCH","$OS","$OS_MASTER_VENDOR","$OS_MASTER_VENDOR_ARCH","$OS_MASTER_VENDOR_VERSION","$OS_VENDOR","$OS_VENDOR_ARCH","$OS_VENDOR_VERSION"}/*.sh \
| sed -e 's#/\([0-9][0-9][0-9]\)_#/!\1!_#g' | sort -t \! -k 2 | tr -d \! ) )
# If no script is found, then the scripts array contains only one element '.'
if test "$scripts" = '.' ; then
Log "Finished running empty '$stage' stage"
return 0
fi
# Source() the scripts one by one:
local script=''
for script in "${scripts[@]}" ; do
# Tell the user about unexpected named scripts.
# All sripts must be named with a leading three-digit number NNN_something.sh
# otherwise the above sorting by the 3-digit number may not work as intended
# so that sripts without leading 3-digit number are likely run in wrong order:
grep -q '^[0-9][0-9][0-9]_' <<< $( basename $script ) || LogPrintError "Script '$script' without leading 3-digit number 'NNN_' is likely run in wrong order"
Source $SHARE_DIR/$stage/"$script"
done
Log "Finished running '$stage' stage in $(( SECONDS - start_SourceStage )) seconds"
}
# Remove the build area (unless KEEP_BUILD_DIR is set) and log the total runtime.
function cleanup_build_area_and_end_program () {
    Log "Finished in $((SECONDS-STARTTIME)) seconds"
    if test "$KEEP_BUILD_DIR" ; then
        LogPrint "You should also rm -Rf $BUILD_DIR"
    else
        Log "Removing build area $BUILD_DIR"
        rm -Rf $TMP_DIR
        rm -Rf $ROOTFS_DIR
        # $BUILD_DIR/outputfs is not deleted blindly (see issue #465):
        # verify it is no longer mounted before removing $BUILD_DIR.
        if mount | grep -q "$BUILD_DIR/outputfs" ; then
            # Still mounted it seems - try a lazy umount before deleting:
            LogPrint "Directory $BUILD_DIR/outputfs still mounted - trying lazy umount"
            sleep 2
            umount -f -l $BUILD_DIR/outputfs >&2
            rm -Rf $v $BUILD_DIR/outputfs >&2
        else
            # Not mounted, so we can safely delete $BUILD_DIR/outputfs:
            rm -Rf $BUILD_DIR/outputfs
        fi
        rm -Rf $v $BUILD_DIR >&2
    fi
    Log "End of program reached"
}
|
phracek/rear
|
usr/share/rear/lib/framework-functions.sh
|
Shell
|
gpl-3.0
| 7,972 |
# Upload the Fuego website files to the SourceForge project web space.
FILES="\
favicon.ico \
fuego-screenshot.html \
fuego-screenshot.jpg \
fuego-screenshot-thumb.jpg \
index.html \
"
echo "Enter user id for SourceForge:"
# -r keeps read from mangling backslashes in the typed user id.
read -r NAME
# $FILES is intentionally unquoted so it expands to one word per file;
# the target is quoted since it embeds the user-supplied name.
scp $FILES "$NAME,[email protected]:/home/groups/f/fu/fuego/htdocs"
|
yotomyoto/fuego
|
website/copy.sh
|
Shell
|
gpl-3.0
| 238 |
#!/bin/bash
set -e
# PegasusLite job wrapper (auto-generated); version metadata — presumably
# consumed by pegasus-lite-common.sh, verify against that file.
pegasus_lite_version_major="4"
pegasus_lite_version_minor="7"
pegasus_lite_version_patch="0"
pegasus_lite_enforce_strict_wp_check="true"
pegasus_lite_version_allow_wp_auto_download="true"
. pegasus-lite-common.sh
pegasus_lite_init
# cleanup in case of failures
trap pegasus_lite_signal_int INT
trap pegasus_lite_signal_term TERM
trap pegasus_lite_exit EXIT
echo -e "\n################################ Setting up workdir ################################" 1>&2
# work dir
export pegasus_lite_work_dir=$PWD
pegasus_lite_setup_work_dir
echo -e "\n###################### figuring out the worker package to use ######################" 1>&2
# figure out the worker package to use
pegasus_lite_worker_package
echo -e "\n##################### setting the xbit for executables staged #####################" 1>&2
# set the xbit for any executables staged
/bin/chmod +x example_workflow-medianmemory_0-1.0
echo -e "\n############################# executing the user tasks #############################" 1>&2
# execute the tasks
# -e is dropped around the user task so its exit code can be captured in job_ec:
set +e
pegasus-kickstart -n example_workflow::medianmemory_0:1.0 -N ID0000005 -R condorpool -L example_workflow -T 2016-12-06T17:21:35+00:00 ./example_workflow-medianmemory_0-1.0
job_ec=$?
set -e
|
elainenaomi/sciwonc-dataflow-examples
|
dissertation2017/Experiment 1B/logs/w-09-A/20161206T172135+0000/00/00/medianmemory_0_ID0000005.sh
|
Shell
|
gpl-3.0
| 1,246 |
#!/usr/bin/env bash
# Provision apt sources: add the deadsnakes PPA (newer Pythons) and the
# PostgreSQL PGDG repository, then refresh the package lists.
apt-get update -y
apt-get install -y software-properties-common python-software-properties
add-apt-repository ppa:fkrull/deadsnakes
echo deb http://apt.postgresql.org/pub/repos/apt/ precise-pgdg main > /etc/apt/sources.list.d/pgdg.list
# NOTE(review): only this line uses sudo while the rest assumes root — confirm intended.
wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add -
apt-get update -y
|
berylTechnologies/redcap
|
.provision/scripts/repo_import_and_update.sh
|
Shell
|
gpl-3.0
| 364 |
#!/usr/bin/env bash
# This file is part of The RetroPie Project
#
# The RetroPie Project is the legal property of its developers, whose names are
# too numerous to list here. Please refer to the COPYRIGHT.md file distributed with this source.
#
# See the LICENSE.md file at the top-level directory of this distribution and
# at https://raw.githubusercontent.com/RetroPie/RetroPie-Setup/master/LICENSE.md
#
# RetroPie module metadata consumed by the RetroPie-Setup framework.
rp_module_id="rpix86"
rp_module_desc="DOS Emulator rpix86"
rp_module_help="ROM Extensions: .bat .com .exe .sh\n\nCopy your DOS games to $romdir/pc"
rp_module_licence="FREEWARE http://rpix86.patrickaalto.com/rdown.html"
rp_module_section="opt"
# Platforms this module is NOT available on:
rp_module_flags="!x86 !mali !kms"
# Install the prebuilt rpix86 binary plus 4DOS.com into $md_inst.
function install_bin_rpix86() {
downloadAndExtract "$__archive_url/rpix86.tar.gz" "$md_inst"
# install 4DOS.com
downloadAndExtract "$__archive_url/4dos.zip" "$md_inst"
patchVendorGraphics "$md_inst/rpix86"
}
# Create the "pc" ROM directory and a "+Start rpix86.sh" launcher that either
# runs a .sh ROM directly or starts rpix86 with the given ROM, then register
# the emulator with the frontend.
function configure_rpix86() {
mkRomDir "pc"
rm -f "$romdir/pc/Start rpix86.sh" "$romdir/pc/+Start.txt"
# Note: \$ and \${...} below are escaped on purpose so they expand when the
# generated launcher runs, while $md_inst expands now.
cat > "$romdir/pc/+Start rpix86.sh" << _EOF_
#!/bin/bash
params=("\$@")
pushd "$md_inst"
if [[ "\${params[0]}" == *.sh ]]; then
bash "\${params[@]}"
else
./rpix86 -a0 -f2 "\${params[@]}"
fi
popd
_EOF_
chmod +x "$romdir/pc/+Start rpix86.sh"
chown $user:$user "$romdir/pc/+Start rpix86.sh"
ln -sfn "$romdir/pc" games
addEmulator 0 "$md_id" "pc" "bash $romdir/pc/+Start\ rpix86.sh %ROM%"
addSystem "pc"
}
|
HerbFargus/RetroPie-Setup
|
scriptmodules/emulators/rpix86.sh
|
Shell
|
gpl-3.0
| 1,458 |
# Run the import_grid_approximation unit test through the shared Tulip test driver.
sh tulip_run_test.sh import_grid_approximation
|
jukkes/TulipProject
|
unit_test/gui/run_import_grid_approximation_test.sh
|
Shell
|
gpl-3.0
| 48 |
#!/bin/bash
set -euf -o pipefail

# Build qemu-img v2.11.1 from source, install it to /usr/local/bin, and then
# remove the build-only packages again to keep the image small.

# Packages needed only while building:
build_deps=(
gcc
build-essential
pkg-config
git
openssl
ca-certificates
libglib2.0-dev
libfdt-dev
zlib1g-dev
libbz2-dev
libcurl4-gnutls-dev
libpixman-1-dev
librbd-dev
librdmacm-dev
libsasl2-dev
libsnappy-dev
libssh2-1-dev
liblzo2-dev
xfslibs-dev
)
# Runtime dependencies that must stay installed:
deps=(
python
libglib2.0-0
libfdt1
zlib1g
libbz2-1.0
libcurl3-gnutls
libpixman-1-0
librbd1
librdmacm1
libsasl2-2
libsnappy1v5
libssh2-1
liblzo2-2
)
# Build deps that were NOT already installed before we started — only these are
# removed afterwards. Read into a proper array (the original kept a flat string
# and relied on unquoted word-splitting).
mapfile -t build_deps_remove < <(comm -23 <(printf '%s\n' "${build_deps[@]}" | sort) <(dpkg -l | awk '{print $2}' | cut -f1 -d: | sort))
echo "Installing prereqs and build deps: ${build_deps[*]} ${deps[*]}"
apt-get update && apt-get install -y --no-install-recommends "${build_deps[@]}" "${deps[@]}"
# Create temporary directory for building
export TMPDIR=$(mktemp -d)
echo "getting qemu source"
cd "${TMPDIR}"
git clone https://github.com/qemu/qemu.git
cd qemu
git checkout v2.11.1
echo "building qemu"
mkdir build
cd build
../configure --enable-tools --disable-system
make
cp ./qemu-img /usr/local/bin/
echo "removing $TMPDIR"
cd
rm -rf "${TMPDIR}"
# Guard against an empty removal list: apt-get errors out without packages.
if [ "${#build_deps_remove[@]}" -gt 0 ]; then
    echo "Removing build deps: ${build_deps_remove[*]}"
    apt-get remove -y "${build_deps_remove[@]}"
fi
apt-get autoremove -y
echo "Clearing apt cache"
rm -rf /var/lib/apt/lists/*
|
wtsi-hgi/hgi-systems
|
docker/scripts/get-qemu-img.sh
|
Shell
|
gpl-3.0
| 1,393 |
#!/bin/sh
# Regenerate and decompose the mesh for the parallel balzano3 test case.
# BUG FIX: the shebang was "#/bin/sh" (missing '!'), so it was just a comment.
# -f keeps rm from failing when no previous mesh files exist.
rm -f mesh_connected_*
gmsh mesh_connected.geo -2
../../../scripts/gmsh2triangle mesh_connected.msh -2
../../../bin/fldecomp -n 4 -m triangle mesh_connected
|
FluidityProject/multifluids
|
tests/wetting_and_drying_balzano3_cg_parallel/mesh/makemesh_parallel.sh
|
Shell
|
lgpl-2.1
| 164 |
#!/bin/sh
# Run the aio_client SSL sample under valgrind memcheck with full leak checking;
# still-reachable blocks are reported too (--show-reachable=yes).
valgrind --show-reachable=yes --tool=memcheck --leak-check=yes -v ./aio_client -k -S -c 1 -n 10
|
hankwing/Squirrel
|
library/acl/lib_acl_cpp/samples/ssl/aio_client/valgrind.sh
|
Shell
|
apache-2.0
| 107 |
#!/bin/bash
# Spin up the multinode docker-compose environment, run the rundeck test
# suite inside it, dump logs and tear everything down; exits with the
# test suite's exit code.
set -euo pipefail
. common.sh
export DOCKER_COMPOSE_SPEC=docker-compose-multinode-test.yml
# Stage locally built artifacts into the docker build context when present:
if [ -f rundeck-launcher.war ] ; then
mv rundeck-launcher.war dockers/rundeck/data/
fi
if [ -f rd.deb ] ; then
mv rd.deb dockers/rundeck/data/
fi
build_rdtest_docker
# clean up docker env
docker-compose -f $DOCKER_COMPOSE_SPEC down --volumes --remove-orphans
# (redundant: -e is already enabled by the set -euo pipefail above)
set -e
# re-build docker env
docker-compose -f $DOCKER_COMPOSE_SPEC build
# run docker
docker-compose -f $DOCKER_COMPOSE_SPEC up -d
echo "up completed, running tests..."
# Disable -e so a failing test run does not abort before log collection:
set +e
docker-compose -f $DOCKER_COMPOSE_SPEC exec -T --user rundeck rundeck1 \
bash scripts/run_tests.sh /tests/rundeck /tests/run-tests.sh testproj1
EC=$?
echo "run_tests.sh finished with: $EC"
docker-compose -f $DOCKER_COMPOSE_SPEC logs
# Stop and clean all
docker-compose -f $DOCKER_COMPOSE_SPEC down --volumes --remove-orphans
exit $EC
|
variacode/rundeck
|
test/docker/test.sh
|
Shell
|
apache-2.0
| 893 |
#!/bin/bash
### Signal handlers ###
# Graceful shutdown on TERM/INT/EXIT: stop the child process (if any) and
# exit 0. Any other signal name is treated as abnormal termination (exit 1).
function handle_signal {
    if [[ "$1" == "TERM" || "$1" == "INT" || "$1" == "EXIT" ]]; then
        if [ -n "$CMD_PID" ]; then
            kill "$CMD_PID" &>/dev/null
            sleep 1
        fi
        echo "Exiting ..." >&2
        exit 0
    else
        echo "Terminating abnormally" >&2
        exit 1
    fi
}
# Log and swallow a signal (used for HUP).
# BUG FIX: the original called an undefined "log" helper, which made the
# handler fail with "command not found"; use echo like handle_signal does.
function ignore_signal {
    echo "Caught signal $1 - ignored" >&2
}
# Install the handlers: graceful shutdown on TERM/INT, HUP is ignored.
trap "handle_signal TERM" "TERM"
trap "handle_signal INT" "INT"
trap "ignore_signal HUP" "HUP"
### Sleeper function ###
# Block until process $1 exits, polling once per second.
# The sleep runs in the background and is wait-ed on — presumably so a
# trapped signal can interrupt the wait promptly; an interrupted sleeper
# is killed before the next poll.
function wait_to_finish {
    local sleeper_pid
    while : ; do
        sleep 1 &
        sleeper_pid=$!
        wait "$sleeper_pid" || kill "$sleeper_pid" &>/dev/null
        ps -p "$1" > /dev/null || break   # target process gone - stop waiting
    done
}
### Arguments ###
# context variables are used in $CATALINA_HOME/conf/Catalina/localhost/ROOT.xml
# ENDPOINT, GRAPH_STORE and ONTOLOGY are mandatory; AUTH_USER, AUTH_PWD and
# PREEMPTIVE_AUTH are optional.
if [ -z "$ENDPOINT" ] ; then
echo '$ENDPOINT not set'
exit 1
fi
if [ -z "$GRAPH_STORE" ] ; then
echo '$GRAPH_STORE not set'
exit 1
fi
if [ -z "$ONTOLOGY" ] ; then
echo '$ONTOLOGY not set'
exit 1
fi
# if user-defined location mapping exists, append it to system location mapping
if [ -f "$CUSTOM_LOCATION_MAPPING" ] ; then
cat "$CUSTOM_LOCATION_MAPPING" >> "$LOCATION_MAPPING"
cat "$LOCATION_MAPPING"
fi
# set Context variables (which are used in $CATALINA_HOME/conf/Catalina/localhost/ROOT.xml)
# Each present variable becomes an xsltproc --stringparam argument below.
if [ -n "$ENDPOINT" ] ; then
ENDPOINT_PARAM="--stringparam sd:endpoint $ENDPOINT "
fi
if [ -n "$GRAPH_STORE" ] ; then
GRAPH_STORE_PARAM="--stringparam a:graphStore $GRAPH_STORE "
fi
if [ -n "$ONTOLOGY" ] ; then
ONTOLOGY_PARAM="--stringparam ldt:ontology $ONTOLOGY "
fi
if [ -n "$AUTH_USER" ] ; then
AUTH_USER_PARAM="--stringparam a:authUser $AUTH_USER "
fi
if [ -n "$AUTH_PWD" ] ; then
AUTH_PWD_PARAM="--stringparam a:authPwd $AUTH_PWD "
fi
if [ -n "$PREEMPTIVE_AUTH" ] ; then
PREEMPTIVE_AUTH_PARAM="--stringparam a:preemptiveAuth $PREEMPTIVE_AUTH "
fi
### Execution ###
# $CATALINA_HOME must be the WORKDIR at this point
# Rewrite ROOT.xml in place via the context.xsl stylesheet:
transform="xsltproc \
--output conf/Catalina/localhost/ROOT.xml \
$ENDPOINT_PARAM \
$GRAPH_STORE_PARAM \
$ONTOLOGY_PARAM \
$AUTH_USER_PARAM \
$AUTH_PWD_PARAM \
$PREEMPTIVE_AUTH_PARAM \
conf/Catalina/localhost/context.xsl \
conf/Catalina/localhost/ROOT.xml"
eval "$transform"
# run Tomcat process in the background (with JPDA debugging when JPDA_ADDRESS is set)
if [ -z "$JPDA_ADDRESS" ] ; then
catalina.sh run &
else
catalina.sh jpda run &
fi
CMD_PID=$!
# Block until Tomcat exits, keeping this shell alive to service traps:
wait_to_finish $CMD_PID
|
Graphity/graphity-processor
|
entrypoint.sh
|
Shell
|
apache-2.0
| 2,591 |
#!/bin/sh
#filename release_diff.sh src desc
# Show (dry-run) what would change when releasing module $1: svn-update the
# local trunk copy, then rsync -n it against the publish rsync daemon target
# $2 and print only the differing files.
LANG="zh_CN.UTF-8";
trunk="/home/www/publish/trunk/php/";
#tag="[email protected]::publish/";
tag="[email protected]::publish/";
# NOTE(review): credentials are hard-coded in the script; consider moving
# them into a protected configuration file.
username="admin";
password="admin1234";
#set -x;
# Validate arguments before deriving any paths from them (the original
# computed src/desc from $1/$2 before checking they were supplied).
if [ $# -lt 2 ]; then
    echo "$0 src desc";
    exit 1;
fi;
src="$trunk$1/";
desc="${tag}${2}/";
#update
echo "svn up $1 ....."
#/usr/bin/svn cleanup ${trunk}$1;
/usr/bin/svn up --username=$username --password=$password ${trunk}$1;
#diff (the rsync -n flag makes this a dry run; nothing is transferred)
echo "diff $1 $2 .....";
/usr/bin/rsync -avnrCtopg --progress --exclude-from=/etc/rsyncd/rsyncd_exclude --delete-excluded --password-file=/etc/rsyncd/rsyncd_web.pwd ${src} ${desc} | egrep -v '/$|files' |egrep -v '/.svn/'
#set +x;
exit 0;
|
libo2452/workspace
|
collection/manage/codeRelease/release_diff.sh
|
Shell
|
apache-2.0
| 709 |
#!/bin/sh
# Returns 0, with "jobid=<jobid>" on stdout, if job submitted.
# Returns 1, with multiline error message on stdout otherwise.
#
# This is invoked as:
# submit.sh url 'commandline'
# If we wanted to set scheduler options, we'd pass them in before the url as a
# single argument and give them to set_scheduler_options, then shift.
source $CIPRES_SCRIPT/job_wrapper.sh
URL=$1
shift
set_scheduler_options
# The here-document is intentionally unquoted so $QUEUE, $URL, $* etc. expand
# NOW, while writing the PBS script. The two date calls, however, must run when
# the JOB starts/finishes, so their backticks are escaped to survive into the
# generated script (previously they expanded at submission time, recording the
# submission timestamp instead of the job's start/end times).
cat > $RUNFILE << EOF
#!/bin/sh
#PBS -q $QUEUE
#PBS -N $JOBNAME
#PBS -l walltime=$RUNTIME:00
#PBS -o stdout.txt
#PBS -e stderr.txt
#PBS -V
#PBS -M $EMAIL
##PBS -m ae
#PBS -A $ACCOUNT
#PBS -d $JOBDIR
echo Job starting at \`date\` > start.txt
curl $URL\&status=START
$*
echo Job finished at \`date\` > done.txt
curl $URL\&status=DONE
EOF
submit_job
exit $?
|
SciGaP/DEPRECATED-Cipres-Airavata-POC
|
saminda/cipres-airavata/sdk/scripts/remote_resource/triton/submit.sh
|
Shell
|
apache-2.0
| 778 |
#!/bin/bash
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#Restarts Hue
#parse command line arguments
# Populates CM_HOSTNAME/CM_PORT/CM_HTTP/CM_API/CM_USERNAME/CM_PASSWORD_INPUT,
# NEW_PASS, ENCODE_LOCATION and ENC_PASSWORD_FILE from the CLI options
# described in usage().
parse_arguments()
{
# Test that we're using compatible getopt version.
getopt -T > /dev/null
if [[ $? -ne 4 ]]; then
echo "Incompatible getopt version."
exit 1
fi
# Parse short and long option parameters.
CM_HOSTNAME="localhost"
CM_PORT="7180"
CM_HTTP="http"
CM_API="v11"
CM_USERNAME="admin"
CM_PASSWORD_INPUT=
ENCODE_LOCATION=/var/lib/hue
GETOPT=`getopt -n $0 -o c:,p:,u:,w:,n,s,l:,h \
-l cmhost:,cmport:,cmuser:,cmpass:,newpass,ssl,encodeloc:,help \
-- "$@"`
eval set -- "$GETOPT"
while true;
do
case "$1" in
-c|--cmhost)
CM_HOSTNAME=$2
shift 2
;;
-p|--cmport)
CM_PORT=$2
shift 2
;;
-u|--cmuser)
CM_USERNAME=$2
shift 2
;;
-w|--cmpass)
CM_PASSWORD_INPUT=$2
shift 2
;;
-n|--newpass)
NEW_PASS=1
shift
;;
-s|--ssl)
CM_HTTP="https"
shift
;;
-l|--encodeloc)
ENCODE_LOCATION=$2
shift 2
;;
--)
shift
break
;;
*)
usage
exit 1
;;
esac
done
#
# Password file is named after this script (without extension), e.g. foo.enc:
ENC_PASSWORD_FILE=${ENCODE_LOCATION}/`basename "$0" | awk -F\. '{print $1}'`.enc
}
# Print the CLI help text to stdout (the here-doc below is runtime output).
usage() {
cat << EOF
usage: $0 [options]
Restarts Hue instances with high memory utilization through CM:
OPTIONS
-c|--cmhost <hostname> Host where CM is running - default localhost.
-p|--cmport <port> Port CM is running on - default ${CM_PORT}.
-u|--cmuser <cm_user> Admin User in CM - default admin.
-w|--cmpass <user_pass> Admin User password in CM, required on first run, no default. Will prompt
if not provided through this flag. Future runs will use
encrypted version in <enc_loc>/`basename "$0" | awk -F\. '{print $1}'`.enc
-s|--ssl Enable SSL.
-n|--newpass Prompt for a new password.
-l|--encodeloc <enc_loc> Location to store encoded password in file - default /var/lib/hue.
-v|--verbose Enable verbose logging.
-h|--help Show this message.
EOF
}
# Entry point: must run as root. Resolves the CM password (CLI flag, prompt,
# or the stored base64 file), discovers the cluster/Hue service/roles through
# the CM REST API, and POSTs a restart command for all Hue roles.
main() {
parse_arguments "$@"
if [[ ! ${USER} =~ .*root.* ]]
then
echo "Script must be run as root: exiting"
exit 1
fi
if [[ ! -d ${ENCODE_LOCATION} ]]
then
mkdir -p ${ENCODE_LOCATION}
fi
# Persist a password supplied on the command line (base64-obfuscated,
# not encrypted) for future runs, readable only by root:
if [[ ! -z ${CM_PASSWORD_INPUT} ]]
then
echo ${CM_PASSWORD_INPUT} | base64 > ${ENC_PASSWORD_FILE}
chown root:root ${ENC_PASSWORD_FILE}
chmod 600 ${ENC_PASSWORD_FILE}
fi
# No flag given: prompt on the first run or when --newpass was requested.
if [[ -z ${CM_PASSWORD_INPUT} ]]
then
if [[ ! -f ${ENC_PASSWORD_FILE} ]] || [[ ! -z ${NEW_PASS} ]]
then
message "CM Admin user password required on first run"
read -s -p "Please enter password:" CM_PASSWORD_INPUT
echo "New password provided"
echo ${CM_PASSWORD_INPUT} | base64 > ${ENC_PASSWORD_FILE}
chown root:root ${ENC_PASSWORD_FILE}
chmod 600 ${ENC_PASSWORD_FILE}
fi
fi
if [[ ! -f ${ENC_PASSWORD_FILE} ]]
then
message "CM Admin password has not been provided and this is"
message "is first run of the script. Please run again and"
message "provide password."
exit 1
else
CM_PASSWORD=`cat ${ENC_PASSWORD_FILE} | base64 --decode`
fi
# When SSL was requested but the port is still the default plain port,
# switch to the default TLS port:
if [[ ${CM_HTTP} =~ .*https.* ]]
then
if [[ ${CM_PORT} =~ .*7180.* ]]
then
CM_PORT=7183
fi
fi
# Discover the cluster name and the Hue service name via the CM API:
CLUSTERNAME=$(urlencode "$(curl -L -s -k -X GET -u ${CM_USERNAME}:${CM_PASSWORD} "${CM_HTTP}://${CM_HOSTNAME}:${CM_PORT}/api/${CM_API}/clusters" | grep '"name" :' | awk -F\" '{print $4}')")
SERVICENAME=$(urlencode "$(curl -L -s -k -X GET -u ${CM_USERNAME}:${CM_PASSWORD} "${CM_HTTP}://${CM_HOSTNAME}:${CM_PORT}/api/${CM_API}/clusters/${CLUSTERNAME}/services" | grep -B1 '"HUE"' | grep '"name" :' | awk -F\" '{print $4}')")
# Build the JSON list of Hue role names to restart:
ROLES_JSON="{ \"items\" : [ \""
while read -r ROLE
do
ROLES_JSON="${ROLES_JSON}${ROLE}\",\""
done < <(curl -L -s -k -X GET -u ${CM_USERNAME}:${CM_PASSWORD} "${CM_HTTP}://${CM_HOSTNAME}:${CM_PORT}/api/${CM_API}/clusters/${CLUSTERNAME}/services/${SERVICENAME}/roles" | grep ${SERVICENAME}- | grep '"name" :' | awk -F\" '{print $4}')
ROLES_JSON=$(echo ${ROLES_JSON} | sed "s/,\"$/ ] }/g")
RESTART_API_URL="/api/${CM_API}/clusters/${CLUSTERNAME}/services/${SERVICENAME}/roleCommands/restart"
message "Restarting Hue process -u ${CM_USERNAME}:${CM_PASSWORD}: ${CM_HTTP}://${CM_HOSTNAME}:${CM_PORT}${RESTART_API_URL}: Roles: ${ROLES_JSON}"
RESULTS=`curl -s -X POST -u ${CM_USERNAME}:${CM_PASSWORD} -i -H "content-type:application/json" -d "${ROLES_JSON}" "${CM_HTTP}://${CM_HOSTNAME}:${CM_PORT}${RESTART_API_URL}"`
}
# urlencode <string> — percent-encode every byte except the RFC 3986
# unreserved characters [a-zA-Z0-9.~_-], writing the result to stdout.
urlencode() {
    local saved_lc_collate=$LC_COLLATE
    # Byte-wise, locale-independent character classification:
    LC_COLLATE=C
    local i ch
    for (( i = 0; i < ${#1}; i++ )); do
        ch=${1:i:1}
        if [[ "$ch" == [a-zA-Z0-9.~_-] ]]; then
            printf "$ch"
        else
            printf '%%%02X' "'$ch"
        fi
    done
    LC_COLLATE=$saved_lc_collate
}
# Print a single message line to stdout.
message() { echo "$1"; }
# Entry point: forward all CLI arguments to main().
main "$@"
|
cloudera/hue
|
tools/ops/script_runner/hue_restart_cm.sh
|
Shell
|
apache-2.0
| 5,928 |
#
#/**
# * Copyright 2007 The Apache Software Foundation
# *
# * Licensed to the Apache Software Foundation (ASF) under one
# * or more contributor license agreements. See the NOTICE file
# * distributed with this work for additional information
# * regarding copyright ownership. The ASF licenses this file
# * to you under the Apache License, Version 2.0 (the
# * "License"); you may not use this file except in compliance
# * with the License. You may obtain a copy of the License at
# *
# * http://www.apache.org/licenses/LICENSE-2.0
# *
# * Unless required by applicable law or agreed to in writing, software
# * distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
# */
# Modelled after $HBASE_HOME/conf/hbase-env.sh.
# Set environment variables here.
# The java implementation to use. Java 1.6 required.
# export JAVA_HOME=/usr/java/jdk1.6.0/
# Extra Java CLASSPATH elements. Optional.
# export WASP_CLASSPATH=
# The maximum amount of heap to use, in MB. Default is 1000.
# export WASP_HEAPSIZE=1000
# Extra Java runtime options.
# Below are what we set by default. May only work with SUN JVM.
# For more on why as well as other possible settings,
# see http://wiki.apache.org/hadoop/PerformanceTuning
# Appends to (rather than overwrites) any WASP_OPTS already present in the
# environment, so user-supplied options are preserved.
export WASP_OPTS="$WASP_OPTS -XX:+UseConcMarkSweepGC"
# Uncomment below to enable java garbage collection logging in the .out file.
# export WASP_OPTS="$WASP_OPTS -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps $WASP_GC_OPTS"
# Uncomment below (along with above GC logging) to put GC information in its own logfile (will set WASP_GC_OPTS)
# export WASP_USE_GC_LOGFILE=true
# Uncomment and adjust to enable JMX exporting
# See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access.
# More details at: http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html
#
# export WASP_JMX_BASE="-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false"
# export WASP_MASTER_OPTS="$WASP_MASTER_OPTS $WASP_JMX_BASE -Dcom.sun.management.jmxremote.port=10101"
# export WASP_FSERVER_OPTS="$WASP_FSERVER_OPTS $WASP_JMX_BASE -Dcom.sun.management.jmxremote.port=10102"
# export WASP_THRIFT_OPTS="$WASP_THRIFT_OPTS $WASP_JMX_BASE -Dcom.sun.management.jmxremote.port=10103"
# export WASP_ZOOKEEPER_OPTS="$WASP_ZOOKEEPER_OPTS $WASP_JMX_BASE -Dcom.sun.management.jmxremote.port=10104"
# File naming hosts on which FServers will run. $WASP_HOME/conf/fservers by default.
# export WASP_FSERVERS=${WASP_HOME}/conf/fservers
# Uncomment and adjust to keep all the FServer pages mapped to be memory resident
#WASP_FSERVER_MLOCK=true
#WASP_FSERVER_UID="wasp"
# File naming hosts on which backup HMaster will run. $WASP_HOME/conf/backup-masters by default.
# export WASP_BACKUP_MASTERS=${WASP_HOME}/conf/backup-masters
# Extra ssh options. Empty by default.
# export WASP_SSH_OPTS="-o ConnectTimeout=1 -o SendEnv=WASP_CONF_DIR"
# Where log files are stored. $WASP_HOME/logs by default.
# export WASP_LOG_DIR=${WASP_HOME}/logs
# Enable remote JDWP debugging of major Wasp processes. Meant for Core Developers
# export WASP_MASTER_OPTS="$WASP_MASTER_OPTS -Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=8070"
# export WASP_FSERVER_OPTS="$WASP_FSERVER_OPTS -Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=8071"
# export WASP_THRIFT_OPTS="$WASP_THRIFT_OPTS -Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=8072"
# export WASP_ZOOKEEPER_OPTS="$WASP_ZOOKEEPER_OPTS -Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=8073"
# A string representing this instance of wasp. $USER by default.
# export WASP_IDENT_STRING=$USER
# The scheduling priority for daemon processes. See 'man nice'.
# export WASP_NICENESS=10
# The directory where pid files are stored. /tmp by default.
# export WASP_PID_DIR=/var/hadoop/pids
# Seconds to sleep between slave commands. Unset by default. This
# can be useful in large clusters, where, e.g., slave rsyncs can
# otherwise arrive faster than the master can service them.
# export WASP_SLAVE_SLEEP=0.1
# Tell Wasp whether it should manage it's own instance of Zookeeper or not.
# export WASP_MANAGES_ZK=true
# The default log rolling policy is RFA, where the log file is rolled as per the size defined for the
# RFA appender. Please refer to the log4j.properties file to see more details on this appender.
# In case one needs to do log rolling on a date change, one should set the environment property
# WASP_ROOT_LOGGER to "<DESIRED_LOG LEVEL>,DRFA".
# For example:
# WASP_ROOT_LOGGER=INFO,DRFA
# The reason for changing default to RFA is to avoid the boundary case of filling out disk space as
# DRFA doesn't put any cap on the log size.
|
alibaba/wasp
|
conf/wasp-env.sh
|
Shell
|
apache-2.0
| 4,949 |
#!/bin/bash -e
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Pull in shared helpers (provides run_docker) from this script's directory.
source "$(dirname $0)/dev_common.sh"
SCRIPT_NAME="$0"
# Default lint steps, run in this order when none are named on the command line.
DEFAULT_STEPS=( file_type asf cpplint clang_format pylint python_format jnilint cppdocs mypy )
# 0 = report problems only; 1 = rewrite files in place where a fixer exists (-i).
inplace_fix=0
# run_lint_step [--validate-only] <step_name>
# Map <step_name> to the command implementing that lint step and, unless
# --validate-only was given, execute it inside the ci_lint docker image.
# Unknown step names print usage to stderr and exit 2.  The global
# inplace_fix flag selects the in-place fixer variant where one exists.
function run_lint_step() {
    check_only=0
    if [ "$1" == "--validate-only" ]; then
        check_only=1
        shift
    fi
    case "$1" in
        file_type)
            lint_cmd=( python3 tests/lint/check_file_type.py )
            ;;
        asf)
            lint_cmd=( tests/lint/check_asf_header.sh --local )
            if [ $inplace_fix -eq 1 ]; then
                lint_cmd=( "${lint_cmd[@]}" --fix )
            fi
            ;;
        clang_format)
            if [ $inplace_fix -eq 1 ]; then
                # NOTE: need to run git status to update some docker-side cache. Otherwise,
                # git-clang-format will fail with "The following files would be modified but have
                # unstaged changes:"
                lint_cmd=( bash -c 'git status &>/dev/null && tests/lint/git-clang-format.sh -i origin/main' )
            else
                lint_cmd=( tests/lint/clang_format.sh )
            fi
            ;;
        cpplint)
            lint_cmd=( tests/lint/cpplint.sh )
            ;;
        flake8)
            lint_cmd=( tests/lint/flake8.sh )
            ;;
        pylint)
            lint_cmd=( tests/lint/pylint.sh )
            ;;
        python_format)
            if [ $inplace_fix -eq 1 ]; then
                lint_cmd=( tests/lint/git-black.sh -i origin/main )
            else
                lint_cmd=( tests/lint/python_format.sh )
            fi
            ;;
        jnilint)
            lint_cmd=( tests/lint/jnilint.sh )
            ;;
        cppdocs)
            lint_cmd=( tests/lint/cppdocs.sh )
            ;;
        mypy)
            lint_cmd=( tests/scripts/task_mypy.sh )
            ;;
        *)
            {
                echo "error: don't know how to run lint step: $1"
                echo "usage: ${SCRIPT_NAME} [-i] <lint_step>"
                echo
                echo "options:"
                echo " -i Fix lint errors in-place where possible (modifies non-compliant files)"
                echo
                echo "available lint_step: ${DEFAULT_STEPS[@]}"
            } >&2
            exit 2
            ;;
    esac
    shift
    if [ $check_only -eq 0 ]; then
        run_docker -it "ci_lint" "${lint_cmd[@]}"
    fi
}
# With no arguments run every default step; otherwise run only the named ones.
if [ $# -eq 0 ]; then
    # NOTE: matches order in tests/scripts/task_lint.sh
    steps=( "${DEFAULT_STEPS[@]}" )
else
    steps=( "$@" )
fi
validated_steps=( )
# First pass: strip the -i flag and fail fast on unknown step names, so a
# typo aborts before any (slow) docker lint run starts.
for step in "${steps[@]}"; do
    if [ "${step}" == "-i" ]; then
        inplace_fix=1
        continue
    fi
    run_lint_step --validate-only "$step"
    validated_steps=( "${validated_steps[@]}" "$step" )
done
# Second pass: actually run each validated step.
for step in "${validated_steps[@]}"; do
    run_lint_step "$step"
done
|
dmlc/tvm
|
docker/lint.sh
|
Shell
|
apache-2.0
| 3,542 |
#!/bin/bash
#
# Copyright 2005-2014 Red Hat, Inc.
#
# Red Hat licenses this file to you under the Apache License, version
# 2.0 (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
#
# Discover the APP_BASE from the location of this script.
#
# Derive APP_BASE from this script's location unless the caller already set it.
if [ -z "$APP_BASE" ] ; then
  DIRNAME=$(dirname "$0")
  # $() nests cleanly (vs backticks); '&&' ensures we do not silently fall
  # back to the current directory when the cd fails (the old `cd; pwd`
  # form would).
  APP_BASE=$(cd "$DIRNAME" && pwd)
  export APP_BASE
fi
echo "Running launcher in folder: $APP_BASE"
# Run one-time configuration, then start ZooKeeper in the foreground so the
# process supervisor tracks its lifetime directly.  Quoting the command
# paths keeps this working when APP_BASE contains spaces.
"$APP_BASE/config.sh"
"$APP_BASE/bin/zkServer.sh" start-foreground
|
davsclaus/jube
|
images/fabric8/zookeeper/src/main/distro/start.sh
|
Shell
|
apache-2.0
| 914 |
#!/bin/bash
# This is the automated release script

# Abort immediately if any step fails -- without this, a broken
# `npm run build` (the stated safety check below) would NOT stop the
# version bump, tag push, or docs publish.
set -e

# guard against stupid
if [ -z "$1" ]; then
  echo "You must specify a new version level: [patch, minor, major]";
  exit 1;
fi

# make sure all our dependencies are installed so we can publish docs
npm install

# try to build to make sure we don't publish something really broken
npm run build

# bump the version (quoted so an accidental multi-word argument fails
# loudly instead of expanding into extra npm arguments)
echo "npm version $1"
npm version "$1"
git push
git push --tags

# start from a clean state
rm -rf docs/ out/
mkdir out

# build the docs; docs/github-api is expected to contain a single
# version-named directory
npm run make-docs
VERSION=$(ls docs/github-api)

# switch to gh-pages and add the docs
mv docs/github-api/* out/
rm -rf docs/
git checkout gh-pages
mv out/* docs/
echo "$VERSION" >> _data/versions.csv
git add .
git commit -m "adding docs for v$VERSION"
git push
|
github-tools/github
|
release.sh
|
Shell
|
bsd-3-clause
| 764 |
#!/bin/bash
#
# Preprocessor for 'less'. Used when this environment variable is set:
# LESSOPEN="|lesspipe %s"
# TODO: handle compressed files better
# Trace every command when LESSDEBUG is set in the environment.
[[ -n ${LESSDEBUG} ]] && set -x
# Exit quietly (status 0) when less closes the pipe early, e.g. user quits.
trap 'exit 0' PIPE
# guesscompress <filename>
# Print the command line (tool plus flags) that decompresses <filename> to
# stdout, chosen purely by extension; plain "cat" for anything unknown.
guesscompress() {
	local name=$1
	case "${name}" in
		*.bz2|*.bz) echo "bunzip2 -c" ;;
		*.gz|*.z) echo "gunzip -c" ;;
		*.lz) echo "lzip -dc" ;;
		*.lzma) echo "unlzma -c" ;;
		*.lzo) echo "lzop -dc" ;;
		*.xz) echo "xzdec" ;;
		*) echo "cat" ;;
	esac
}
# lesspipe_file <file>
# Fallback classifier: ask file(1) what <file> is, map the description to a
# filename suffix, and re-dispatch through lesspipe with that suffix.
# Raw ": data" files are hexdumped directly.  Returns 1 when the type is
# unrecognized so the caller can try something else.
lesspipe_file() {
	local out=$(file -L -- "$1")
	local suffix
	case ${out} in
	*" 7-zip archive"*) suffix="7z";;
	*" ar archive"*) suffix="a";;
	*" CAB-Installer"*) suffix="cab";;
	*" cpio archive"*) suffix="cpio";;
	*" ELF "*) suffix="elf";;
	*" LHa"*archive*) suffix="lha";;
	*" troff "*) suffix="man";;
	*" script text"*) suffix="sh";;
	*" shared object"*) suffix="so";;
	*" tar archive"*) suffix="tar";;
	*" Zip archive"*) suffix="zip";;
	*": data") hexdump -C -- "$1"; return 0;;
	*) return 1;;
	esac
	# Recurse with a fake ".suffix" match name so the main dispatcher
	# handles the file as if it had that extension.
	lesspipe "$1" ".${suffix}"
	return 0
}
# lesspipe <file> [match-name]
# Core dispatcher: choose a viewer/converter for <file> based on its name.
# The optional second argument substitutes the name used for pattern
# matching (this is the recursion trick behind ".tgz" -> ".tar.gz" and the
# lowercase retry).  May exit the script directly (user filter, URL dump,
# final fallthrough case).
lesspipe() {
	local match=$2
	[[ -z ${match} ]] && match=$1
	# "cat" when the name carries no recognized compression suffix.
	local DECOMPRESSOR=$(guesscompress "${match}")
	# User filters
	if [[ -x ~/.lessfilter ]] ; then
		~/.lessfilter "$1" && exit 0
	fi
	local ignore
	for ignore in ${LESSIGNORE} ; do
		[[ ${match} == *.${ignore} ]] && exit 0
	done
	case "${match}" in
	### Doc files ###
	*.[0-9n]|*.man|\
	*.[0-9n].bz2|*.man.bz2|\
	*.[0-9n].gz|*.man.gz|\
	*.[0-9n].lzma|*.man.lzma|\
	*.[0-9n].xz|*.man.xz|\
	*.[0-9][a-z].gz|*.[0-9][a-z].gz)
		local out=$(${DECOMPRESSOR} -- "$1" | file -)
		case ${out} in
		*troff*)
			# Need to make sure we pass path to man or it will try
			# to locate "$1" in the man search paths
			if [[ $1 == /* ]] ; then
				man -- "$1"
			else
				man -- "./$1"
			fi
			;;
		*text*)
			${DECOMPRESSOR} -- "$1"
			;;
		*)
			# We could have matched a library (libc.so.6), so let
			# `file` figure out what the hell this thing is
			lesspipe_file "$1"
			;;
		esac
		;;
	*.dvi) dvi2tty "$1" ;;
	*.ps|*.pdf) ps2ascii "$1" || pstotext "$1" || pdftotext "$1" ;;
	*.doc) antiword "$1" || catdoc "$1" ;;
	*.rtf) unrtf --nopict --text "$1" ;;
	*.conf|*.txt|*.log) ;; # force less to work on these directly #150256
	### URLs ###
	ftp://*|http://*|*.htm|*.html)
		for b in links2 links lynx ; do
			${b} -dump "$1" && exit 0
		done
		html2text -style pretty "$1"
		;;
	### Tar files ###
	*.tar|\
	*.tar.bz2|*.tar.bz|*.tar.gz|*.tar.z|\
	*.tar.lz|*.tar.tlz|\
	*.tar.lzma|*.tar.xz)
		${DECOMPRESSOR} -- "$1" | tar tvvf -;;
	*.tbz2|*.tbz|*.tgz|*.tlz|*.txz)
		# Recurse with the canonical ".tar.<z>" spelling of the suffix.
		lesspipe "$1" "$1".tar.${1##*.t} ;;
	### Misc archives ###
	*.bz2|\
	*.gz|*.z|\
	*.lz|\
	*.lzma|*.xz) ${DECOMPRESSOR} -- "$1" ;;
	*.rpm) rpm -qpivl --changelog -- "$1" || rpm2tar -O "$1" | tar tvvf -;;
	*.cpi|*.cpio) cpio -itv < "$1" ;;
	*.ace) unace l "$1" ;;
	*.arc) arc v "$1" ;;
	*.arj) unarj l -- "$1" ;;
	*.cab) cabextract -l -- "$1" ;;
	*.lha|*.lzh) lha v "$1" ;;
	*.zoo) zoo -list "$1" || unzoo -l "$1" ;;
	*.7z|*.exe) 7z l -- "$1" || 7za l -- "$1" || 7zr l -- "$1" ;;
	*.a) ar tv "$1" ;;
	*.elf) readelf -a -W -- "$1" ;;
	*.so) readelf -h -d -s -W -- "$1" ;;
	*.mo|*.gmo) msgunfmt -- "$1" ;;
	*.rar|.r[0-9][0-9]) unrar l -- "$1" ;;
	*.jar|*.war|*.ear|*.xpi|*.zip)
		unzip -v "$1" || miniunzip -l "$1" || miniunz -l "$1" || zipinfo -v "$1"
		;;
	*.deb|*.udeb)
		if type -P dpkg > /dev/null ; then
			dpkg --info "$1"
			dpkg --contents "$1"
		else
			# no dpkg on this system: list the ar container by hand
			ar tv "$1"
			ar p "$1" data.tar.gz | tar tzvvf -
		fi
		;;
	### Filesystems ###
	*.squashfs) unsquashfs -s "$1" && unsquashfs -ll "$1" ;;
	### Media ###
	*.bmp|*.gif|*.jpeg|*.jpg|*.ico|*.pcd|*.pcx|*.png|*.ppm|*.tga|*.tiff|*.tif|*.webp)
		identify "$1" || file -L -- "$1"
		;;
	*.asf|*.avi|*.mov|*.mp4|*.mpeg|*.mpg|*.qt|*.ram|*.rm|*.webm|*.wmv)
		midentify "$1" || file -L -- "$1"
		;;
	*.mp3) mp3info "$1" || id3info "$1" ;;
	*.ogg) ogginfo "$1" ;;
	*.flac) metaflac --list "$1" ;;
	*.torrent) torrentinfo "$1" || torrentinfo-console "$1" || ctorrent -x "$1" ;;
	*.bin|*.cue|*.raw)
		# not all .bin/.raw files are cd images #285507
		# fall back to lesspipe_file if .cue doesn't exist, or if
		# cd-info failed to parse things sanely
		[[ -e ${1%.*}.cue ]] \
			&& cd-info --no-header --no-device-info "$1" \
			|| lesspipe_file "$1"
		;;
	*.iso)
		iso_info=$(isoinfo -d -i "$1")
		echo "${iso_info}"
		# Joliet output overrides Rock Ridge, so prefer the better Rock
		case ${iso_info} in
		*$'\n'"Rock Ridge"*) iso_opts="-R";;
		*$'\n'"Joliet"*) iso_opts="-J";;
		*) iso_opts="";;
		esac
		isoinfo -l ${iso_opts} -i "$1"
		;;
	### Encryption stuff ###
	*.crl) openssl crl -hash -text -noout -in "$1" ;;
	*.csr) openssl req -text -noout -in "$1" ;;
	*.crt|*.pem) openssl x509 -hash -text -noout -in "$1" ;;
	# May not be such a good idea :)
	# ### Device nodes ###
	# /dev/[hs]d[a-z]*)
	# fdisk -l "${1:0:8}"
	# [[ $1 == *hd* ]] && hdparm -I "${1:0:8}"
	# ;;
	### Everything else ###
	*)
		# 'recur' is initialized by the top-level dispatcher; it limits
		# the retries below to two rounds.
		case $(( recur++ )) in
		# Maybe we didn't match due to case issues ...
		0) lesspipe "$1" "$(echo $1 | LC_ALL=C tr '[:upper:]' '[:lower:]')" ;;
		# Maybe we didn't match because the file is named weird ...
		1) lesspipe_file "$1" ;;
		esac
		# So no matches from above ... finally fall back to an external
		# coloring package. No matching here so we don't have to worry
		# about keeping in sync with random packages. Any coloring tool
		# you use should not output errors about unsupported files to
		# stdout. If it does, it's your problem.
		# Allow people to flip color off if they dont want it
		case ${LESSCOLOR} in
		always) LESSCOLOR=2;;
		[yY][eE][sS]|[yY]|1|true) LESSCOLOR=1;;
		[nN][oO]|[nN]|0|false) LESSCOLOR=0;;
		*) LESSCOLOR=0;; # default to no color #188835
		esac
		if [[ ${LESSCOLOR} != "0" ]] && [[ -n ${LESSCOLORIZER=code2color} ]] ; then
			# 2: Only colorize if user forces it ...
			# 1: ... or we know less will handle raw codes -- this will
			# not detect -seiRM, so set LESSCOLORIZER yourself
			if [[ ${LESSCOLOR} == "2" ]] || [[ " ${LESS} " == *" -"[rR]" "* ]] ; then
				LESSQUIET=true ${LESSCOLORIZER} "$1"
			fi
		fi
		# Nothing left to do but let less deal
		exit 0
		;;
	esac
}
# Top-level argument handling: usage / version / help, directory listing,
# or hand the file to lesspipe (silencing tool errors unless LESSDEBUG).
if [[ -z $1 ]] ; then
	echo "Usage: lesspipe <file>"
elif [[ $1 == "-V" || $1 == "--version" ]] ; then
	Id="cvsid"
	cat <<-EOF
	$Id: lesspipe.sh,v 1.54 2013/12/31 02:25:30 vapier Exp $
	Copyright 2001-2013 Gentoo Foundation
	Mike Frysinger <[email protected]>
	(with plenty of ideas stolen from other projects/distros)
	EOF
	less -V
elif [[ $1 == "-h" || $1 == "--help" ]] ; then
	cat <<-EOF
	lesspipe: preprocess files before sending them to less
	Usage: lesspipe <file>
	lesspipe specific settings:
	LESSCOLOR env - toggle colorizing of output (no/yes/always)
	LESSCOLORIZER env - program used to colorize output (default: code2color)
	LESSIGNORE - list of extensions to ignore (don't do anything fancy)
	You can create per-user filters as well by creating the executable file:
	~/.lessfilter
	One argument is passed to it: the file to display.
	To use lesspipe, simply add to your environment:
	export LESSOPEN="|lesspipe %s"
	Run 'less --help' or 'man less' for more info
	EOF
elif [[ -d $1 ]] ; then
	ls -alF -- "$1"
else
	# Recursion counter consumed by lesspipe's fallthrough case.
	recur=0
	# Show tool errors only when debugging; otherwise fail quietly and let
	# less display the raw file.
	[[ -n ${LESSDEBUG} ]] \
		&& lesspipe "$1" \
		|| lesspipe "$1" 2> /dev/null
fi
|
rafaelmartins/gentoo-rpi
|
sys-apps/less/files/lesspipe.sh
|
Shell
|
gpl-2.0
| 7,565 |
# -*- shell-script -*-
#
# Copyright (C) 1996-2014 Free Software Foundation, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
set -e
# The variable '$required' should no longer be modified after this
# file gets sourced.
required=${required-}; readonly required
# Source the actual code for test initialization and setup.
. test-lib.sh
. am-test-lib.sh
# Run that setup, and return control to the test script sourcing us.
am_test_setup
|
kuym/openocd
|
tools/automake-1.15/t/ax/test-init.sh
|
Shell
|
gpl-2.0
| 1,017 |
#!/bin/bash
# Run the deltabot test suite from the repository root.

# Work relative to this script's directory; abort if the cd fails so we
# never copy config into, or run tests against, the wrong tree.  Quoting
# keeps this correct when the path contains spaces.
cd "$(dirname "$0")" || exit 1

# Seed a default config on first run.
if [ ! -e config/config.json ]
then
    cp config/config.json.example config/config.json
fi

python deltabot/test.py
|
allthefoxes/TDTMBot
|
runtests.sh
|
Shell
|
gpl-3.0
| 150 |
#!/bin/bash
# Launch the server runtime, forwarding all command-line arguments.
python runtime/startserver.py "$@"
|
sethten/MoDesserts
|
mcp50/startserver.sh
|
Shell
|
gpl-3.0
| 47 |
#!/usr/bin/env bash
#
# Prerequisite:
# - install "grpc" gem
# gem install grpc
#
# To generate:
#
# git clone [email protected]:census-instrumentation/opencensus-proto.git
#
# cd opencensus-proto/src
# ./mkrubygen.sh
OUTDIR="../gen-ruby"
mkdir -p "$OUTDIR"

# Every .proto we generate Ruby + gRPC bindings for.
PROTOS="
opencensus/proto/stats/v1/stats.proto
opencensus/proto/metrics/v1/metrics.proto
opencensus/proto/resource/v1/resource.proto
opencensus/proto/trace/v1/trace.proto
opencensus/proto/trace/v1/trace_config.proto
opencensus/proto/agent/common/v1/common.proto
opencensus/proto/agent/metrics/v1/metrics_service.proto
opencensus/proto/agent/trace/v1/trace_service.proto
"

# Generate each binding; stop at the first failure, propagating its exit
# status (this mirrors the previous long `&& ... && ...` chain without the
# eight duplicated command lines).
for proto in $PROTOS; do
  grpc_tools_ruby_protoc -I ./ --ruby_out="$OUTDIR" --grpc_out="$OUTDIR" "$proto" || exit
done
|
census-instrumentation/opencensus-proto
|
src/mkrubygen.sh
|
Shell
|
apache-2.0
| 1,219 |
#! /usr/bin/env bash
# Bootstrap foam-extend-3.1 inside the VM: build wmake, the third-party
# stages, the main tree, and swak4Foam, logging every stage.
# Just to be sure
export WM_SCHEDULER=ccache
export CCACHE_DIR=/vagrant/ccache4vm
BOOTSTRAPLOG=/home/vagrant/bootstrapFoam.log
cd foam/foam-extend-3.1
source etc/bashrc
# Build the wmake tool chain first; everything below depends on it.
( cd wmake/src && make )
cd $WM_THIRD_PARTY_DIR
# NOTE(review): `cmd | tee` hides cmd's exit status (no pipefail), so a
# failed stage does not stop the script -- confirm this is intentional.
./AllMake.stage0 2>&1 | tee $BOOTSTRAPLOG
./AllMake.stage1 2>&1 | tee --append $BOOTSTRAPLOG
./AllMake.stage2 2>&1 | tee --append $BOOTSTRAPLOG
# Re-source to pick up paths produced by the stages above (MPI_ARCH_PATH).
source $WM_PROJECT_DIR/etc/bashrc
if [ ! -e $MPI_ARCH_PATH/lib ]
then
    # OpenSUSE needs this
    ln -s $MPI_ARCH_PATH/lib64 $MPI_ARCH_PATH/lib
fi
./AllMake.stage3 2>&1 | tee --append $BOOTSTRAPLOG
cd $WM_PROJECT_DIR
# pick up installed packages
source etc/bashrc
./Allwmake 2>&1 | tee --append $BOOTSTRAPLOG
# compile swak4Foam
cd $WM_THIRD_PARTY_DIR
./AllMake.stage5 2>&1 | tee --append $BOOTSTRAPLOG
|
KangX1/openfoam-extend-foam-extend-3.1
|
vagrantSandbox/skel/bootstrapFoam.sh
|
Shell
|
gpl-3.0
| 802 |
#!/bin/bash
# One-shot EAP setup: boot a standalone-ha server in the background, wait
# for startup, apply datasource/mod_cluster configuration via jboss-cli,
# then restart in the foreground as the long-running process.
${JBOSS_HOME}/bin/standalone.sh -c standalone-ha.xml&
JBOSS_CONSOLE_LOG=${JBOSS_HOME}/standalone/log/server.log
STARTUP_WAIT=30
count=0
# Poll the server log for the "started" marker (JBAS015874), sleeping 5s
# between checks, for at most STARTUP_WAIT iterations.
until [ $count -gt $STARTUP_WAIT ]
do
  grep 'JBAS015874:' $JBOSS_CONSOLE_LOG > /dev/null
  if [ $? -eq 0 ] ; then
    launched=true
    break
  fi
  sleep 5
  let count=$count+1;
done
# Apply all configuration in one CLI batch: MySQL driver module + JDBC
# driver registration, app deployment, and mod_cluster tuning.
${JBOSS_HOME}/bin/jboss-cli.sh --connect <<EOF
batch
module add --name=org.mysql --resources=/mysql-connector-java-5.1.25.jar --dependencies=javax.api,javax.transaction.api
/subsystem=datasources/jdbc-driver=mysql:add(driver-module-name=org.mysql,driver-name=mysql,driver-class-name=com.mysql.jdbc.Driver)
deploy /billing.war
/subsystem=modcluster/mod-cluster-config=configuration/:write-attribute(name=advertise,value=false)
/subsystem=modcluster/mod-cluster-config=configuration/:write-attribute(name=sticky-session,value=true)
/subsystem=modcluster/mod-cluster-config=configuration/:write-attribute(name=proxy-list,value="balancer:6666")
/subsystem=modcluster/mod-cluster-config=configuration/:write-attribute(name=node-timeout,value=10)
/subsystem=modcluster/mod-cluster-config=configuration/:write-attribute(name=ping,value=2)
run-batch
exit
EOF
# Restart so the batched changes take effect; the final foreground run
# keeps the container/process alive.
${JBOSS_HOME}/bin/jboss-cli.sh --connect shutdown
${JBOSS_HOME}/bin/standalone.sh -c standalone-ha.xml
|
jimma/microservices
|
eap/docker/billing/billing.sh
|
Shell
|
lgpl-3.0
| 1,318 |
#!/bin/sh
# cpuhotplug_hotplug.sh - Collection of functions for hotplugging
# operations.
# Routines in this library are set up to allow timing to be done
# by defining $TIME to a timing command.
# Default to no timing wrapper unless the caller exported TIME.
TIME=${TIME:-""}
# get_all_irqs()
#
# Gets list of all available IRQs in the system
# (numeric IRQ lines of /proc/interrupts, space-separated on one line).
#
get_all_irqs()
{
	# Quote the pattern so the shell cannot glob-expand '[0-9]+:' against
	# files in the current directory, and use grep -E (egrep is obsolete).
	# The unquoted $(...) inside echo collapses the IDs onto one line.
	echo $(grep -E '[0-9]+:' /proc/interrupts | cut -d ':' -f 1)
	return
}
# migrate_irq(CPU, IRQS)
#
# Sets the smp_affinity for the list of $IRQS to the given
# CPU number
#
migrate_irq()
{
	# Accept either "4" or "cpu4"; build a single-bit affinity mask for it.
	CPU=${1#cpu}
	MASK=$((1<<${CPU}))
	IRQS=$2
	for irq in ${IRQS}
	do
		# Some IRQs (e.g. per-cpu timers/IPIs) reject affinity changes;
		# report that and keep going rather than aborting the migration.
		echo $MASK > /proc/irq/${irq}/smp_affinity || \
			tst_resm TINFO "It is NOT permitted to change the IRQ $irq smp_affinity"
	done
}
# get_affinity_mask(PID)
#
# Echos the CPU affinity for the given process ID to stdout
# (the raw "taskset -p" output line, also left in the global $AFFINITY).
#
get_affinity_mask()
{
	AFFINITY=`taskset -p ${1}`
	echo ${AFFINITY}
	return
}
# set_affinity(PID, CPU)
#
# Sets the affinity for the given PID to the specified CPU.
# Returns taskset's exit status.
#
set_affinity()
{
	PID="$1"
	CPU="$2"
	# BUGFIX: the mask was previously built from the unrelated global
	# ${CPU_TO_TEST} instead of the CPU argument passed in, so the second
	# parameter was silently ignored.
	MASK=$((1<<${CPU}))
	# Run taskset directly; the old backtick wrapper pointlessly captured
	# output that was already redirected.
	taskset -p ${MASK} ${PID} > /dev/null 2>&1
	return $?
}
# online_cpu(CPU)
#
# Onlines the given CPU. Returns a true value if it was able
# to perform the online operation successfully, false otherwise.
#
# $CPU should either be a specific number like 4, or the cpu name,
# as in 'cpu4'.
#
online_cpu()
{
	CPU=${1#cpu}
	# Not hotpluggable (or no permission) if the control file isn't writable.
	if [ ! -w /sys/devices/system/cpu/cpu${CPU}/online ]; then
		return 1
	fi
	# Already online: nothing to do.
	cpu_is_online ${CPU} && return 0
	# $TIME optionally wraps the write with a timing command; note the
	# redirection belongs to this shell, so only the echo is timed.
	$TIME echo 1 > /sys/devices/system/cpu/cpu${CPU}/online
	RC=$?
	report_timing "Online cpu ${CPU}"
	return $RC
}
# offline_cpu(CPU)
#
# Offlines the given CPU. Returns a true value if it was able
# to perform the offline operation successfully, false otherwise.
#
offline_cpu()
{
	CPU=${1#cpu}
	# Not hotpluggable (or no permission) if the control file isn't writable.
	if [ ! -w /sys/devices/system/cpu/cpu${CPU}/online ]; then
		return 1
	fi
	# Already offline: nothing to do.
	! cpu_is_online ${CPU} && return 0
	# $TIME optionally wraps the write with a timing command.
	$TIME echo 0 > /sys/devices/system/cpu/cpu${CPU}/online
	RC=$?
	report_timing "Offline cpu ${CPU}"
	return $RC
}
# get_cpus_num()
#
# Prints the number of all available CPUs, regardless of whether they're
# currently online or offline.
#
get_cpus_num()
{
	# NOTE(review): despite the comment above, the count is delivered via
	# the *return status* (callers must read $?), which caps the usable
	# range at 255; "return -1" relies on the shell wrapping it to 255.
	[ -d /sys/devices/system/cpu/cpu0 ] || return -1
	NUM=`ls /sys/devices/system/cpu/ \
		| grep -c "cpu[0-9][0-9]*"`
	return $NUM
}
# get_all_cpus()
#
# Prints a list of all available CPUs ("cpuN" names, one per line),
# regardless of whether they're currently online or offline.
#
# This routine will work even if the CPUs are not hotpluggable, however
# it requires you have sysfs enabled in the kernel.
#
get_all_cpus()
{
	if [ ! -d /sys/devices/system/cpu ]; then
		return 1
	fi
	(cd /sys/devices/system/cpu; ls -d cpu[0-9]*)
}
# get_present_cpus()
#
# Prints a list of present CPUs ("cpuN" names), regardless of whether
# they're currently online or offline.
#
get_present_cpus()
{
	local present_mask="/sys/devices/system/cpu/present"
	local present_cpus=""
	# if sysfs present mask is missing, assume all cpu are present
	if [ ! -e "$present_mask" ]; then
		get_all_cpus
		return
	fi
	# The mask is a comma-separated list of CPUs and ranges, e.g. "0,2-5,8".
	for part in $(cat $present_mask | tr "," " "); do
		if echo $part | grep -q "-"; then
			range_low=$(echo $part | cut -d - -f 1)
			range_high=$(echo $part | cut -d - -f 2)
		else
			# BUGFIX: these were previously $(part), which tried to
			# *execute* a command named "part" instead of using the value.
			range_low=$part
			range_high=$part
		fi
		for cpu in $(seq $range_low $range_high); do
			if [ -e /sys/devices/system/cpu/cpu$cpu ]; then
				present_cpus="$present_cpus cpu$cpu"
			fi
		done
	done
	echo $present_cpus
}
# get_present_cpus_num()
#
# Prints the number of present CPUs
# (NB: the count is actually delivered via the return status).
#
get_present_cpus_num()
{
	set -- $(get_present_cpus)
	return $#
}
# get_hotplug_cpus()
#
# Prints a list of present hotpluggable CPUs, regardless of whether they're
# currently online or offline.
#
get_hotplug_cpus()
{
	local present_cpus=$(get_present_cpus)
	local hotplug_cpus=""
	# A CPU is hotpluggable iff it exposes an "online" control file.
	for cpu in $present_cpus; do
		if [ -e /sys/devices/system/cpu/$cpu/online ]; then
			hotplug_cpus="$hotplug_cpus $cpu"
		fi
	done
	echo $hotplug_cpus
}
# get_hotplug_cpus_num()
#
# Prints the number of hotpluggable CPUs
# (NB: the count is actually delivered via the return status).
#
get_hotplug_cpus_num()
{
	set -- $(get_hotplug_cpus)
	return $#
}
# get_all_cpu_states()
#
# Collects the current online/offline state of CPUs in the
# system, printing it in a format that can be passed to
# set_all_cpu_states() later.
#
get_all_cpu_states()
{
	# grep of */online yields "cpuN/online:STATE"; stripping "/online"
	# produces the space-separated "cpuN:STATE" tokens consumers expect.
	echo `cd /sys/devices/system/cpu/ && grep '' */online | \
		sed -e 's/\/online//'`
	return
}
# set_all_cpu_states(STATES)
#
# Sets all of the CPU states according to STATES, which must be
# of the form "cpuX:Y", where X is the CPU number and Y its state.
# Each must be on a separate line.
#
set_all_cpu_states()
{
	for cpu_state in $1; do
		# Split "cpuX:Y" into the cpu name and the desired state.
		cpu=`echo $cpu_state | cut -d: -f 1`
		state=`echo $cpu_state | cut -d: -f 2`
		if [ $state = 1 ]; then
			online_cpu $cpu
		else
			offline_cpu $cpu
		fi
	done
}
# get_online_cpus()
#
# Prints a list of all CPUs currently online. This function only
# works if the system's CPUs have hotplug capabilities
#
get_online_cpus()
{
	echo $(cd /sys/devices/system/cpu/ && grep 1 */online | cut -d '/' -f 1)
	return
}
# get_offline_cpus()
#
# Prints a list of all CPUs currently offline. This function only
# works if the system's CPUs have hotplug capabilities
#
get_offline_cpus()
{
	echo $(cd /sys/devices/system/cpu/ && grep 0 */online | cut -d '/' -f 1)
	return
}
# cpu_is_valid(CPU)
#
# Checks to see if the given CPU number is available for hotplugging
# in the system (i.e. its sysfs "online" file is readable).
# Returns 0 if the CPU is available, non-zero otherwise.
#
cpu_is_valid()
{
	CPU=${1#cpu}
	echo "CPU is $CPU"
	if cat /sys/devices/system/cpu/cpu${CPU}/online > /dev/null 2>&1; then
		return 0
	else
		return 1
	fi
}
# cpu_is_online(CPU)
#
# Returns a 0 value if the given CPU number is currently online,
# 1 otherwise. This function requires the system's CPUs have
# hotplug capabilities.
#
cpu_is_online()
{
	CPU=${1#cpu}
	# NOTE(review): errors if the sysfs "online" file is absent (e.g. a
	# non-hotpluggable boot CPU) -- callers should ensure it exists.
	if [ `cat /sys/devices/system/cpu/cpu${CPU}/online` = "1" ]; then
		return 0
	else
		return 1
	fi
}
|
bigzz/ltp
|
testcases/kernel/hotplug/cpu_hotplug/include/cpuhotplug_hotplug.sh
|
Shell
|
gpl-2.0
| 6,233 |
#!/bin/bash
# Copyright (C) 2020 Google, Inc.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
set -e # Fail on any error.
# Resolve this script's directory and the repository root (two levels up),
# independent of the caller's working directory.
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd )"
ROOT_DIR="$( cd "${SCRIPT_DIR}/../.." >/dev/null 2>&1 && pwd )"
# Build inside the pinned container image; the repo is bind-mounted at the
# same absolute path so paths inside the container match the host's.
docker run --rm -i \
  --volume "${ROOT_DIR}:${ROOT_DIR}" \
  --workdir "${ROOT_DIR}" \
  --env ROOT_DIR="${ROOT_DIR}" \
  --env SCRIPT_DIR="${SCRIPT_DIR}" \
  --entrypoint "${SCRIPT_DIR}/build-docker.sh" \
  "gcr.io/shaderc-build/radial-build:latest"
# The container runs as root; re-own anything it wrote back to this user.
sudo chown -R "$(id -u):$(id -g)" "${ROOT_DIR}"
|
google/filament
|
third_party/glslang/kokoro/linux-clang-gn/build.sh
|
Shell
|
apache-2.0
| 2,038 |
set -x
# Zero-fill all free space so the resulting disk image compresses well,
# then drop the filler file.  NOTE: dd exits non-zero when the disk fills
# up -- that is the expected way this command terminates.
dd if=/dev/zero of=/EMPTY bs=1M
rm -f /EMPTY
|
maier/packer-templates
|
ubuntu-18.04/scripts/99minimize.sh
|
Shell
|
apache-2.0
| 54 |
#!/bin/bash
# nw.js app builder
# (C) 2015 Anton Skshidlevsky <[email protected]>, GPLv3
# nw.js runtime version bundled into every build.
NW_VERSION="0.12.3"
# Supported platforms: win-ia32 win-x64 linux-ia32 linux-x64 osx-ia32 osx-x64
NW_PLATFORM="win-ia32 win-x64 linux-ia32 linux-x64 osx-ia32 osx-x64"
APP_NAME="itmoproctor"
# App payload source, download/work cache, and final artifact directory.
APP_DIR="${PWD}/app-nw"
CACHE_DIR="${PWD}/cache"
DIST_DIR="${PWD}/public/dist"
# download_nw <url> <filename>
# Fetch a nw.js distribution into the cache unless it is already there.
download_nw()
{
  local url=$1
  local filename=$2
  echo -n "Downloading ${filename##*/}... "
  if [ ! -e "$filename" ]
  then
    [ ! -e "${CACHE_DIR}" ] && mkdir -p ${CACHE_DIR}
    # NOTE(review): wget output/errors are discarded; a failed download
    # still prints "done" and can leave a partial file behind.
    wget -O $filename $url 1>/dev/null 2>&1
  fi
  echo "done"
}
# unpack_nw <archive> <target_dir>
# Extract a nw.js archive (.zip or .tar.gz) into <target_dir>, flattening
# the single top-level directory the upstream archives contain.
unpack_nw()
{
  local filename=$1
  local target_dir=$2
  echo -n "Unpacking ${filename##*/} to ${target_dir##*/}... "
  [ ! -e "${target_dir}" ] && mkdir -p ${target_dir}
  if [ "${filename##*.}" = "zip" ]
  then
    unzip -q $filename -d $target_dir
    # zip archives keep their wrapper directory; move its contents up.
    local unpacked_dir=$(find $target_dir -mindepth 1 -maxdepth 1 -type d)
    if [ -n "$unpacked_dir" ]
    then
      mv $unpacked_dir/* $target_dir
      rmdir $unpacked_dir
    fi
  else
    # tarballs: --strip 1 drops the wrapper directory during extraction.
    tar xzf $filename --strip 1 -C $target_dir
  fi
  echo "done"
}
# clean_nw <dir>
# Remove unneeded files (credits page, nwjc compiler binaries) from an
# unpacked nw.js distribution.
clean_nw()
{
  echo -n "Cleaning nw.js... "
  local target_dir=$1
  # `find -delete` replaces the fragile `| while read f; do rm $f` pipeline,
  # which broke on paths containing whitespace; the \( \) grouping keeps the
  # -o alternation self-contained.
  find "$target_dir" \( -name "credits.html" -o -name "nwjc" -o -name "nwjc.exe" \) -delete
  echo "done"
}
# pack_upx <dir>
# Compress the nw / nw.exe executables under <dir> with UPX (no-op when
# none are present).
pack_upx()
{
  local target_dir=$1
  echo -n "Packaging nw.js using UPX... "
  # \( \) grouping fixes find's operator precedence: previously `-type f`
  # bound only to -name "nw", so a *directory* named nw.exe matched too.
  local files
  files=$(find "$target_dir" -type f \( -name "nw" -o -name "nw.exe" \) | tr '\n' ' ')
  [ -n "${files}" ] && upx ${files} > /dev/null
  echo "done"
}
# pack_zip <dir> <archive>
# Zip the *contents* of <dir> (not the directory itself) into <archive>,
# replacing any existing archive.  <archive> must be an absolute path.
pack_zip()
{
  local target_dir=$1
  local archive=$2
  echo -n "Packaging ${archive##*/}... "
  [ -e "$archive" ] && rm "$archive"
  # cd in a subshell so the caller's working directory is untouched; the
  # `&&` prevents accidentally zipping the *current* directory when the cd
  # fails (the old `cd $dir;` form would).
  (cd "$target_dir" && zip -qr "$archive" *)
  echo "done"
}
# pack_tgz <dir> <archive>
# Tar+gzip the contents of <dir> into <archive>, replacing any existing one.
pack_tgz()
{
  local target_dir=$1
  local archive=$2
  echo -n "Packaging ${archive##*/}... "
  [ -e "$archive" ] && rm "$archive"
  # Quoting protects paths containing spaces; -C keeps archive paths relative.
  tar czf "$archive" -C "$target_dir" .
  echo "done"
}
# pack_app <target_dir>
# Fuse the prepared ${CACHE_DIR}/app.nw payload with the platform's nw.js
# binary: linux/windows get a self-running concatenated executable; osx
# gets the payload copied into the .app bundle, which is then renamed.
pack_app()
{
  local target_dir=$1
  echo -n "Packaging ${APP_NAME} app... "
  if [ -e "$target_dir/nw" ]
  then
    cat $target_dir/nw ${CACHE_DIR}/app.nw > $target_dir/${APP_NAME} && chmod +x $target_dir/${APP_NAME}
    rm $target_dir/nw
  fi
  if [ -e "$target_dir/nw.exe" ]
  then
    cat $target_dir/nw.exe ${CACHE_DIR}/app.nw > $target_dir/${APP_NAME}.exe && chmod +x $target_dir/${APP_NAME}.exe
    rm $target_dir/nw.exe
  fi
  if [ -d "$target_dir/nwjs.app" ]
  then
    cp ${CACHE_DIR}/app.nw $target_dir/nwjs.app/Contents/Resources/app.nw
    mv $target_dir/nwjs.app $target_dir/${APP_NAME}.app
  fi
  echo "done"
}
# build_app <url> <target_dir>
# Full per-platform pipeline: download, unpack, strip extras, UPX-compress,
# and merge in the application payload.
build_app()
{
  local url=$1
  local target_dir=$2
  local filename=${CACHE_DIR}/${url##*/}
  download_nw $url $filename
  unpack_nw $filename $target_dir
  clean_nw $target_dir
  pack_upx $target_dir
  pack_app $target_dir
}
# clean_dir <dir>
# Recreate <dir> as an empty directory (removing it first if it exists).
clean_dir()
{
  local target_dir=$1
  echo -n "Clearing ${target_dir##*/} directory... "
  if [ -e "$target_dir" ]
  then
    # ${var:?} aborts instead of running `rm -rf` with an empty argument
    # if the caller forgot to pass a directory; quoting protects paths
    # containing whitespace.
    rm -rf "${target_dir:?}"
  fi
  mkdir -p "$target_dir"
  echo "done"
}
# mk_dir <dir>
# Create <dir> (and parents) if it does not already exist.
mk_dir()
{
  local target_dir=$1
  # Explicit if: the old `[ ! -e ] && mkdir` form made the function return
  # status 1 when the directory already existed, which would break any
  # caller (or a future `set -e`) that checks the status.
  if [ ! -e "$target_dir" ]
  then
    mkdir -p "$target_dir"
  fi
}
# mk_meta
# Write ${DIST_DIR}/metadata.json describing the build: app version (from
# package.json via node), UTC timestamp, and an md5 per produced artifact.
mk_meta()
{
  local app_version=$(node -pe 'JSON.parse(process.argv[1]).version' "$(cat ${APP_DIR}/package.json)")
  local json="{ \"version\": \"${app_version}\", \"date\": \"$(date -u +'%Y-%m-%dT%H:%M:%SZ')\", \"md5\": { "
  echo -n "Generating metadata... "
  for file in $(ls ${DIST_DIR} | grep -v "metadata.json")
  do
    local md5=$(md5sum ${DIST_DIR}/${file} | cut -f1 -d' ')
    # ${first} is unset on the first iteration, so no leading comma is
    # emitted; it gets set just below after the first entry.
    [ -n "${first}" ] && json="${json}, "
    local first=1
    json="${json} \"${file}\": \"${md5}\""
  done
  json="${json} } }"
  echo ${json} > ${DIST_DIR}/metadata.json
  echo "done"
}
#
# Exec
#
# Prepare cache/output dirs, zip the app payload once, then build and
# package a distributable for every configured platform.
mk_dir ${CACHE_DIR}
clean_dir ${DIST_DIR}
pack_zip ${APP_DIR} ${CACHE_DIR}/app.nw
for platform in ${NW_PLATFORM}
do
  echo ">>> Processing: ${platform}"
  platform_dir=${CACHE_DIR}/${platform}
  # linux builds ship as .tar.gz; windows and osx ship as .zip.
  case "${platform}" in
    linux-*)
      build_app "http://dl.nwjs.io/v${NW_VERSION}/nwjs-v${NW_VERSION}-${platform}.tar.gz" ${platform_dir}
      pack_tgz ${platform_dir} ${DIST_DIR}/${APP_NAME}-${platform}.tar.gz
      ;;
    win-*)
      build_app "http://dl.nwjs.io/v${NW_VERSION}/nwjs-v${NW_VERSION}-${platform}.zip" ${platform_dir}
      pack_zip ${platform_dir} ${DIST_DIR}/${APP_NAME}-${platform}.zip
      ;;
    osx-*)
      build_app "http://dl.nwjs.io/v${NW_VERSION}/nwjs-v${NW_VERSION}-${platform}.zip" ${platform_dir}
      pack_zip ${platform_dir} ${DIST_DIR}/${APP_NAME}-${platform}.zip
      ;;
  esac
  # Drop the per-platform work dir; only the packaged artifact is kept.
  [ -e "${platform_dir}" ] && rm -rf ${platform_dir}
done
mk_meta
|
Winnie3ePooh/ITMOlab
|
nw-pack.sh
|
Shell
|
gpl-3.0
| 4,614 |
#!/bin/sh
################################################################################
##
##  Licensed to the Apache Software Foundation (ASF) under one or more
##  contributor license agreements.  See the NOTICE file distributed with
##  this work for additional information regarding copyright ownership.
##  The ASF licenses this file to You under the Apache License, Version 2.0
##  (the "License"); you may not use this file except in compliance with
##  the License.  You may obtain a copy of the License at
##
##      http://www.apache.org/licenses/LICENSE-2.0
##
##  Unless required by applicable law or agreed to in writing, software
##  distributed under the License is distributed on an "AS IS" BASIS,
##  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
##  See the License for the specific language governing permissions and
##  limitations under the License.
##
################################################################################
# Compile the MP_Alert_Child versioning test SWF with the Mustella harness
# classes included.  SDK_DIR must point at the Flex SDK root.
# Abort if the assets directory is missing — otherwise mxmlc would run in
# the wrong working directory.
cd ../SWFs/Assets || exit 1
"$SDK_DIR/bin/mxmlc" -includes=mx.managers.systemClasses.MarshallingSupport -source-path=../../../../../as3/src/mustella -includes=UnitTester -theme="$SDK_DIR/frameworks/themes/Halo/halo.swc" child_swfs/MP_Alert_Child.mxml
|
adufilie/flex-sdk
|
mustella/tests/components/Alert/Versioning/MP_Alert_Tests.sh
|
Shell
|
apache-2.0
| 1,222 |
#!/bin/sh
# Print one line of repository statistics for a given repository.
if [ $# -ne 1 ]; then
  echo "USAGE: $0 <repo url | local repo name>"
  exit 1
fi
repo_identifier="$1"
# this script retrieves repository statistics for a given repository with the
# following fields:
#   [ timestamp <date> <time>             ]
#   [ root catalog hash                   ]
#   [ referenced CAS objects              ]
#   [ revision number                     ]
#   [ # regular files                     ]
#   [ # directories                       ]
#   [ # symlinks                          ]
#   [ aggregated file size (bytes)        ]
#   [ # chunked files                     ]
#   [ aggregated chunked file size (bytes)]
#   [ # referenced chunks                 ]
#   [ # nested catalogs                   ]
# Quote the identifier everywhere: a repo URL may contain characters that
# would otherwise be subject to word-splitting or globbing.
root_info="$(./get_root_hash.py "$repo_identifier")"
hashes="$(./get_referenced_hashes.py "$repo_identifier" | wc -l)"
stats="$(./get_info.py "$repo_identifier")"
echo "$root_info $hashes $stats"
|
Gangbiao/cvmfs
|
add-ons/tools/get_head_statistics.sh
|
Shell
|
bsd-3-clause
| 744 |
# ltmain.sh - Provide generalized library-building support services.
# NOTE: Changing this file will not affect anything until you rerun ltconfig.
#
# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001
# Free Software Foundation, Inc.
# Originally by Gordon Matzigkeit <[email protected]>, 1996
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# As a special exception to the GNU General Public License, if you
# distribute this file as part of a program that contains a
# configuration script generated by Autoconf, you may include it under
# the same distribution terms that you use for the rest of that program.
# Check that we have a working $echo.
# NOTE(review): $echo appears to be injected by ltconfig before this script
# runs (see the ltconfig version check below) — confirm against ltconfig.
# If the current shell's echo mishandles backslashes, the script re-execs
# itself under $SHELL so a working echo can be found.
if test "X$1" = X--no-reexec; then
# Discard the --no-reexec flag, and continue.
shift
elif test "X$1" = X--fallback-echo; then
# Avoid inline document here, it may be left over
:
elif test "X`($echo '\t') 2>/dev/null`" = 'X\t'; then
# Yippee, $echo works!
:
else
# Restart under the correct shell, and then maybe $echo will work.
# --no-reexec prevents an infinite re-exec loop on the next pass.
exec $SHELL "$0" --no-reexec ${1+"$@"}
fi
if test "X$1" = X--fallback-echo; then
# used as fallback echo
# Invoked as "$0 --fallback-echo args...": print the remaining arguments
# verbatim via a here-document and exit, acting as an echo replacement.
shift
cat <<EOF
$*
EOF
exit 0
fi
# The name of this program.
progname=`$echo "$0" | sed 's%^.*/%%'`
modename="$progname"
# Constants.
PROGRAM=ltmain.sh
PACKAGE=libtool
VERSION=1.4a
TIMESTAMP=" (1.641.2.206mm 2001/04/03 21:47:47)"
default_mode=
help="Try \`$progname --help' for more information."
magic="%%%MAGIC variable%%%"
# Wrapped tool commands; $rm and $mv force overwrite without prompting.
mkdir="mkdir"
mv="mv -f"
rm="rm -f"
# Sed substitution that helps us do robust quoting. It backslashifies
# metacharacters that are still active within double-quoted strings.
# Xsed strips a leading "X" — arguments are prefixed with X elsewhere so
# echo implementations cannot misinterpret leading dashes/backslashes.
Xsed='sed -e 1s/^X//'
sed_quote_subst='s/\([\\`\\"$\\\\]\)/\\\1/g'
# tr programs converting between space- and newline-separated lists
# (octal: \040 = space, \012 = LF, \015 = CR).
SP2NL='tr \040 \012'
NL2SP='tr \015\012 \040\040'
# NLS nuisances.
# Only set LANG and LC_ALL to C if already set.
# These must not be set unconditionally because not all systems understand
# e.g. LANG=C (notably SCO).
# We save the old values to restore during execute mode.
if test "${LC_ALL+set}" = set; then
save_LC_ALL="$LC_ALL"; LC_ALL=C; export LC_ALL
fi
if test "${LANG+set}" = set; then
save_LANG="$LANG"; LANG=C; export LANG
fi
# Sanity check: the ltconfig that generated our configuration must match
# this script's version, otherwise the embedded settings are unreliable.
if test "$LTCONFIG_VERSION" != "$VERSION"; then
echo "$modename: ltconfig version \`$LTCONFIG_VERSION' does not match $PROGRAM version \`$VERSION'" 1>&2
echo "Fatal configuration error.  See the $PACKAGE docs for more information." 1>&2
exit 1
fi
# At least one of shared or static library building must be configured.
if test "$build_libtool_libs" != yes && test "$build_old_libs" != yes; then
echo "$modename: not configured to build any kind of library" 1>&2
echo "Fatal configuration error.  See the $PACKAGE docs for more information." 1>&2
exit 1
fi
# Global variables.
mode=$default_mode
nonopt=
prev=
prevopt=
run=
show="$echo"
show_help=
execute_dlfiles=
# Sed programs converting between .lo (libtool object) and .$objext names.
lo2o="s/\\.lo\$/.${objext}/"
o2lo="s/\\.${objext}\$/.lo/"
# Parse our command line options once, thoroughly.
# Loop state: $prev names the variable awaiting the next argument's value,
# $prevopt remembers the option string for error reporting, and $nonopt
# receives the first non-option argument (the wrapped command).
while test $# -gt 0
do
arg="$1"
shift
case $arg in
-*=*) optarg=`$echo "X$arg" | $Xsed -e 's/[-_a-zA-Z0-9]*=//'` ;;
*) optarg= ;;
esac
# If the previous option needs an argument, assign it.
if test -n "$prev"; then
case $prev in
execute_dlfiles)
execute_dlfiles="$execute_dlfiles $arg"
;;
tag)
tagname="$arg"
# Check whether tagname contains only valid characters
case $tagname in
*[!-_A-Za-z0-9,/]*)
echo "$progname: invalid tag name: $tagname" 1>&2
exit 1
;;
esac
# Tagged configurations are embedded in this very script between
# BEGIN/END marker comments; extract and eval the matching section.
if grep "^### BEGIN LIBTOOL TAG CONFIG: $tagname$" < "$0" > /dev/null; then
taglist="$taglist $tagname"
# Evaluate the configuration.
eval "`sed -n -e '/^### BEGIN LIBTOOL TAG CONFIG: '$tagname'$/,/^### END LIBTOOL TAG CONFIG: '$tagname'$/p' < $0`"
else
echo "$progname: ignoring unknown tag $tagname" 1>&2
fi
;;
*)
# Generic case: store the argument in the variable named by $prev.
eval "$prev=\$arg"
;;
esac
prev=
prevopt=
continue
fi
# Have we seen a non-optional argument yet?
case $arg in
--help)
show_help=yes
;;
--version)
echo "$PROGRAM (GNU $PACKAGE) $VERSION$TIMESTAMP"
exit 0
;;
--config)
sed -n -e '/^### BEGIN LIBTOOL CONFIG/,/^### END LIBTOOL CONFIG/p' < "$0"
# Now print the configurations for the tags.
for tagname in $taglist; do
sed -n -e "/^### BEGIN LIBTOOL TAG CONFIG: $tagname$/,/^### END LIBTOOL TAG CONFIG: $tagname$/p" < "$0"
done
exit 0
;;
--debug)
echo "$progname: enabling shell trace mode"
set -x
;;
--dry-run | -n)
# $run is prefixed to state-changing commands; ':' makes them no-ops.
run=:
;;
--features)
echo "host: $host"
if test "$build_libtool_libs" = yes; then
echo "enable shared libraries"
else
echo "disable shared libraries"
fi
if test "$build_old_libs" = yes; then
echo "enable static libraries"
else
echo "disable static libraries"
fi
exit 0
;;
--finish) mode="finish" ;;
--mode) prevopt="--mode" prev=mode ;;
--mode=*) mode="$optarg" ;;
--quiet | --silent)
# $show prints commands before running them; ':' silences it.
show=:
;;
--tag) prevopt="--tag" prev=tag ;;
--tag=*)
# Rewrite "--tag=NAME" as "--tag NAME" by pushing NAME back onto $@.
set tag "$optarg" ${1+"$@"}
shift
prev=tag
;;
-dlopen)
prevopt="-dlopen"
prev=execute_dlfiles
;;
-*)
$echo "$modename: unrecognized option \`$arg'" 1>&2
$echo "$help" 1>&2
exit 1
;;
*)
# First non-option argument ends option parsing; the remainder of $@
# belongs to the wrapped command.
nonopt="$arg"
break
;;
esac
done
# A dangling $prevopt means the last option never received its argument.
if test -n "$prevopt"; then
$echo "$modename: option \`$prevopt' requires an argument" 1>&2
$echo "$help" 1>&2
exit 1
fi
if test -z "$show_help"; then
# Infer the operation mode.
if test -z "$mode"; then
case $nonopt in
*cc | *++ | gcc* | *-gcc*)
mode=link
for arg
do
case $arg in
-c)
mode=compile
break
;;
esac
done
;;
*db | *dbx | *strace | *truss)
mode=execute
;;
*install*|cp|mv)
mode=install
;;
*rm)
mode=uninstall
;;
*)
# If we have no mode, but dlfiles were specified, then do execute mode.
test -n "$execute_dlfiles" && mode=execute
# Just use the default operation mode.
if test -z "$mode"; then
if test -n "$nonopt"; then
$echo "$modename: warning: cannot infer operation mode from \`$nonopt'" 1>&2
else
$echo "$modename: warning: cannot infer operation mode without MODE-ARGS" 1>&2
fi
fi
;;
esac
fi
# Only execute mode is allowed to have -dlopen flags.
if test -n "$execute_dlfiles" && test "$mode" != execute; then
$echo "$modename: unrecognized option \`-dlopen'" 1>&2
$echo "$help" 1>&2
exit 1
fi
# Change the help message to a mode-specific one.
generic_help="$help"
help="Try \`$modename --help --mode=$mode' for more information."
# These modes are in order of execution frequency so that they run quickly.
case $mode in
# libtool compile mode
compile)
modename="$modename: compile"
# Get the compilation command and the source file.
base_compile=
prev=
lastarg=
srcfile="$nonopt"
suppress_output=
user_target=no
for arg
do
case $prev in
"") ;;
xcompiler)
# Aesthetically quote the previous argument.
prev=
lastarg=`$echo "X$arg" | $Xsed -e "$sed_quote_subst"`
case $arg in
# Double-quote args containing other shell metacharacters.
# Many Bourne shells cannot handle close brackets correctly
# in scan sets, so we specify it separately.
*[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"")
arg="\"$arg\""
;;
esac
# Add the previous argument to base_compile.
if test -z "$base_compile"; then
base_compile="$lastarg"
else
base_compile="$base_compile $lastarg"
fi
continue
;;
esac
# Accept any command-line options.
case $arg in
-o)
if test "$user_target" != "no"; then
$echo "$modename: you cannot specify \`-o' more than once" 1>&2
exit 1
fi
user_target=next
;;
-static)
build_old_libs=yes
continue
;;
-prefer-pic)
pic_mode=yes
continue
;;
-prefer-non-pic)
pic_mode=no
continue
;;
-Xcompiler)
prev=xcompiler
continue
;;
-Wc,*)
args=`$echo "X$arg" | $Xsed -e "s/^-Wc,//"`
lastarg=
IFS="${IFS= }"; save_ifs="$IFS"; IFS=','
for arg in $args; do
IFS="$save_ifs"
# Double-quote args containing other shell metacharacters.
# Many Bourne shells cannot handle close brackets correctly
# in scan sets, so we specify it separately.
case $arg in
*[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"")
arg="\"$arg\""
;;
esac
lastarg="$lastarg $arg"
done
IFS="$save_ifs"
lastarg=`$echo "X$lastarg" | $Xsed -e "s/^ //"`
# Add the arguments to base_compile.
if test -z "$base_compile"; then
base_compile="$lastarg"
else
base_compile="$base_compile $lastarg"
fi
continue
;;
esac
case $user_target in
next)
# The next one is the -o target name
user_target=yes
continue
;;
yes)
# We got the output file
user_target=set
libobj="$arg"
continue
;;
esac
# Accept the current argument as the source file.
lastarg="$srcfile"
srcfile="$arg"
# Aesthetically quote the previous argument.
# Backslashify any backslashes, double quotes, and dollar signs.
# These are the only characters that are still specially
# interpreted inside of double-quoted scrings.
lastarg=`$echo "X$lastarg" | $Xsed -e "$sed_quote_subst"`
# Double-quote args containing other shell metacharacters.
# Many Bourne shells cannot handle close brackets correctly
# in scan sets, so we specify it separately.
case $lastarg in
*[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"")
lastarg="\"$lastarg\""
;;
esac
# Add the previous argument to base_compile.
if test -z "$base_compile"; then
base_compile="$lastarg"
else
base_compile="$base_compile $lastarg"
fi
done
case $user_target in
set)
;;
no)
# Get the name of the library object.
libobj=`$echo "X$srcfile" | $Xsed -e 's%^.*/%%'`
;;
*)
$echo "$modename: you must specify a target with \`-o'" 1>&2
exit 1
;;
esac
# Recognize several different file suffixes.
# If the user specifies -o file.o, it is replaced with file.lo
xform='[cCFSfmso]'
case $libobj in
*.ada) xform=ada ;;
*.adb) xform=adb ;;
*.ads) xform=ads ;;
*.asm) xform=asm ;;
*.c++) xform=c++ ;;
*.cc) xform=cc ;;
*.class) xform=class ;;
*.cpp) xform=cpp ;;
*.cxx) xform=cxx ;;
*.f90) xform=f90 ;;
*.for) xform=for ;;
*.java) xform=java ;;
esac
libobj=`$echo "X$libobj" | $Xsed -e "s/\.$xform$/.lo/"`
case $libobj in
*.lo) obj=`$echo "X$libobj" | $Xsed -e "$lo2o"` ;;
*)
$echo "$modename: cannot determine name of library object from \`$libobj'" 1>&2
exit 1
;;
esac
# Infer tagged configuration to use if any are available and
# if one wasn't chosen via the "--tag" command line option.
# Only attempt this if the compiler in the base compile
# command doesn't match the default compiler.
if test -n "$available_tags" && test -z "$tagname"; then
case $base_compile in
"$CC "*) ;;
# Blanks in the command may have been stripped by the calling shell,
# but not from the CC environment variable when ltconfig was run.
"`$echo $CC` "*) ;;
*)
for z in $available_tags; do
if grep "^### BEGIN LIBTOOL TAG CONFIG: $z$" < "$0" > /dev/null; then
# Evaluate the configuration.
eval "`sed -n -e '/^### BEGIN LIBTOOL TAG CONFIG: '$z'$/,/^### END LIBTOOL TAG CONFIG: '$z'$/p' < $0`"
case $base_compile in
"$CC "*)
# The compiler in the base compile command matches
# the one in the tagged configuration.
# Assume this is the tagged configuration we want.
tagname=$z
break
;;
"`$echo $CC` "*)
tagname=$z
break
;;
esac
fi
done
# If $tagname still isn't set, then no tagged configuration
# was found and let the user know that the "--tag" command
# line option must be used.
if test -z "$tagname"; then
echo "$modename: unable to infer tagged configuration"
echo "$modename: specify a tag with \`--tag'" 1>&2
exit 1
# else
# echo "$modename: using $tagname tagged configuration"
fi
;;
esac
fi
objname=`$echo "X$obj" | $Xsed -e 's%^.*/%%'`
xdir=`$echo "X$obj" | $Xsed -e 's%/[^/]*$%%'`
if test "X$xdir" = "X$obj"; then
xdir=
else
xdir=$xdir/
fi
lobj=${xdir}$objdir/$objname
if test -z "$base_compile"; then
$echo "$modename: you must specify a compilation command" 1>&2
$echo "$help" 1>&2
exit 1
fi
# Delete any leftover library objects.
if test "$build_old_libs" = yes; then
removelist="$obj $lobj $libobj ${libobj}T"
else
removelist="$lobj $libobj ${libobj}T"
fi
$run $rm $removelist
trap "$run $rm $removelist; exit 1" 1 2 15
# On Cygwin there's no "real" PIC flag so we must build both object types
case $host_os in
cygwin* | mingw* | pw32* | os2*)
pic_mode=default
;;
esac
if test $pic_mode = no && test "$deplibs_check_method" != pass_all; then
# non-PIC code in shared libraries is not supported
pic_mode=default
fi
# Calculate the filename of the output object if compiler does
# not support -o with -c
if test "$compiler_c_o" = no; then
output_obj=`$echo "X$srcfile" | $Xsed -e 's%^.*/%%' -e 's%\.[^.]*$%%'`.${objext}
lockfile="$output_obj.lock"
removelist="$removelist $output_obj $lockfile"
trap "$run $rm $removelist; exit 1" 1 2 15
else
output_obj=
need_locks=no
lockfile=
fi
# Lock this critical section if it is needed
# We use this script file to make the link, it avoids creating a new file
if test "$need_locks" = yes; then
until $run ln "$0" "$lockfile" 2>/dev/null; do
$show "Waiting for $lockfile to be removed"
sleep 2
done
elif test "$need_locks" = warn; then
if test -f "$lockfile"; then
echo "\
*** ERROR, $lockfile exists and contains:
`cat $lockfile 2>/dev/null`
This indicates that another process is trying to use the same
temporary object file, and libtool could not work around it because
your compiler does not support \`-c' and \`-o' together. If you
repeat this compilation, it may succeed, by chance, but you had better
avoid parallel builds (make -j) in this platform, or get a better
compiler."
$run $rm $removelist
exit 1
fi
echo $srcfile > "$lockfile"
fi
if test -n "$fix_srcfile_path"; then
eval srcfile=\"$fix_srcfile_path\"
fi
$run $rm "$libobj" "${libobj}T"
# Create a libtool object file (analogous to a ".la" file),
# but don't create it if we're doing a dry run.
test -z "$run" && cat > ${libobj}T <<EOF
# $libobj - a libtool object file
# Generated by $PROGRAM - GNU $PACKAGE $VERSION$TIMESTAMP
#
# Please DO NOT delete this file!
# It is necessary for linking the library.
# Name of the PIC object.
EOF
# Only build a PIC object if we are building libtool libraries.
if test "$build_libtool_libs" = yes; then
# Without this assignment, base_compile gets emptied.
fbsd_hideous_sh_bug=$base_compile
if test "$pic_mode" != no; then
command="$base_compile $srcfile $pic_flag"
else
# Don't build PIC code
command="$base_compile $srcfile"
fi
if test ! -d ${xdir}$objdir; then
$show "$mkdir ${xdir}$objdir"
$run $mkdir ${xdir}$objdir
status=$?
if test $status -ne 0 && test ! -d ${xdir}$objdir; then
exit $status
fi
fi
if test -z "$output_obj"; then
# Place PIC objects in $objdir
command="$command -o $lobj"
fi
$run $rm "$lobj" "$output_obj"
$show "$command"
if $run eval "$command"; then :
else
test -n "$output_obj" && $run $rm $removelist
exit 1
fi
if test "$need_locks" = warn &&
test x"`cat $lockfile 2>/dev/null`" != x"$srcfile"; then
echo "\
*** ERROR, $lockfile contains:
`cat $lockfile 2>/dev/null`
but it should contain:
$srcfile
This indicates that another process is trying to use the same
temporary object file, and libtool could not work around it because
your compiler does not support \`-c' and \`-o' together. If you
repeat this compilation, it may succeed, by chance, but you had better
avoid parallel builds (make -j) in this platform, or get a better
compiler."
$run $rm $removelist
exit 1
fi
# Just move the object if needed, then go on to compile the next one
if test -n "$output_obj" && test "x$output_obj" != "x$lobj"; then
$show "$mv $output_obj $lobj"
if $run $mv $output_obj $lobj; then :
else
error=$?
$run $rm $removelist
exit $error
fi
fi
# Append the name of the PIC object to the libtool object file.
test -z "$run" && cat >> ${libobj}T <<EOF
pic_object='$objdir/$objname'
EOF
# Allow error messages only from the first compilation.
suppress_output=' >/dev/null 2>&1'
else
# No PIC object so indicate it doesn't exist in the libtool
# object file.
test -z "$run" && cat >> ${libobj}T <<EOF
pic_object=none
EOF
fi
# Only build a position-dependent object if we build old libraries.
if test "$build_old_libs" = yes; then
if test "$pic_mode" != yes; then
# Don't build PIC code
command="$base_compile $srcfile"
else
command="$base_compile $srcfile $pic_flag"
fi
if test "$compiler_c_o" = yes; then
command="$command -o $obj"
fi
# Suppress compiler output if we already did a PIC compilation.
command="$command$suppress_output"
$run $rm "$obj" "$output_obj"
$show "$command"
if $run eval "$command"; then :
else
$run $rm $removelist
exit 1
fi
if test "$need_locks" = warn &&
test x"`cat $lockfile 2>/dev/null`" != x"$srcfile"; then
echo "\
*** ERROR, $lockfile contains:
`cat $lockfile 2>/dev/null`
but it should contain:
$srcfile
This indicates that another process is trying to use the same
temporary object file, and libtool could not work around it because
your compiler does not support \`-c' and \`-o' together. If you
repeat this compilation, it may succeed, by chance, but you had better
avoid parallel builds (make -j) in this platform, or get a better
compiler."
$run $rm $removelist
exit 1
fi
# Just move the object if needed
if test -n "$output_obj" && test "x$output_obj" != "x$obj"; then
$show "$mv $output_obj $obj"
if $run $mv $output_obj $obj; then :
else
error=$?
$run $rm $removelist
exit $error
fi
fi
# Append the name of the non-PIC object the libtool object file.
# Only append if the libtool object file exists.
test -z "$run" && cat >> ${libobj}T <<EOF
# Name of the non-PIC object.
non_pic_object='$objname'
EOF
else
# Append the name of the non-PIC object the libtool object file.
# Only append if the libtool object file exists.
test -z "$run" && cat >> ${libobj}T <<EOF
# Name of the non-PIC object.
non_pic_object=none
EOF
fi
$run $mv "${libobj}T" "${libobj}"
# Unlock the critical section if it was locked
if test "$need_locks" != no; then
$run $rm "$lockfile"
fi
exit 0
;;
# libtool link mode
link | relink)
modename="$modename: link"
case $host in
*-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2*)
# It is impossible to link a dll without this setting, and
# we shouldn't force the makefile maintainer to figure out
# which system we are compiling for in order to pass an extra
# flag for every libtool invokation.
# allow_undefined=no
# FIXME: Unfortunately, there are problems with the above when trying
# to make a dll which has undefined symbols, in which case not
# even a static library is built. For now, we need to specify
# -no-undefined on the libtool link line when we can be certain
# that all symbols are satisfied, otherwise we get a static library.
allow_undefined=yes
;;
*)
allow_undefined=yes
;;
esac
libtool_args="$nonopt"
base_compile="$nonopt"
compile_command="$nonopt"
finalize_command="$nonopt"
compile_rpath=
finalize_rpath=
compile_shlibpath=
finalize_shlibpath=
convenience=
old_convenience=
deplibs=
old_deplibs=
add_flags=
compiler_flags=
linker_flags=
dllsearchpath=
lib_search_path=`pwd`
avoid_version=no
dlfiles=
dlprefiles=
dlself=no
export_dynamic=no
export_symbols=
export_symbols_regex=
generated=
libobjs=
ltlibs=
module=no
no_install=no
objs=
non_pic_objects=
prefer_static_libs=no
preload=no
prev=
prevarg=
release=
rpath=
xrpath=
perm_rpath=
temp_rpath=
thread_safe=no
vinfo=
# We need to know -static, to get the right output filenames.
for arg
do
case $arg in
-all-static | -static)
if test "X$arg" = "X-all-static"; then
if test "$build_libtool_libs" = yes && test -z "$link_static_flag"; then
$echo "$modename: warning: complete static linking is impossible in this configuration" 1>&2
fi
if test -n "$link_static_flag"; then
dlopen_self=$dlopen_self_static
fi
else
if test -z "$pic_flag" && test -n "$link_static_flag"; then
dlopen_self=$dlopen_self_static
fi
fi
build_libtool_libs=no
build_old_libs=yes
prefer_static_libs=yes
break
;;
esac
done
# See if our shared archives depend on static archives.
test -n "$old_archive_from_new_cmds" && build_old_libs=yes
# Go through the arguments, transforming them on the way.
while test $# -gt 0; do
arg="$1"
base_compile="$base_compile $arg"
shift
case $arg in
*[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"")
qarg=\"`$echo "X$arg" | $Xsed -e "$sed_quote_subst"`\" ### testsuite: skip nested quoting test
;;
*) qarg=$arg ;;
esac
libtool_args="$libtool_args $qarg"
# If the previous option needs an argument, assign it.
if test -n "$prev"; then
case $prev in
output)
compile_command="$compile_command @OUTPUT@"
finalize_command="$finalize_command @OUTPUT@"
;;
esac
case $prev in
dlfiles|dlprefiles)
if test "$preload" = no; then
# Add the symbol object into the linking commands.
compile_command="$compile_command @SYMFILE@"
finalize_command="$finalize_command @SYMFILE@"
preload=yes
fi
case $arg in
*.la | *.lo) ;; # We handle these cases below.
force)
if test "$dlself" = no; then
dlself=needless
export_dynamic=yes
fi
prev=
continue
;;
self)
if test "$prev" = dlprefiles; then
dlself=yes
elif test "$prev" = dlfiles && test "$dlopen_self" != yes; then
dlself=yes
else
dlself=needless
export_dynamic=yes
fi
prev=
continue
;;
*)
if test "$prev" = dlfiles; then
dlfiles="$dlfiles $arg"
else
dlprefiles="$dlprefiles $arg"
fi
prev=
continue
;;
esac
;;
expsyms)
export_symbols="$arg"
if test ! -f "$arg"; then
$echo "$modename: symbol file \`$arg' does not exist"
exit 1
fi
prev=
continue
;;
expsyms_regex)
export_symbols_regex="$arg"
prev=
continue
;;
release)
release="-$arg"
prev=
continue
;;
objectlist)
if test -f "$arg"; then
save_arg=$arg
moreargs=
for fil in `cat $save_arg`
do
# moreargs="$moreargs $fil"
arg=$fil
# A libtool-controlled object.
# Check to see that this really is a libtool object.
if (sed -e '2q' $arg | egrep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then
pic_object=
non_pic_object=
# Read the .lo file
# If there is no directory component, then add one.
case $arg in
*/* | *\\*) . $arg ;;
*) . ./$arg ;;
esac
if test -z "$pic_object" || \
test -z "$non_pic_object" ||
test "$pic_object" = none && \
test "$non_pic_object" = none; then
$echo "$modename: cannot find name of object for \`$arg'" 1>&2
exit 1
fi
# Extract subdirectory from the argument.
xdir=`$echo "X$arg" | $Xsed -e 's%/[^/]*$%%'`
if test "X$xdir" = "X$arg"; then
xdir=
else
xdir="$xdir/"
fi
if test "$pic_object" != none; then
# Prepend the subdirectory the object is found in.
pic_object="$xdir$pic_object"
if test "$prev" = dlfiles; then
if test "$build_libtool_libs" = yes && test "$dlopen_support" = yes; then
dlfiles="$dlfiles $pic_object"
prev=
continue
else
# If libtool objects are unsupported, then we need to preload.
prev=dlprefiles
fi
fi
# CHECK ME: I think I busted this. -Ossama
if test "$prev" = dlprefiles; then
# Preload the old-style object.
dlprefiles="$dlprefiles $pic_object"
prev=
fi
# A PIC object.
libobjs="$libobjs $pic_object"
arg="$pic_object"
fi
# Non-PIC object.
if test "$non_pic_object" != none; then
# Prepend the subdirectory the object is found in.
non_pic_object="$xdir$non_pic_object"
# A standard non-PIC object
non_pic_objects="$non_pic_objects $non_pic_object"
if test -z "$pic_object" || test "$pic_object" = none ; then
arg="$non_pic_object"
fi
fi
else
# Only an error if not doing a dry-run.
if test -z "$run"; then
$echo "$modename: \`$arg' is not a valid libtool object" 1>&2
exit 1
else
# Dry-run case.
# Extract subdirectory from the argument.
xdir=`$echo "X$arg" | $Xsed -e 's%/[^/]*$%%'`
if test "X$xdir" = "X$arg"; then
xdir=
else
xdir="$xdir/"
fi
pic_object=`$echo "X${xdir}${objdir}/${arg}" | $Xsed -e "$lo2o"`
non_pic_object=`$echo "X${xdir}${arg}" | $Xsed -e "$lo2o"`
libobjs="$libobjs $pic_object"
non_pic_objects="$non_pic_objects $non_pic_object"
fi
fi
done
else
$echo "$modename: link input file \`$save_arg' does not exist"
exit 1
fi
arg=$save_arg
prev=
continue
;;
rpath | xrpath)
# We need an absolute path.
case $arg in
[\\/]* | [A-Za-z]:[\\/]*) ;;
*)
$echo "$modename: only absolute run-paths are allowed" 1>&2
exit 1
;;
esac
if test "$prev" = rpath; then
case "$rpath " in
*" $arg "*) ;;
*) rpath="$rpath $arg" ;;
esac
else
case "$xrpath " in
*" $arg "*) ;;
*) xrpath="$xrpath $arg" ;;
esac
fi
prev=
continue
;;
xcompiler)
compiler_flags="$compiler_flags $qarg"
prev=
compile_command="$compile_command $qarg"
finalize_command="$finalize_command $qarg"
continue
;;
xlinker)
linker_flags="$linker_flags $qarg"
compiler_flags="$compiler_flags $wl$qarg"
prev=
compile_command="$compile_command $wl$qarg"
finalize_command="$finalize_command $wl$qarg"
continue
;;
*)
eval "$prev=\"\$arg\""
prev=
continue
;;
esac
fi
prevarg="$arg"
case $arg in
-all-static)
if test -n "$link_static_flag"; then
compile_command="$compile_command $link_static_flag"
finalize_command="$finalize_command $link_static_flag"
fi
continue
;;
-allow-undefined)
# FIXME: remove this flag sometime in the future.
$echo "$modename: \`-allow-undefined' is deprecated because it is the default" 1>&2
continue
;;
-avoid-version)
avoid_version=yes
continue
;;
-dlopen)
prev=dlfiles
continue
;;
-dlpreopen)
prev=dlprefiles
continue
;;
-export-dynamic)
export_dynamic=yes
continue
;;
-export-symbols | -export-symbols-regex)
if test -n "$export_symbols" || test -n "$export_symbols_regex"; then
$echo "$modename: not more than one -exported-symbols argument allowed"
exit 1
fi
if test "X$arg" = "X-export-symbols"; then
prev=expsyms
else
prev=expsyms_regex
fi
continue
;;
# The native IRIX linker understands -LANG:*, -LIST:* and -LNO:*
# so, if we see these flags be careful not to treat them like -L
-L[A-Z][A-Z]*:*)
case $with_gcc/$host in
no/*-*-irix*)
compile_command="$compile_command $arg"
finalize_command="$finalize_command $arg"
;;
esac
continue
;;
-L*)
dir=`$echo "X$arg" | $Xsed -e 's/^-L//'`
# We need an absolute path.
case $dir in
[\\/]* | [A-Za-z]:[\\/]*) ;;
*)
absdir=`cd "$dir" && pwd`
if test -z "$absdir"; then
$echo "$modename: cannot determine absolute directory name of \`$dir'" 1>&2
exit 1
fi
dir="$absdir"
;;
esac
case "$deplibs " in
*" -L$dir "*) ;;
*)
deplibs="$deplibs -L$dir"
lib_search_path="$lib_search_path $dir"
;;
esac
case $host in
*-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2*)
case :$dllsearchpath: in
*":$dir:"*) ;;
*) dllsearchpath="$dllsearchpath:$dir";;
esac
;;
esac
continue
;;
-l*)
if test "$arg" = "-lc"; then
case $host in
*-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-*-beos*)
# These systems don't actually have c library (as such)
continue
;;
*-*-rhapsody* | *-*-darwin1.[012])
# Rhapsody C library is in the System framework
deplibs="$deplibs -framework System"
continue
;;
esac
elif test "$arg" = "-lm"; then
case $host in
*-*-cygwin* | *-*-pw32* | *-*-beos*)
# These systems don't actually have math library (as such)
continue
;;
*-*-rhapsody* | *-*-darwin1.[012])
# Rhapsody math library is in the System framework
deplibs="$deplibs -framework System"
continue
;;
esac
fi
deplibs="$deplibs $arg"
continue
;;
-module)
module=yes
continue
;;
-no-fast-install)
fast_install=no
continue
;;
-no-install)
case $host in
*-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2*)
# The PATH hackery in wrapper scripts is required on Windows
# in order for the loader to find any dlls it needs.
$echo "$modename: warning: \`-no-install' is ignored for $host" 1>&2
$echo "$modename: warning: assuming \`-no-fast-install' instead" 1>&2
fast_install=no
;;
*)
no_install=yes
;;
esac
continue
;;
-no-undefined)
allow_undefined=no
continue
;;
-objectlist)
prev=objectlist
continue
;;
-o) prev=output ;;
-release)
prev=release
continue
;;
-rpath)
prev=rpath
continue
;;
-R)
prev=xrpath
continue
;;
-R*)
dir=`$echo "X$arg" | $Xsed -e 's/^-R//'`
# We need an absolute path.
case $dir in
[\\/]* | [A-Za-z]:[\\/]*) ;;
*)
$echo "$modename: only absolute run-paths are allowed" 1>&2
exit 1
;;
esac
case "$xrpath " in
*" $dir "*) ;;
*) xrpath="$xrpath $dir" ;;
esac
continue
;;
-static)
# The effects of -static are defined in a previous loop.
# We used to do the same as -all-static on platforms that
# didn't have a PIC flag, but the assumption that the effects
# would be equivalent was wrong. It would break on at least
# Digital Unix and AIX.
continue
;;
-thread-safe)
thread_safe=yes
continue
;;
-version-info)
prev=vinfo
continue
;;
-Wc,*)
args=`$echo "X$arg" | $Xsed -e "$sed_quote_subst" -e 's/^-Wc,//'`
arg=
IFS="${IFS= }"; save_ifs="$IFS"; IFS=','
for flag in $args; do
IFS="$save_ifs"
case $flag in
*[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"")
flag="\"$flag\""
;;
esac
arg="$arg $wl$flag"
compiler_flags="$compiler_flags $flag"
done
IFS="$save_ifs"
arg=`$echo "X$arg" | $Xsed -e "s/^ //"`
;;
-Wl,*)
args=`$echo "X$arg" | $Xsed -e "$sed_quote_subst" -e 's/^-Wl,//'`
arg=
IFS="${IFS= }"; save_ifs="$IFS"; IFS=','
for flag in $args; do
IFS="$save_ifs"
case $flag in
*[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"")
flag="\"$flag\""
;;
esac
arg="$arg $wl$flag"
compiler_flags="$compiler_flags $wl$flag"
linker_flags="$linker_flags $flag"
done
IFS="$save_ifs"
arg=`$echo "X$arg" | $Xsed -e "s/^ //"`
;;
-Xcompiler)
prev=xcompiler
continue
;;
-Xlinker)
prev=xlinker
continue
;;
# Some other compiler flag.
-* | +*)
# Unknown arguments in both finalize_command and compile_command need
# to be aesthetically quoted because they are evaled later.
arg=`$echo "X$arg" | $Xsed -e "$sed_quote_subst"`
case $arg in
*[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"")
arg="\"$arg\""
;;
esac
add_flags="$add_flags $arg"
;;
*.$objext)
# A standard object.
objs="$objs $arg"
;;
*.lo)
# A libtool-controlled object.
# Check to see that this really is a libtool object: line 2 of a
# .lo file carries a "# Generated by ... $PACKAGE" marker.
if (sed -e '2q' $arg | egrep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then
pic_object=
non_pic_object=
# Read the .lo file (it is a shell fragment defining pic_object
# and non_pic_object).
# If there is no directory component, then add one.
case $arg in
*/* | *\\*) . $arg ;;
*) . ./$arg ;;
esac
# NOTE(review): with sh precedence this groups as
# ((-z pic || -z non_pic || pic = none) && non_pic = none),
# i.e. the error fires only when non_pic_object is "none" AND one of
# the other conditions holds -- confirm this is the intended check.
if test -z "$pic_object" || \
test -z "$non_pic_object" ||
test "$pic_object" = none && \
test "$non_pic_object" = none; then
$echo "$modename: cannot find name of object for \`$arg'" 1>&2
exit 1
fi
# Extract subdirectory from the argument.
xdir=`$echo "X$arg" | $Xsed -e 's%/[^/]*$%%'`
if test "X$xdir" = "X$arg"; then
xdir=
else
xdir="$xdir/"
fi
if test "$pic_object" != none; then
# Prepend the subdirectory the object is found in.
pic_object="$xdir$pic_object"
if test "$prev" = dlfiles; then
# Object was named after -dlopen: dlopen it at run time if the
# platform supports that, otherwise fall through to preloading.
if test "$build_libtool_libs" = yes && test "$dlopen_support" = yes; then
dlfiles="$dlfiles $pic_object"
prev=
continue
else
# If libtool objects are unsupported, then we need to preload.
prev=dlprefiles
fi
fi
# CHECK ME: I think I busted this. -Ossama
if test "$prev" = dlprefiles; then
# Preload the old-style object.
dlprefiles="$dlprefiles $pic_object"
prev=
fi
# A PIC object.
libobjs="$libobjs $pic_object"
arg="$pic_object"
fi
# Non-PIC object.
if test "$non_pic_object" != none; then
# Prepend the subdirectory the object is found in.
non_pic_object="$xdir$non_pic_object"
# A standard non-PIC object
non_pic_objects="$non_pic_objects $non_pic_object"
# If there is no PIC variant, the non-PIC object stands in for it.
if test -z "$pic_object" || test "$pic_object" = none ; then
arg="$non_pic_object"
fi
fi
else
# Only an error if not doing a dry-run.
if test -z "$run"; then
$echo "$modename: \`$arg' is not a valid libtool object" 1>&2
exit 1
else
# Dry-run case.
# The .lo file may not exist yet; synthesize the object names it
# would define so the rest of the dry run can proceed.
# Extract subdirectory from the argument.
xdir=`$echo "X$arg" | $Xsed -e 's%/[^/]*$%%'`
if test "X$xdir" = "X$arg"; then
xdir=
else
xdir="$xdir/"
fi
pic_object=`$echo "X${xdir}${objdir}/${arg}" | $Xsed -e "$lo2o"`
non_pic_object=`$echo "X${xdir}${arg}" | $Xsed -e "$lo2o"`
libobjs="$libobjs $pic_object"
non_pic_objects="$non_pic_objects $non_pic_object"
fi
fi
;;
*.$libext)
# An archive (static library): link it in and also remember it for
# the old-library (.a) build.
deplibs="$deplibs $arg"
old_deplibs="$old_deplibs $arg"
continue
;;
*.la)
# A libtool-controlled library.
if test "$prev" = dlfiles; then
# This library was specified with -dlopen.
dlfiles="$dlfiles $arg"
prev=
elif test "$prev" = dlprefiles; then
# The library was specified with -dlpreopen.
dlprefiles="$dlprefiles $arg"
prev=
else
deplibs="$deplibs $arg"
fi
continue
;;
# Some other compiler argument.
*)
# Unknown arguments in both finalize_command and compile_command need
# to be aesthetically quoted because they are evaled later.
arg=`$echo "X$arg" | $Xsed -e "$sed_quote_subst"`
case $arg in
*[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"")
arg="\"$arg\""
;;
esac
add_flags="$add_flags $arg"
;;
esac
# Now actually substitute the argument into the commands.
if test -n "$arg"; then
compile_command="$compile_command $arg"
finalize_command="$finalize_command $arg"
fi
done
# A dangling $prev here means the last option expected an argument
# that never arrived.
if test -n "$prev"; then
$echo "$modename: the \`$prevarg' option requires an argument" 1>&2
$echo "$help" 1>&2
exit 1
fi
# Infer tagged configuration to use if any are available and
# if one wasn't chosen via the "--tag" command line option.
# Only attempt this if the compiler in the base link
# command doesn't match the default compiler.
if test -n "$available_tags" && test -z "$tagname"; then
case $base_compile in
"$CC "*) ;;
# Blanks in the command may have been stripped by the calling shell,
# but not from the CC environment variable when ltconfig was run.
"`$echo $CC` "*) ;;
*)
# Scan each tag section embedded in this very script ($0) and
# evaluate it; accept the first one whose CC matches the command.
for z in $available_tags; do
if grep "^### BEGIN LIBTOOL TAG CONFIG: $z$" < "$0" > /dev/null; then
# Evaluate the configuration.
eval "`sed -n -e '/^### BEGIN LIBTOOL TAG CONFIG: '$z'$/,/^### END LIBTOOL TAG CONFIG: '$z'$/p' < $0`"
case $base_compile in
"$CC "*)
# The compiler in $compile_command matches
# the one in the tagged configuration.
# Assume this is the tagged configuration we want.
tagname=$z
break
;;
"`$echo $CC` "*)
tagname=$z
break
;;
esac
fi
done
# If $tagname still isn't set, then no tagged configuration
# was found and let the user know that the "--tag" command
# line option must be used.
if test -z "$tagname"; then
# Send both diagnostic lines to stderr (the first line previously
# went to stdout, unlike every other error message here).
echo "$modename: unable to infer tagged configuration" 1>&2
echo "$modename: specify a tag with \`--tag'" 1>&2
exit 1
# else
# echo "$modename: using $tagname tagged configuration"
fi
;;
esac
fi
# If -export-dynamic was given and the platform has a flag for it,
# add that flag to both link commands.
if test "$export_dynamic" = yes && test -n "$export_dynamic_flag_spec"; then
eval arg=\"$export_dynamic_flag_spec\"
compile_command="$compile_command $arg"
finalize_command="$finalize_command $arg"
fi
oldlibs=
# calculate the name of the file, without its directory
outputname=`$echo "X$output" | $Xsed -e 's%^.*/%%'`
libobjs_save="$libobjs"
if test -n "$shlibpath_var"; then
# get the directories listed in $shlibpath_var
eval shlib_search_path=\`\$echo \"X\${$shlibpath_var}\" \| \$Xsed -e \'s/:/ /g\'\`
else
shlib_search_path=
fi
# Expand the configure-time search path specs into concrete paths.
eval sys_lib_search_path=\"$sys_lib_search_path_spec\"
eval sys_lib_dlsearch_path=\"$sys_lib_dlsearch_path_spec\"
# Output directory: same directory as $output, with $objdir appended.
output_objdir=`$echo "X$output" | $Xsed -e 's%/[^/]*$%%'`
if test "X$output_objdir" = "X$output"; then
output_objdir="$objdir"
else
output_objdir="$output_objdir/$objdir"
fi
# Create the object directory.
if test ! -d $output_objdir; then
$show "$mkdir $output_objdir"
$run $mkdir $output_objdir
status=$?
# A parallel invocation may have created it in the meantime, so only
# fail if it still does not exist.
if test $status -ne 0 && test ! -d $output_objdir; then
exit $status
fi
fi
# Determine the type of output
case $output in
"")
$echo "$modename: you must specify an output file" 1>&2
$echo "$help" 1>&2
exit 1
;;
*.$libext) linkmode=oldlib ;;
*.lo | *.$objext) linkmode=obj ;;
*.la) linkmode=lib ;;
*) linkmode=prog ;; # Anything else should be a program.
esac
specialdeplibs=
libs=
# Find all interdependent deplibs by searching for libraries
# that are linked more than once (e.g. -la -lb -la)
for deplib in $deplibs; do
case "$libs " in
*" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;;
esac
libs="$libs $deplib"
done
# For libraries, also consider the compiler-implied pre/post deps.
if test $linkmode = lib; then
libs="$predeps $libs $compiler_lib_search_path $postdeps"
fi
# Reset accumulators rebuilt by the dependency passes below.
deplibs=
newdependency_libs=
newlib_search_path=
need_relink=no # whether we're linking any uninstalled libtool libraries
uninst_deplibs= # uninstalled libtool libraries
uninst_path= # paths that contain uninstalled libtool libraries
# Select which dependency-analysis passes to run for this output type.
case $linkmode in
lib)
passes="conv link"
# Libraries may only -dlopen other libtool libraries.
for file in $dlfiles $dlprefiles; do
case $file in
*.la) ;;
*)
$echo "$modename: libraries can \`-dlopen' only libtool libraries: $file" 1>&2
exit 1
;;
esac
done
;;
prog)
compile_deplibs=
finalize_deplibs=
alldeplibs=no
newdlfiles=
newdlprefiles=
passes="conv scan dlopen dlpreopen link"
;;
*) passes="conv"
;;
esac
for pass in $passes; do
if test "$linkmode,$pass" = "lib,link" ||
test "$linkmode,$pass" = "prog,scan"; then
libs="$deplibs"
deplibs=
fi
if test $linkmode = prog; then
case $pass in
dlopen) libs="$dlfiles" ;;
dlpreopen) libs="$dlprefiles" ;;
link) libs="$deplibs %DEPLIBS% $dependency_libs" ;;
esac
fi
if test $pass = dlopen; then
# Collect dlpreopened libraries
save_deplibs="$deplibs"
deplibs=
fi
for deplib in $libs; do
lib=
found=no
case $deplib in
-l*)
if test $linkmode != lib && test $linkmode != prog; then
$echo "$modename: warning: \`-l' is ignored for archives/objects" 1>&2
continue
fi
if test $pass = conv; then
deplibs="$deplib $deplibs"
continue
fi
name=`$echo "X$deplib" | $Xsed -e 's/^-l//'`
for searchdir in $newlib_search_path $lib_search_path $sys_lib_search_path $shlib_search_path; do
# Search the libtool library
lib="$searchdir/lib${name}.la"
if test -f "$lib"; then
found=yes
break
fi
done
if test "$found" != yes; then
if test "$linkmode,$pass" = "prog,link"; then
compile_deplibs="$deplib $compile_deplibs"
finalize_deplibs="$deplib $finalize_deplibs"
else
deplibs="$deplib $deplibs"
test $linkmode = lib && newdependency_libs="$deplib $newdependency_libs"
fi
continue
fi
;;
-L*)
case $linkmode in
lib)
deplibs="$deplib $deplibs"
test $pass = conv && continue
newdependency_libs="$deplib $newdependency_libs"
newlib_search_path="$newlib_search_path "`$echo "X$deplib" | $Xsed -e 's/^-L//'`
;;
prog)
if test $pass = conv; then
deplibs="$deplib $deplibs"
continue
fi
if test $pass = scan; then
deplibs="$deplib $deplibs"
newlib_search_path="$newlib_search_path "`$echo "X$deplib" | $Xsed -e 's/^-L//'`
else
compile_deplibs="$deplib $compile_deplibs"
finalize_deplibs="$deplib $finalize_deplibs"
fi
;;
*)
$echo "$modename: warning: \`-L' is ignored for archives/objects" 1>&2
;;
esac
continue
;;
-R*)
if test $pass = link; then
dir=`$echo "X$deplib" | $Xsed -e 's/^-R//'`
# Make sure the xrpath contains only unique directories.
case "$xrpath " in
*" $dir "*) ;;
*) xrpath="$xrpath $dir" ;;
esac
fi
deplibs="$deplib $deplibs"
continue
;;
*.la) lib="$deplib" ;;
*.$libext)
if test $pass = conv; then
deplibs="$deplib $deplibs"
continue
fi
case $linkmode in
lib)
if test "$deplibs_check_method" != pass_all; then
echo
echo "*** Warning: This library needs some functionality provided by $deplib."
echo "*** I have the capability to make that library automatically link in when"
echo "*** you link to this library. But I can only do this if you have a"
echo "*** shared version of the library, which you do not appear to have."
else
echo
echo "*** Warning: Linking the shared library $output against the"
echo "*** static library $deplib is not portable!"
deplibs="$deplib $deplibs"
fi
continue
;;
prog)
if test $pass != link; then
deplibs="$deplib $deplibs"
else
compile_deplibs="$deplib $compile_deplibs"
finalize_deplibs="$deplib $finalize_deplibs"
fi
continue
;;
esac
;;
*.lo | *.$objext)
if test $pass = conv; then
deplibs="$deplib $deplibs"
elif test $linkmode = prog; then
if test $pass = dlpreopen || test "$dlopen_support" != yes || test "$build_libtool_libs" = no; then
# If there is no dlopen support or we're linking statically,
# we need to preload.
newdlprefiles="$newdlprefiles $deplib"
compile_deplibs="$deplib $compile_deplibs"
finalize_deplibs="$deplib $finalize_deplibs"
else
newdlfiles="$newdlfiles $deplib"
fi
fi
continue
;;
%DEPLIBS%)
alldeplibs=yes
continue
;;
esac
if test $found = yes || test -f "$lib"; then :
else
$echo "$modename: cannot find the library \`$lib'" 1>&2
exit 1
fi
# Check to see that this really is a libtool archive.
if (sed -e '2q' $lib | egrep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then :
else
$echo "$modename: \`$lib' is not a valid libtool archive" 1>&2
exit 1
fi
ladir=`$echo "X$lib" | $Xsed -e 's%/[^/]*$%%'`
test "X$ladir" = "X$lib" && ladir="."
dlname=
dlopen=
dlpreopen=
libdir=
library_names=
old_library=
# If the library was installed with an old release of libtool,
# it will not redefine variable installed.
installed=yes
# Read the .la file
case $lib in
*/* | *\\*) . $lib ;;
*) . ./$lib ;;
esac
if test "$linkmode,$pass" = "lib,link" ||
test "$linkmode,$pass" = "prog,scan" ||
{ test $linkmode != prog && test $linkmode != lib; }; then
test -n "$dlopen" && dlfiles="$dlfiles $dlopen"
test -n "$dlpreopen" && dlprefiles="$dlprefiles $dlpreopen"
fi
if test $pass = conv; then
# only check for convenience libraries
deplibs="$lib $deplibs"
if test -z "$libdir"; then
if test -z "$old_library"; then
$echo "$modename: cannot find name of link library for \`$lib'" 1>&2
exit 1
fi
# It is a libtool convenience library, so add in its objects.
convenience="$convenience $ladir/$objdir/$old_library"
old_convenience="$old_convenience $ladir/$objdir/$old_library"
tmp_libs=
for deplib in $dependency_libs; do
deplibs="$deplib $deplibs"
case "$tmp_libs " in
*" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;;
esac
tmp_libs="$tmp_libs $deplib"
done
elif test $linkmode != prog && test $linkmode != lib; then
$echo "$modename: \`$lib' is not a convenience library" 1>&2
exit 1
fi
continue
fi
# Get the name of the library we link against.
linklib=
for l in $old_library $library_names; do
linklib="$l"
done
if test -z "$linklib"; then
$echo "$modename: cannot find name of link library for \`$lib'" 1>&2
exit 1
fi
# This library was specified with -dlopen.
if test $pass = dlopen; then
# Convenience libraries (no libdir) have no run-time image to dlopen.
if test -z "$libdir"; then
$echo "$modename: cannot -dlopen a convenience library: \`$lib'" 1>&2
exit 1
fi
if test -z "$dlname" || test "$dlopen_support" != yes || test "$build_libtool_libs" = no; then
# If there is no dlname, no dlopen support or we're linking
# statically, we need to preload. We also need to preload any
# dependent libraries so libltdl's deplib preloader doesn't
# bomb out in the load deplibs phase.
dlprefiles="$dlprefiles $lib $dependency_libs"
else
newdlfiles="$newdlfiles $lib"
fi
continue
fi
# We need an absolute path.
case $ladir in
[\\/]* | [A-Za-z]:[\\/]*) abs_ladir="$ladir" ;;
*)
abs_ladir=`cd "$ladir" && pwd`
if test -z "$abs_ladir"; then
$echo "$modename: warning: cannot determine absolute directory name of \`$ladir'" 1>&2
$echo "$modename: passing it literally to the linker, although it might fail" 1>&2
abs_ladir="$ladir"
fi
;;
esac
laname=`$echo "X$lib" | $Xsed -e 's%^.*/%%'`
# Find the relevant object directory and library name.
if test "X$installed" = Xyes; then
if test ! -f "$libdir/$linklib" && test -f "$abs_ladir/$linklib"; then
$echo "$modename: warning: library \`$lib' was moved." 1>&2
dir="$ladir"
absdir="$abs_ladir"
libdir="$abs_ladir"
else
dir="$libdir"
absdir="$libdir"
fi
else
dir="$ladir/$objdir"
absdir="$abs_ladir/$objdir"
# Remove this search path later
uninst_path="$uninst_path $abs_ladir"
fi
name=`$echo "X$laname" | $Xsed -e 's/\.la$//' -e 's/^lib//'`
# This library was specified with -dlpreopen.
if test $pass = dlpreopen; then
if test -z "$libdir"; then
$echo "$modename: cannot -dlpreopen a convenience library: \`$lib'" 1>&2
exit 1
fi
# Prefer using a static library (so that no silly _DYNAMIC symbols
# are required to link).
if test -n "$old_library"; then
newdlprefiles="$newdlprefiles $dir/$old_library"
# Otherwise, use the dlname, so that lt_dlopen finds it.
elif test -n "$dlname"; then
newdlprefiles="$newdlprefiles $dir/$dlname"
else
newdlprefiles="$newdlprefiles $dir/$linklib"
fi
fi
# No libdir means this .la is a convenience library: link its objects
# (via $old_library) directly rather than as a real dependency.
if test -z "$libdir"; then
# link the convenience library
if test $linkmode = lib; then
deplibs="$dir/$old_library $deplibs"
elif test "$linkmode,$pass" = "prog,link"; then
compile_deplibs="$dir/$old_library $compile_deplibs"
finalize_deplibs="$dir/$old_library $finalize_deplibs"
else
deplibs="$lib $deplibs" # used for prog,scan pass
fi
continue
fi
if test $linkmode = prog && test $pass != link; then
newlib_search_path="$newlib_search_path $ladir"
deplibs="$lib $deplibs"
linkalldeplibs=no
if test "$link_all_deplibs" != no || test -z "$library_names" ||
test "$build_libtool_libs" = no; then
linkalldeplibs=yes
fi
tmp_libs=
for deplib in $dependency_libs; do
case $deplib in
-L*) newlib_search_path="$newlib_search_path "`$echo "X$deplib" | $Xsed -e 's/^-L//'`;; ### testsuite: skip nested quoting test
esac
# Need to link against all dependency_libs?
if test $linkalldeplibs = yes; then
deplibs="$deplib $deplibs"
else
# Need to hardcode shared library paths
# or/and link against static libraries
newdependency_libs="$deplib $newdependency_libs"
fi
case "$tmp_libs " in
*" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;;
esac
tmp_libs="$tmp_libs $deplib"
done
continue
fi
if test "$linkmode,$pass" = "prog,link"; then
if test -n "$library_names" &&
{ test "$prefer_static_libs" = no || test -z "$old_library"; }; then
# We need to hardcode the library path
if test -n "$shlibpath_var"; then
# Make sure the rpath contains only unique directories.
case "$temp_rpath " in
*" $dir "*) ;;
*" $absdir "*) ;;
*) temp_rpath="$temp_rpath $dir" ;;
esac
fi
# Hardcode the library path.
# Skip directories that are in the system default run-time
# search path.
case " $sys_lib_dlsearch_path " in
*" $absdir "*) ;;
*)
case "$compile_rpath " in
*" $absdir "*) ;;
*) compile_rpath="$compile_rpath $absdir"
esac
;;
esac
case " $sys_lib_dlsearch_path " in
*" $libdir "*) ;;
*)
case "$finalize_rpath " in
*" $libdir "*) ;;
*) finalize_rpath="$finalize_rpath $libdir"
esac
;;
esac
fi
if test "$alldeplibs" = yes &&
{ test "$deplibs_check_method" = pass_all ||
{ test "$build_libtool_libs" = yes &&
test -n "$library_names"; }; }; then
# We only need to search for static libraries
continue
fi
fi
link_static=no # Whether the deplib will be linked statically
if test -n "$library_names" &&
{ test "$prefer_static_libs" = no || test -z "$old_library"; }; then
if test "$installed" = no; then
uninst_deplibs="$uninst_deplibs $lib"
need_relink=yes
fi
# This is a shared library
if test $linkmode = lib && test "$hardcode_into_libs" = all; then
# Hardcode the library path.
# Skip directories that are in the system default run-time
# search path.
case " $sys_lib_dlsearch_path " in
*" $absdir "*) ;;
*)
case "$compile_rpath " in
*" $absdir "*) ;;
*) compile_rpath="$compile_rpath $absdir"
esac
;;
esac
case " $sys_lib_dlsearch_path " in
*" $libdir "*) ;;
*)
case "$finalize_rpath " in
*" $libdir "*) ;;
*) finalize_rpath="$finalize_rpath $libdir"
esac
;;
esac
fi
if test -n "$old_archive_from_expsyms_cmds"; then
# figure out the soname
set dummy $library_names
realname="$2"
shift; shift
libname=`eval \\$echo \"$libname_spec\"`
# use dlname if we got it. it's perfectly good, no?
if test -n "$dlname"; then
soname="$dlname"
elif test -n "$soname_spec"; then
# bleh windows
case $host in
*cygwin*)
major=`expr $current - $age`
versuffix="-$major"
;;
esac
eval soname=\"$soname_spec\"
else
soname="$realname"
fi
# Make a new name for the extract_expsyms_cmds to use
soroot="$soname"
soname=`echo $soroot | sed -e 's/^.*\///'`
newlib="libimp-`echo $soname | sed 's/^lib//;s/\.dll$//'`.a"
# If the library has no export list, then create one now
if test -f "$output_objdir/$soname-def"; then :
else
$show "extracting exported symbol list from \`$soname'"
IFS="${IFS= }"; save_ifs="$IFS"; IFS='~'
eval cmds=\"$extract_expsyms_cmds\"
for cmd in $cmds; do
IFS="$save_ifs"
$show "$cmd"
$run eval "$cmd" || exit $?
done
IFS="$save_ifs"
fi
# Create $newlib
if test -f "$output_objdir/$newlib"; then :; else
$show "generating import library for \`$soname'"
IFS="${IFS= }"; save_ifs="$IFS"; IFS='~'
eval cmds=\"$old_archive_from_expsyms_cmds\"
for cmd in $cmds; do
IFS="$save_ifs"
$show "$cmd"
$run eval "$cmd" || exit $?
done
IFS="$save_ifs"
fi
# make sure the library variables are pointing to the new library
dir=$output_objdir
linklib=$newlib
fi
if test $linkmode = prog || test "$mode" != relink; then
add_shlibpath=
add_dir=
add=
lib_linked=yes
case $hardcode_action in
immediate | unsupported)
if test "$hardcode_direct" = no; then
add="$dir/$linklib"
elif test "$hardcode_minus_L" = no; then
case $host in
*-*-sunos*) add_shlibpath="$dir" ;;
esac
add_dir="-L$dir"
add="-l$name"
elif test "$hardcode_shlibpath_var" = no; then
add_shlibpath="$dir"
add="-l$name"
else
lib_linked=no
fi
;;
relink)
if test "$hardcode_direct" = yes; then
add="$dir/$linklib"
elif test "$hardcode_minus_L" = yes; then
add_dir="-L$dir"
add="-l$name"
elif test "$hardcode_shlibpath_var" = yes; then
add_shlibpath="$dir"
add="-l$name"
else
lib_linked=no
fi
;;
*) lib_linked=no ;;
esac
if test "$lib_linked" != yes; then
$echo "$modename: configuration error: unsupported hardcode properties"
exit 1
fi
if test -n "$add_shlibpath"; then
case :$compile_shlibpath: in
*":$add_shlibpath:"*) ;;
*) compile_shlibpath="$compile_shlibpath$add_shlibpath:" ;;
esac
fi
if test $linkmode = prog; then
test -n "$add_dir" && compile_deplibs="$add_dir $compile_deplibs"
test -n "$add" && compile_deplibs="$add $compile_deplibs"
else
test -n "$add_dir" && deplibs="$add_dir $deplibs"
test -n "$add" && deplibs="$add $deplibs"
if test "$hardcode_direct" != yes && \
test "$hardcode_minus_L" != yes && \
test "$hardcode_shlibpath_var" = yes; then
case :$finalize_shlibpath: in
*":$libdir:"*) ;;
*) finalize_shlibpath="$finalize_shlibpath$libdir:" ;;
esac
fi
fi
fi
if test $linkmode = prog || test "$mode" = relink; then
add_shlibpath=
add_dir=
add=
# Finalize command for both is simple: just hardcode it.
if test "$hardcode_direct" = yes; then
add="$libdir/$linklib"
elif test "$hardcode_minus_L" = yes; then
add_dir="-L$libdir"
add="-l$name"
elif test "$hardcode_shlibpath_var" = yes; then
case :$finalize_shlibpath: in
*":$libdir:"*) ;;
*) finalize_shlibpath="$finalize_shlibpath$libdir:" ;;
esac
add="-l$name"
else
# We cannot seem to hardcode it, guess we'll fake it.
add_dir="-L$libdir"
add="-l$name"
fi
if test $linkmode = prog; then
test -n "$add_dir" && finalize_deplibs="$add_dir $finalize_deplibs"
test -n "$add" && finalize_deplibs="$add $finalize_deplibs"
else
test -n "$add_dir" && deplibs="$add_dir $deplibs"
test -n "$add" && deplibs="$add $deplibs"
fi
fi
elif test $linkmode = prog; then
# Here we assume that one of hardcode_direct or hardcode_minus_L
# is not unsupported. This is valid on all known static and
# shared platforms.
if test "$hardcode_direct" != unsupported; then
test -n "$old_library" && linklib="$old_library"
compile_deplibs="$dir/$linklib $compile_deplibs"
finalize_deplibs="$dir/$linklib $finalize_deplibs"
else
compile_deplibs="-l$name -L$dir $compile_deplibs"
finalize_deplibs="-l$name -L$dir $finalize_deplibs"
fi
elif test "$build_libtool_libs" = yes; then
# Not a shared library
if test "$deplibs_check_method" != pass_all; then
# We're trying link a shared library against a static one
# but the system doesn't support it.
# Just print a warning and add the library to dependency_libs so
# that the program can be linked against the static library.
echo
echo "*** Warning: This library needs some functionality provided by $lib."
echo "*** I have the capability to make that library automatically link in when"
echo "*** you link to this library. But I can only do this if you have a"
echo "*** shared version of the library, which you do not appear to have."
else
convenience="$convenience $dir/$old_library"
old_convenience="$old_convenience $dir/$old_library"
deplibs="$dir/$old_library $deplibs"
link_static=yes
fi
fi
if test $linkmode = lib; then
if test -n "$dependency_libs" &&
{ test $hardcode_into_libs != yes || test $build_old_libs = yes ||
test $link_static = yes; }; then
# Extract -R from dependency_libs
temp_deplibs=
for libdir in $dependency_libs; do
case $libdir in
-R*) temp_xrpath=`$echo "X$libdir" | $Xsed -e 's/^-R//'`
case " $xrpath " in
*" $temp_xrpath "*) ;;
*) xrpath="$xrpath $temp_xrpath";;
esac;;
*) temp_deplibs="$temp_deplibs $libdir";;
esac
done
dependency_libs="$temp_deplibs"
fi
newlib_search_path="$newlib_search_path $absdir"
# Link against this library
test "$link_static" = no && newdependency_libs="$abs_ladir/$laname $newdependency_libs"
# ... and its dependency_libs
tmp_libs=
for deplib in $dependency_libs; do
newdependency_libs="$deplib $newdependency_libs"
case "$tmp_libs " in
*" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;;
esac
tmp_libs="$tmp_libs $deplib"
done
if test $link_all_deplibs != no; then
# Add the search paths of all dependency libraries
for deplib in $dependency_libs; do
case $deplib in
-L*) path="$deplib" ;;
*.la)
dir=`$echo "X$deplib" | $Xsed -e 's%/[^/]*$%%'`
test "X$dir" = "X$deplib" && dir="."
# We need an absolute path.
case $dir in
[\\/]* | [A-Za-z]:[\\/]*) absdir="$dir" ;;
*)
absdir=`cd "$dir" && pwd`
if test -z "$absdir"; then
$echo "$modename: warning: cannot determine absolute directory name of \`$dir'" 1>&2
absdir="$dir"
fi
;;
esac
if grep "^installed=no" $deplib > /dev/null; then
path="-L$absdir/$objdir"
else
eval libdir=`sed -n -e 's/^libdir=\(.*\)$/\1/p' $deplib`
if test -z "$libdir"; then
$echo "$modename: \`$deplib' is not a valid libtool archive" 1>&2
exit 1
fi
if test "$absdir" != "$libdir"; then
$echo "$modename: warning: \`$deplib' seems to be moved" 1>&2
fi
path="-L$absdir"
fi
;;
*) continue ;;
esac
case " $deplibs " in
*" $path "*) ;;
*) deplibs="$path $deplibs" ;;
esac
done
fi
fi
done
dependency_libs="$newdependency_libs"
if test $pass = dlpreopen; then
# Link the dlpreopened libraries before other libraries
# ($save_deplibs was stashed at the start of the dlopen pass).
for deplib in $save_deplibs; do
deplibs="$deplib $deplibs"
done
fi
# End-of-pass bookkeeping (skipped for the dlopen pass).
if test $pass != dlopen; then
if test $pass != conv; then
# Make sure lib_search_path contains only unique directories.
lib_search_path=
for dir in $newlib_search_path; do
case "$lib_search_path " in
*" $dir "*) ;;
*) lib_search_path="$lib_search_path $dir" ;;
esac
done
newlib_search_path=
fi
if test "$linkmode,$pass" != "prog,link"; then
vars="deplibs"
else
vars="compile_deplibs finalize_deplibs"
fi
for var in $vars dependency_libs; do
# Make sure that $var contains only unique libraries
# and add them in reverse order
eval tmp_libs=\"\$$var\"
new_libs=
for deplib in $tmp_libs; do
case "$deplib" in
# -L flags and $specialdeplibs (libs linked more than once) are
# kept every time they appear; other libs are deduplicated.
-L*) new_libs="$deplib $new_libs" ;;
*)
case " $specialdeplibs " in
*" $deplib "*) new_libs="$deplib $new_libs" ;;
*)
case " $new_libs " in
*" $deplib "*) ;;
*) new_libs="$deplib $new_libs" ;;
esac
;;
esac
;;
esac
done
# Second sweep: now deduplicate the -L flags themselves.
tmp_libs=
for deplib in $new_libs; do
case $deplib in
-L*)
case " $tmp_libs " in
*" $deplib "*) ;;
*) tmp_libs="$tmp_libs $deplib" ;;
esac
;;
*) tmp_libs="$tmp_libs $deplib" ;;
esac
done
eval $var=\"$tmp_libs\"
done
fi
done
# For programs, commit the dlopen/dlpreopen lists rebuilt by the passes.
if test $linkmode = prog; then
dlfiles="$newdlfiles"
dlprefiles="$newdlprefiles"
fi
case $linkmode in
oldlib)
# Building a plain static archive (.a): most link-mode options are
# meaningless here, so warn about each one that was given.
if test -n "$deplibs"; then
$echo "$modename: warning: \`-l' and \`-L' are ignored for archives" 1>&2
fi
if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then
$echo "$modename: warning: \`-dlopen' is ignored for archives" 1>&2
fi
if test -n "$rpath"; then
$echo "$modename: warning: \`-rpath' is ignored for archives" 1>&2
fi
if test -n "$xrpath"; then
$echo "$modename: warning: \`-R' is ignored for archives" 1>&2
fi
if test -n "$vinfo"; then
$echo "$modename: warning: \`-version-info' is ignored for archives" 1>&2
fi
if test -n "$release"; then
$echo "$modename: warning: \`-release' is ignored for archives" 1>&2
fi
if test -n "$export_symbols" || test -n "$export_symbols_regex"; then
$echo "$modename: warning: \`-export-symbols' is ignored for archives" 1>&2
fi
# Now set the variables for building old libraries.
build_libtool_libs=no
oldlibs="$output"
objs="$objs$old_deplibs"
;;
lib)
# Make sure we only generate libraries of the form `libNAME.la'.
case $outputname in
lib*)
name=`$echo "X$outputname" | $Xsed -e 's/\.la$//' -e 's/^lib//'`
eval libname=\"$libname_spec\"
;;
*)
# Only modules (-module) may omit the `lib' prefix.
if test "$module" = no; then
$echo "$modename: libtool library \`$output' must begin with \`lib'" 1>&2
$echo "$help" 1>&2
exit 1
fi
if test "$need_lib_prefix" != no; then
# Add the "lib" prefix for modules if required
name=`$echo "X$outputname" | $Xsed -e 's/\.la$//'`
eval libname=\"$libname_spec\"
else
libname=`$echo "X$outputname" | $Xsed -e 's/\.la$//'`
fi
;;
esac
# Plain (non-.lo) objects can only go into a libtool library when the
# platform lets us link anything into a shared lib (pass_all).
if test -n "$objs"; then
if test "$deplibs_check_method" != pass_all; then
# Fix: this diagnostic was redirected `2>&1' (stderr onto stdout);
# every other error in this script uses `1>&2'.
$echo "$modename: cannot build libtool library \`$output' from non-libtool objects on this host:$objs" 1>&2
exit 1
else
echo
echo "*** Warning: Linking the shared library $output against the non-libtool"
echo "*** objects $objs is not portable!"
libobjs="$libobjs $objs"
fi
fi
if test "$dlself" != no; then
$echo "$modename: warning: \`-dlopen self' is ignored for libtool libraries" 1>&2
fi
# $rpath holds the -rpath arguments; only one install directory makes
# sense for a library, so extra ones are ignored with a warning.
set dummy $rpath
if test $# -gt 2; then
$echo "$modename: warning: ignoring multiple \`-rpath's for a libtool library" 1>&2
fi
install_libdir="$2"
oldlibs=
if test -z "$rpath"; then
if test "$build_libtool_libs" = yes; then
# Building a libtool convenience library.
# Some compilers have problems with a `.al' extension so
# convenience libraries should have the same extension an
# archive normally would.
oldlibs="$output_objdir/$libname.$libext $oldlibs"
build_libtool_libs=convenience
build_old_libs=yes
fi
if test -n "$vinfo"; then
$echo "$modename: warning: \`-version-info' is ignored for convenience libraries" 1>&2
fi
if test -n "$release"; then
$echo "$modename: warning: \`-release' is ignored for convenience libraries" 1>&2
fi
else
# Parse the version information argument.
# Split CURRENT:REVISION:AGE on `:'; the trailing `0 0 0' pads missing
# fields, and a non-empty $8 means more than three fields were given.
IFS="${IFS= }"; save_ifs="$IFS"; IFS=':'
set dummy $vinfo 0 0 0
IFS="$save_ifs"
if test -n "$8"; then
$echo "$modename: too many parameters to \`-version-info'" 1>&2
$echo "$help" 1>&2
exit 1
fi
current="$2"
revision="$3"
age="$4"
# Check that each of the things are valid numbers.
case $current in
0 | [1-9] | [1-9][0-9]*) ;;
*)
$echo "$modename: CURRENT \`$current' is not a nonnegative integer" 1>&2
$echo "$modename: \`$vinfo' is not valid version information" 1>&2
exit 1
;;
esac
case $revision in
0 | [1-9] | [1-9][0-9]*) ;;
*)
$echo "$modename: REVISION \`$revision' is not a nonnegative integer" 1>&2
$echo "$modename: \`$vinfo' is not valid version information" 1>&2
exit 1
;;
esac
case $age in
0 | [1-9] | [1-9][0-9]*) ;;
*)
$echo "$modename: AGE \`$age' is not a nonnegative integer" 1>&2
$echo "$modename: \`$vinfo' is not valid version information" 1>&2
exit 1
;;
esac
# AGE counts interfaces still supported, so it can never exceed CURRENT.
if test $age -gt $current; then
$echo "$modename: AGE \`$age' is greater than the current interface number \`$current'" 1>&2
$echo "$modename: \`$vinfo' is not valid version information" 1>&2
exit 1
fi
# Calculate the version variables.
major=
versuffix=
verstring=
# Translate CURRENT/REVISION/AGE into the platform's versioning scheme:
# $major (soname component), $versuffix (file-name suffix) and
# $verstring (version string embedded by the linker, where used).
case $version_type in
none) ;;
darwin)
# Like Linux, but with the current version available in
# verstring for coding it into the library header
major=.`expr $current - $age`
versuffix="$major.$age.$revision"
# Darwin ld doesn't like 0 for these options...
minor_current=`expr $current + 1`
verstring="-compatibility_version $minor_current -current_version $minor_current.$revision"
;;
freebsd-aout)
major=".$current"
versuffix=".$current.$revision";
;;
freebsd-elf)
major=".$current"
versuffix=".$current";
;;
irix)
major=`expr $current - $age + 1`
verstring="sgi$major.$revision"
# Add in all the interfaces that we are compatible with.
loop=$revision
while test $loop != 0; do
iface=`expr $revision - $loop`
loop=`expr $loop - 1`
verstring="sgi$major.$iface:$verstring"
done
# Before this point, $major must not contain `.'.
major=.$major
versuffix="$major.$revision"
;;
linux)
major=.`expr $current - $age`
versuffix="$major.$age.$revision"
;;
osf)
major=`expr $current - $age`
versuffix=".$current.$age.$revision"
verstring="$current.$age.$revision"
# Add in all the interfaces that we are compatible with.
loop=$age
while test $loop != 0; do
iface=`expr $current - $loop`
loop=`expr $loop - 1`
verstring="$verstring:${iface}.0"
done
# Make executables depend on our current version.
verstring="$verstring:${current}.0"
;;
sunos)
major=".$current"
versuffix=".$current.$revision"
;;
windows)
# Use '-' rather than '.', since we only want one
# extension on DOS 8.3 filesystems.
major=`expr $current - $age`
versuffix="-$major"
;;
*)
# $version_type comes from the libtool configuration, so an unknown
# value indicates a broken configuration, not bad user input.
$echo "$modename: unknown library version type \`$version_type'" 1>&2
echo "Fatal configuration error. See the $PACKAGE docs for more information." 1>&2
exit 1
;;
esac
# Clear the version info if we defaulted, and they specified a release.
if test -z "$vinfo" && test -n "$release"; then
major=
verstring="0.0"
if test "$need_version" = no; then
versuffix=
else
versuffix=".0.0"
fi
fi
# Remove version info from name if versioning should be avoided
if test "$avoid_version" = yes && test "$need_version" = no; then
major=
versuffix=
verstring=""
fi
# Check to see if the archive will have undefined symbols.
if test "$allow_undefined" = yes; then
# -allow-undefined was given but this platform cannot do it: fall
# back to building only the static library.
if test "$allow_undefined_flag" = unsupported; then
$echo "$modename: warning: undefined symbols not allowed in $host shared libraries" 1>&2
build_libtool_libs=no
build_old_libs=yes
fi
else
# Don't allow undefined symbols.
allow_undefined_flag="$no_undefined_flag"
fi
fi
if test "$mode" != relink; then
# Remove our outputs, but don't remove object files since they
# may have been created when compiling PIC objects.
removelist=
tempremovelist=`echo "$output_objdir/*"`
for p in $tempremovelist; do
case $p in
*.$objext)
;;
$output_objdir/$outputname | $output_objdir/$libname.* | $output_objdir/${libname}${release}.*)
removelist="$removelist $p"
;;
*) ;;
esac
done
if test -n "$removelist"; then
$show "${rm}r $removelist"
$run ${rm}r $removelist
fi
fi
# Now set the variables for building old libraries.
if test "$build_old_libs" = yes && test "$build_libtool_libs" != convenience ; then
oldlibs="$oldlibs $output_objdir/$libname.$libext"
# Transform .lo files to .o files.
oldobjs="$objs "`$echo "X$libobjs" | $SP2NL | $Xsed -e '/\.'${libext}'$/d' -e "$lo2o" | $NL2SP`
fi
# Eliminate all temporary directories.
# Fix: the sed expressions must be double-quoted so the shell expands
# $path; the previous single-quoted form matched the literal string
# "$path" and therefore silently removed nothing.
# NOTE(review): a $path containing sed metacharacters (%, &, \) would
# still confuse the substitution -- acceptable here since these are
# build-tree directories.
for path in $uninst_path; do
lib_search_path=`echo "$lib_search_path " | sed -e "s% $path % %g"`
deplibs=`echo "$deplibs " | sed -e "s% -L$path % %g"`
dependency_libs=`echo "$dependency_libs " | sed -e "s% -L$path % %g"`
done
if test -n "$xrpath"; then
# If the user specified any rpath flags, then add them.
temp_xrpath=
for libdir in $xrpath; do
temp_xrpath="$temp_xrpath -R$libdir"
# Make sure finalize_rpath contains only unique directories.
case "$finalize_rpath " in
*" $libdir "*) ;;
*) finalize_rpath="$finalize_rpath $libdir" ;;
esac
done
# Record -R flags in dependency_libs unless they are already hardcoded
# into the library itself.
if test $hardcode_into_libs != yes || test $build_old_libs = yes; then
dependency_libs="$temp_xrpath $dependency_libs"
fi
fi
# Make sure dlfiles contains only unique files that won't be dlpreopened
old_dlfiles="$dlfiles"
dlfiles=
for lib in $old_dlfiles; do
case " $dlprefiles $dlfiles " in
*" $lib "*) ;;
*) dlfiles="$dlfiles $lib" ;;
esac
done
# Make sure dlprefiles contains only unique files
old_dlprefiles="$dlprefiles"
dlprefiles=
for lib in $old_dlprefiles; do
case "$dlprefiles " in
*" $lib "*) ;;
*) dlprefiles="$dlprefiles $lib" ;;
esac
done
if test "$build_libtool_libs" = yes; then
# For installed (rpath'd) shared libraries, decide whether an explicit
# C-library dependency must be added for this host.
if test -n "$rpath"; then
case $host in
*-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-*-beos*)
# these systems don't actually have a c library (as such)!
;;
*-*-rhapsody* | *-*-darwin1.[012])
# Rhapsody C library is in the System framework
deplibs="$deplibs -framework System"
;;
*)
# Add libc to deplibs on all other systems if necessary.
# Fix: quote the expansion -- if $build_libtool_need_lc is unset
# or empty, the unquoted form degenerates to `test = yes', a
# test(1) usage error instead of a clean "no".
if test "$build_libtool_need_lc" = "yes"; then
deplibs="$deplibs -lc"
fi
;;
esac
fi
# Transform deplibs into only deplibs that can be linked in shared.
# Save the naming variables first; the checks below re-evaluate
# $libname_spec/$library_names_spec with release/version cleared, and
# the originals are restored after the check.
name_save=$name
libname_save=$libname
release_save=$release
versuffix_save=$versuffix
major_save=$major
# I'm not sure if I'm treating the release correctly. I think
# release should show up in the -l (ie -lgmp5) so we don't want to
# add it in twice. Is that correct?
release=""
versuffix=""
major=""
newdeplibs=
droppeddeps=no
case $deplibs_check_method in
pass_all)
# Don't check for shared/static. Everything works.
# This might be a little naive. We might want to check
# whether the library exists or not. But this is on
# osf3 & osf4 and I'm not really sure... Just
# implementing what was already the behaviour.
newdeplibs=$deplibs
;;
test_compile)
# This code stresses the "libraries are programs" paradigm to its
# limits. Maybe even breaks it. We compile a program, linking it
# against the deplibs as a proxy for the library. Then we can check
# whether they linked in statically or dynamically with ldd.
$rm conftest.c
cat > conftest.c <<EOF
int main() { return 0; }
EOF
$rm conftest
$LTCC -o conftest conftest.c $deplibs
if test $? -eq 0 ; then
ldd_output=`ldd conftest`
for i in $deplibs; do
name="`expr $i : '-l\(.*\)'`"
# If $name is empty we are operating on a -L argument.
if test "$name" != "" -a "$name" != "0"; then
libname=`eval \\$echo \"$libname_spec\"`
deplib_matches=`eval \\$echo \"$library_names_spec\"`
set dummy $deplib_matches
deplib_match=$2
if test `expr "$ldd_output" : ".*$deplib_match"` -ne 0 ; then
newdeplibs="$newdeplibs $i"
else
droppeddeps=yes
echo
echo "*** Warning: This library needs some functionality provided by $i."
echo "*** I have the capability to make that library automatically link in when"
echo "*** you link to this library. But I can only do this if you have a"
echo "*** shared version of the library, which you do not appear to have."
fi
else
newdeplibs="$newdeplibs $i"
fi
done
else
# Error occured in the first compile. Let's try to salvage the situation:
# Compile a seperate program for each library.
for i in $deplibs; do
name="`expr $i : '-l\(.*\)'`"
# If $name is empty we are operating on a -L argument.
if test "$name" != "" -a "$name" != "0"; then
$rm conftest
$LTCC -o conftest conftest.c $i
# Did it work?
if test $? -eq 0 ; then
ldd_output=`ldd conftest`
libname=`eval \\$echo \"$libname_spec\"`
deplib_matches=`eval \\$echo \"$library_names_spec\"`
set dummy $deplib_matches
deplib_match=$2
if test `expr "$ldd_output" : ".*$deplib_match"` -ne 0 ; then
newdeplibs="$newdeplibs $i"
else
droppeddeps=yes
echo
echo "*** Warning: This library needs some functionality provided by $i."
echo "*** I have the capability to make that library automatically link in when"
echo "*** you link to this library. But I can only do this if you have a"
echo "*** shared version of the library, which you do not appear to have."
fi
else
droppeddeps=yes
echo
echo "*** Warning! Library $i is needed by this library but I was not able to"
echo "*** make it link in! You will probably need to install it or some"
echo "*** library that it depends on before this library will be fully"
echo "*** functional. Installing it before continuing would be even better."
fi
else
newdeplibs="$newdeplibs $i"
fi
done
fi
;;
file_magic*)
set dummy $deplibs_check_method
file_magic_regex=`expr "$deplibs_check_method" : "$2 \(.*\)"`
for a_deplib in $deplibs; do
name="`expr $a_deplib : '-l\(.*\)'`"
# If $name is empty we are operating on a -L argument.
if test "$name" != "" -a "$name" != "0"; then
libname=`eval \\$echo \"$libname_spec\"`
for i in $lib_search_path $sys_lib_search_path $shlib_search_path; do
potential_libs=`ls $i/$libname[.-]* 2>/dev/null`
for potent_lib in $potential_libs; do
# Follow soft links.
if ls -lLd "$potent_lib" 2>/dev/null \
| grep " -> " >/dev/null; then
continue
fi
# The statement above tries to avoid entering an
# endless loop below, in case of cyclic links.
# We might still enter an endless loop, since a link
# loop can be closed while we follow links,
# but so what?
potlib="$potent_lib"
while test -h "$potlib" 2>/dev/null; do
potliblink=`ls -ld $potlib | sed 's/.* -> //'`
case $potliblink in
[\\/]* | [A-Za-z]:[\\/]*) potlib="$potliblink";;
*) potlib=`$echo "X$potlib" | $Xsed -e 's,[^/]*$,,'`"$potliblink";;
esac
done
if eval $file_magic_cmd \"\$potlib\" 2>/dev/null \
| sed 10q \
| egrep "$file_magic_regex" > /dev/null; then
newdeplibs="$newdeplibs $a_deplib"
a_deplib=""
break 2
fi
done
done
if test -n "$a_deplib" ; then
droppeddeps=yes
echo
echo "*** Warning: This library needs some functionality provided by $a_deplib."
echo "*** I have the capability to make that library automatically link in when"
echo "*** you link to this library. But I can only do this if you have a"
echo "*** shared version of the library, which you do not appear to have."
fi
else
# Add a -L argument.
newdeplibs="$newdeplibs $a_deplib"
fi
done # Gone through all deplibs.
;;
none | unknown | *)
newdeplibs=""
if $echo "X $deplibs" | $Xsed -e 's/ -lc$//' \
-e 's/ -[LR][^ ]*//g' -e 's/[ ]//g' |
grep . >/dev/null; then
echo
if test "X$deplibs_check_method" = "Xnone"; then
echo "*** Warning: inter-library dependencies are not supported in this platform."
else
echo "*** Warning: inter-library dependencies are not known to be supported."
fi
echo "*** All declared inter-library dependencies are being dropped."
droppeddeps=yes
fi
;;
esac
versuffix=$versuffix_save
major=$major_save
release=$release_save
libname=$libname_save
name=$name_save
if test "$droppeddeps" = yes; then
if test "$module" = yes; then
echo
echo "*** Warning: libtool could not satisfy all declared inter-library"
echo "*** dependencies of module $libname. Therefore, libtool will create"
echo "*** a static module, that should work as long as the dlopening"
echo "*** application is linked with the -dlopen flag."
if test -z "$global_symbol_pipe"; then
echo
echo "*** However, this would only work if libtool was able to extract symbol"
echo "*** lists from a program, using \`nm' or equivalent, but libtool could"
echo "*** not find such a program. So, this module is probably useless."
echo "*** \`nm' from GNU binutils and a full rebuild may help."
fi
if test "$build_old_libs" = no; then
oldlibs="$output_objdir/$libname.$libext"
build_libtool_libs=module
build_old_libs=yes
else
build_libtool_libs=no
fi
else
echo "*** The inter-library dependencies that have been dropped here will be"
echo "*** automatically added whenever a program is linked with this library"
echo "*** or is declared to -dlopen it."
if test $allow_undefined = no; then
echo
echo "*** Since this library must not contain undefined symbols,"
echo "*** because either the platform does not support them or"
echo "*** it was explicitly requested with -no-undefined,"
echo "*** libtool will only create a static version of it."
if test "$build_old_libs" = no; then
oldlibs="$output_objdir/$libname.$libext"
build_libtool_libs=module
build_old_libs=yes
else
build_libtool_libs=no
fi
fi
fi
fi
# Done checking deplibs!
deplibs=$newdeplibs
fi
# All the library-specific variables (install_libdir is set above).
library_names=
old_library=
dlname=
# Test again, we may have decided not to build it any more
if test "$build_libtool_libs" = yes; then
if test $hardcode_into_libs = yes; then
# Hardcode the library paths
hardcode_libdirs=
dep_rpath=
rpath="$finalize_rpath"
test "$mode" != relink && rpath="$compile_rpath$rpath"
for libdir in $rpath; do
if test -n "$hardcode_libdir_flag_spec"; then
if test -n "$hardcode_libdir_separator"; then
if test -z "$hardcode_libdirs"; then
hardcode_libdirs="$libdir"
else
# Just accumulate the unique libdirs.
case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in
*"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*)
;;
*)
hardcode_libdirs="$hardcode_libdirs$hardcode_libdir_separator$libdir"
;;
esac
fi
else
# One flag per libdir when no separator is defined.
eval flag=\"$hardcode_libdir_flag_spec\"
dep_rpath="$dep_rpath $flag"
fi
elif test -n "$runpath_var"; then
case "$perm_rpath " in
*" $libdir "*) ;;
*) perm_rpath="$perm_rpath $libdir" ;;
esac
fi
done
# Substitute the hardcoded libdirs into the rpath.
if test -n "$hardcode_libdir_separator" &&
test -n "$hardcode_libdirs"; then
libdir="$hardcode_libdirs"
eval dep_rpath=\"$hardcode_libdir_flag_spec\"
fi
if test -n "$runpath_var" && test -n "$perm_rpath"; then
# We should set the runpath_var.
rpath=
for dir in $perm_rpath; do
rpath="$rpath$dir:"
done
eval "$runpath_var='$rpath\$$runpath_var'; export $runpath_var"
fi
test -n "$dep_rpath" && deplibs="$dep_rpath $deplibs"
fi
shlibpath="$finalize_shlibpath"
test "$mode" != relink && shlibpath="$compile_shlibpath$shlibpath"
if test -n "$shlibpath"; then
eval "$shlibpath_var='$shlibpath\$$shlibpath_var'; export $shlibpath_var"
fi
# Get the real and link names of the library.
eval library_names=\"$library_names_spec\"
set dummy $library_names
realname="$2"
shift; shift
if test -n "$soname_spec"; then
eval soname=\"$soname_spec\"
else
soname="$realname"
fi
if test x$dlname = x; then
dlname=$soname
fi
lib="$output_objdir/$realname"
# Remaining positional parameters are the extra link names.
for link
do
linknames="$linknames $link"
done
# # Ensure that we have .o objects for linkers which dislike .lo
# # (e.g. aix) in case we are running --disable-static
# for obj in $libobjs; do
# xdir=`$echo "X$obj" | $Xsed -e 's%/[^/]*$%%'`
# if test "X$xdir" = "X$obj"; then
# xdir="."
# else
# xdir="$xdir"
# fi
# baseobj=`$echo "X$obj" | $Xsed -e 's%^.*/%%'`
# oldobj=`$echo "X$baseobj" | $Xsed -e "$lo2o"`
# if test ! -f $xdir/$oldobj && test "$baseobj" != "$oldobj"; then
# $show "(cd $xdir && ${LN_S} $baseobj $oldobj)"
# $run eval '(cd $xdir && ${LN_S} $baseobj $oldobj)' || exit $?
# fi
# done
# Use standard objects if they are pic
test -z "$pic_flag" && libobjs=`$echo "X$libobjs" | $SP2NL | $Xsed -e "$lo2o" | $NL2SP`
# Prepare the list of exported symbols
if test -z "$export_symbols"; then
if test "$always_export_symbols" = yes || test -n "$export_symbols_regex"; then
$show "generating symbol list for \`$libname.la'"
export_symbols="$output_objdir/$libname.exp"
$run $rm $export_symbols
eval cmds=\"$export_symbols_cmds\"
# Commands are '~'-separated queues; run each with normal IFS.
IFS="${IFS= }"; save_ifs="$IFS"; IFS='~'
for cmd in $cmds; do
IFS="$save_ifs"
$show "$cmd"
$run eval "$cmd" || exit $?
done
IFS="$save_ifs"
if test -n "$export_symbols_regex"; then
$show "egrep -e \"$export_symbols_regex\" \"$export_symbols\" > \"${export_symbols}T\""
$run eval 'egrep -e "$export_symbols_regex" "$export_symbols" > "${export_symbols}T"'
$show "$mv \"${export_symbols}T\" \"$export_symbols\""
$run eval '$mv "${export_symbols}T" "$export_symbols"'
fi
fi
fi
if test -n "$export_symbols" && test -n "$include_expsyms"; then
$run eval '$echo "X$include_expsyms" | $SP2NL >> "$export_symbols"'
fi
# Merge convenience archives into the link, either via the
# whole-archive flag or by extracting their objects.
if test -n "$convenience"; then
if test -n "$whole_archive_flag_spec"; then
save_libobjs=$libobjs
eval libobjs=\"\$libobjs $whole_archive_flag_spec\"
else
gentop="$output_objdir/${outputname}x"
$show "${rm}r $gentop"
$run ${rm}r "$gentop"
$show "$mkdir $gentop"
$run $mkdir "$gentop"
status=$?
if test $status -ne 0 && test ! -d "$gentop"; then
exit $status
fi
generated="$generated $gentop"
for xlib in $convenience; do
# Extract the objects.
case $xlib in
[\\/]* | [A-Za-z]:[\\/]*) xabs="$xlib" ;;
*) xabs=`pwd`"/$xlib" ;;
esac
xlib=`$echo "X$xlib" | $Xsed -e 's%^.*/%%'`
xdir="$gentop/$xlib"
$show "${rm}r $xdir"
$run ${rm}r "$xdir"
$show "$mkdir $xdir"
$run $mkdir "$xdir"
status=$?
if test $status -ne 0 && test ! -d "$xdir"; then
exit $status
fi
$show "(cd $xdir && $AR x $xabs)"
$run eval "(cd \$xdir && $AR x \$xabs)" || exit $?
libobjs="$libobjs "`find $xdir -name \*.$objext -print -o -name \*.lo -print | $NL2SP`
done
fi
fi
if test "$thread_safe" = yes && test -n "$thread_safe_flag_spec"; then
eval flag=\"$thread_safe_flag_spec\"
linker_flags="$linker_flags $flag"
fi
# Make a backup of the uninstalled library when relinking
if test "$mode" = relink && test "$hardcode_into_libs" = all; then
$run eval '(cd $output_objdir && $rm ${realname}U && $mv $realname ${realname}U)' || exit $?
fi
# Add all flags from the command line. We here create a library,
# but those flags were only added to compile_command and
# finalize_command, which are only used when creating executables.
# So do it by hand here.
compiler_flags="$compiler_flags $add_flags"
# Only add it to commands which use CC, instead of LD, i.e.
# only to $compiler_flags
#linker_flags="$linker_flags $add_flags"
# Do each of the archive commands.
if test -n "$export_symbols" && test -n "$archive_expsym_cmds"; then
eval cmds=\"$archive_expsym_cmds\"
else
eval cmds=\"$archive_cmds\"
fi
if len=`expr "X$cmds" : ".*"` &&
test $len -le $max_cmd_len; then
:
else
# The command line is too long to link in one step, link piecewise.
$echo "creating reloadable object files..."
# Save the value of $output and $libobjs because we want to
# use them later. If we have whole_archive_flag_spec, we
# want to use save_libobjs as it was before
# whole_archive_flag_spec was expanded, because we can't
# assume the linker understands whole_archive_flag_spec.
# This may have to be revisited, in case too many
# convenience libraries get linked in and end up exceeding
# the spec.
if test -z "$convenience" || test -z "$whole_archive_flag_spec"; then
save_libobjs=$libobjs
fi
save_output=$output
# Clear the reloadable object creation command queue and
# initialize k to one.
test_cmds=
concat_cmds=
objlist=
delfiles=
last_robj=
k=1
output=$output_objdir/$save_output-${k}.$objext
# Loop over the list of objects to be linked.
for obj in $save_libobjs
do
# Probe the length of the would-be reload command first.
eval test_cmds=\"$reload_cmds $objlist $last_robj\"
if test "X$objlist" = X ||
{ len=`expr "X$test_cmds" : ".*"` &&
test $len -le $max_cmd_len; }; then
objlist="$objlist $obj"
else
# The command $test_cmds is almost too long, add a
# command to the queue.
if test $k -eq 1 ; then
# The first file doesn't have a previous command to add.
eval concat_cmds=\"$reload_cmds $objlist $last_robj\"
else
# All subsequent reloadable object files will link in
# the last one created.
eval concat_cmds=\"\$concat_cmds~$reload_cmds $objlist $last_robj\"
fi
last_robj=$output_objdir/$save_output-${k}.$objext
k=`expr $k + 1`
output=$output_objdir/$save_output-${k}.$objext
objlist=$obj
len=1
fi
done
# Handle the remaining objects by creating one last
# reloadable object file. All subsequent reloadable object
# files will link in the last one created.
test -z "$concat_cmds" || concat_cmds=$concat_cmds~
eval concat_cmds=\"\${concat_cmds}$reload_cmds $objlist $last_robj\"
# Set up a command to remove the reloadale object files
# after they are used.
i=0
while test $i -lt $k
do
i=`expr $i + 1`
delfiles="$delfiles $output_objdir/$save_output-${i}.$objext"
done
$echo "creating a temporary reloadable object file: $output"
# Loop through the commands generated above and execute them.
IFS="${IFS= }"; save_ifs="$IFS"; IFS='~'
for cmd in $concat_cmds; do
IFS="$save_ifs"
$show "$cmd"
$run eval "$cmd" || exit $?
done
IFS="$save_ifs"
libobjs=$output
# Restore the value of output.
output=$save_output
if test -n "$convenience" && test -n "$whole_archive_flag_spec"; then
eval libobjs=\"\$libobjs $whole_archive_flag_spec\"
fi
# Expand the library linking commands again to reset the
# value of $libobjs for piecewise linking.
# Do each of the archive commands.
if test -n "$export_symbols" && test -n "$archive_expsym_cmds"; then
eval cmds=\"$archive_expsym_cmds\"
else
eval cmds=\"$archive_cmds\"
fi
# Append the command to remove the reloadable object files
# to the just-reset $cmds.
eval cmds=\"\$cmds~$rm $delfiles\"
fi
# Run the actual archive/link command queue.
IFS="${IFS= }"; save_ifs="$IFS"; IFS='~'
for cmd in $cmds; do
IFS="$save_ifs"
$show "$cmd"
$run eval "$cmd" || exit $?
done
IFS="$save_ifs"
# Restore the uninstalled library and exit
if test "$mode" = relink && test "$hardcode_into_libs" = all; then
$run eval '(cd $output_objdir && $rm ${realname}T && $mv $realname ${realname}T && $mv "$realname"U $realname)' || exit $?
exit 0
fi
# Create links to the real library.
for linkname in $linknames; do
if test "$realname" != "$linkname"; then
$show "(cd $output_objdir && $rm $linkname && $LN_S $realname $linkname)"
$run eval '(cd $output_objdir && $rm $linkname && $LN_S $realname $linkname)' || exit $?
fi
done
# If -module or -export-dynamic was specified, set the dlname.
if test "$module" = yes || test "$export_dynamic" = yes; then
# On all known operating systems, these are identical.
dlname="$soname"
fi
fi
;;
# Output is a reloadable object (.o / .lo): warn about ignored
# library-only flags, merge convenience-archive objects, and run
# $reload_cmds to produce the non-PIC and (optionally) PIC objects.
obj)
if test -n "$deplibs"; then
$echo "$modename: warning: \`-l' and \`-L' are ignored for objects" 1>&2
fi
if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then
$echo "$modename: warning: \`-dlopen' is ignored for objects" 1>&2
fi
if test -n "$rpath"; then
$echo "$modename: warning: \`-rpath' is ignored for objects" 1>&2
fi
if test -n "$xrpath"; then
$echo "$modename: warning: \`-R' is ignored for objects" 1>&2
fi
if test -n "$vinfo"; then
$echo "$modename: warning: \`-version-info' is ignored for objects" 1>&2
fi
if test -n "$release"; then
$echo "$modename: warning: \`-release' is ignored for objects" 1>&2
fi
case $output in
*.lo)
if test -n "$objs$old_deplibs"; then
$echo "$modename: cannot build library object \`$output' from non-libtool objects" 1>&2
exit 1
fi
libobj="$output"
obj=`$echo "X$output" | $Xsed -e "$lo2o"`
;;
*)
libobj=
obj="$output"
;;
esac
# Delete the old objects.
$run $rm $obj $libobj
# Objects from convenience libraries. This assumes
# single-version convenience libraries. Whenever we create
# different ones for PIC/non-PIC, this we'll have to duplicate
# the extraction.
reload_conv_objs=
gentop=
# reload_cmds runs $LD directly, so let us get rid of
# -Wl from whole_archive_flag_spec
wl=
if test -n "$convenience"; then
if test -n "$whole_archive_flag_spec"; then
eval reload_conv_objs=\"\$reload_objs $whole_archive_flag_spec\"
else
# No whole-archive support: unpack each archive into a
# scratch dir and collect the member objects.
gentop="$output_objdir/${obj}x"
$show "${rm}r $gentop"
$run ${rm}r "$gentop"
$show "$mkdir $gentop"
$run $mkdir "$gentop"
status=$?
if test $status -ne 0 && test ! -d "$gentop"; then
exit $status
fi
generated="$generated $gentop"
for xlib in $convenience; do
# Extract the objects.
case $xlib in
[\\/]* | [A-Za-z]:[\\/]*) xabs="$xlib" ;;
*) xabs=`pwd`"/$xlib" ;;
esac
xlib=`$echo "X$xlib" | $Xsed -e 's%^.*/%%'`
xdir="$gentop/$xlib"
$show "${rm}r $xdir"
$run ${rm}r "$xdir"
$show "$mkdir $xdir"
$run $mkdir "$xdir"
status=$?
if test $status -ne 0 && test ! -d "$xdir"; then
exit $status
fi
$show "(cd $xdir && $AR x $xabs)"
$run eval "(cd \$xdir && $AR x \$xabs)" || exit $?
reload_conv_objs="$reload_objs "`find $xdir -name \*.$objext -print -o -name \*.lo -print | $NL2SP`
done
fi
fi
# Create the old-style object.
reload_objs="$objs$old_deplibs "`$echo "X$libobjs" | $SP2NL | $Xsed -e '/\.'${libext}$'/d' -e '/\.lib$/d' -e "$lo2o" | $NL2SP`" $reload_conv_objs" ### testsuite: skip nested quoting test
output="$obj"
eval cmds=\"$reload_cmds\"
IFS="${IFS= }"; save_ifs="$IFS"; IFS='~'
for cmd in $cmds; do
IFS="$save_ifs"
$show "$cmd"
$run eval "$cmd" || exit $?
done
IFS="$save_ifs"
# Exit if we aren't doing a library object file.
if test -z "$libobj"; then
if test -n "$gentop"; then
$show "${rm}r $gentop"
$run ${rm}r $gentop
fi
exit 0
fi
if test "$build_libtool_libs" != yes; then
if test -n "$gentop"; then
$show "${rm}r $gentop"
$run ${rm}r $gentop
fi
# Create an invalid libtool object if no PIC, so that we don't
# accidentally link it into a program.
# $show "echo timestamp > $libobj"
# $run eval "echo timestamp > $libobj" || exit $?
exit 0
fi
if test -n "$pic_flag" || test "$pic_mode" != default; then
# Only do commands if we really have different PIC objects.
reload_objs="$libobjs $reload_conv_objs"
output="$libobj"
eval cmds=\"$reload_cmds\"
IFS="${IFS= }"; save_ifs="$IFS"; IFS='~'
for cmd in $cmds; do
IFS="$save_ifs"
$show "$cmd"
$run eval "$cmd" || exit $?
done
IFS="$save_ifs"
# else
# # Just create a symlink.
# $show $rm $libobj
# $run $rm $libobj
# xdir=`$echo "X$libobj" | $Xsed -e 's%/[^/]*$%%'`
# if test "X$xdir" = "X$libobj"; then
# xdir="."
# else
# xdir="$xdir"
# fi
# baseobj=`$echo "X$libobj" | $Xsed -e 's%^.*/%%'`
# oldobj=`$echo "X$baseobj" | $Xsed -e "$lo2o"`
# $show "(cd $xdir && $LN_S $oldobj $baseobj)"
# $run eval '(cd $xdir && $LN_S $oldobj $baseobj)' || exit $?
fi
if test -n "$gentop"; then
$show "${rm}r $gentop"
$run ${rm}r $gentop
fi
exit 0
;;
# Output is a program: hardcode run paths, generate the dlpreopen
# symbol table if needed, link (relinking at install time when the
# platform requires it), and emit a wrapper script for uninstalled use.
prog)
case $host in
*cygwin*) output=`echo $output | sed -e 's,.exe$,,;s,$,.exe,'` ;;
esac
if test -n "$vinfo"; then
$echo "$modename: warning: \`-version-info' is ignored for programs" 1>&2
fi
if test -n "$release"; then
$echo "$modename: warning: \`-release' is ignored for programs" 1>&2
fi
if test "$preload" = yes; then
if test "$dlopen_support" = unknown && test "$dlopen_self" = unknown &&
test "$dlopen_self_static" = unknown; then
$echo "$modename: warning: \`AC_LIBTOOL_DLOPEN' not used. Assuming no dlopen support."
fi
fi
compile_command="$compile_command $compile_deplibs"
finalize_command="$finalize_command $finalize_deplibs"
if test -n "$rpath$xrpath"; then
# If the user specified any rpath flags, then add them.
for libdir in $rpath $xrpath; do
# This is the magic to use -rpath.
case "$finalize_rpath " in
*" $libdir "*) ;;
*) finalize_rpath="$finalize_rpath $libdir" ;;
esac
done
fi
# Now hardcode the library paths
rpath=
hardcode_libdirs=
for libdir in $compile_rpath $finalize_rpath; do
if test -n "$hardcode_libdir_flag_spec"; then
if test -n "$hardcode_libdir_separator"; then
if test -z "$hardcode_libdirs"; then
hardcode_libdirs="$libdir"
else
# Just accumulate the unique libdirs.
case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in
*"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*)
;;
*)
hardcode_libdirs="$hardcode_libdirs$hardcode_libdir_separator$libdir"
;;
esac
fi
else
eval flag=\"$hardcode_libdir_flag_spec\"
rpath="$rpath $flag"
fi
elif test -n "$runpath_var"; then
case "$perm_rpath " in
*" $libdir "*) ;;
*) perm_rpath="$perm_rpath $libdir" ;;
esac
fi
# On Windows-ish hosts also record the dir for DLL searching.
case $host in
*-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2*)
case :$dllsearchpath: in
*":$libdir:"*) ;;
*) dllsearchpath="$dllsearchpath:$libdir";;
esac
;;
esac
done
# Substitute the hardcoded libdirs into the rpath.
if test -n "$hardcode_libdir_separator" &&
test -n "$hardcode_libdirs"; then
libdir="$hardcode_libdirs"
eval rpath=\" $hardcode_libdir_flag_spec\"
fi
compile_rpath="$rpath"
rpath=
hardcode_libdirs=
for libdir in $finalize_rpath; do
if test -n "$hardcode_libdir_flag_spec"; then
if test -n "$hardcode_libdir_separator"; then
if test -z "$hardcode_libdirs"; then
hardcode_libdirs="$libdir"
else
# Just accumulate the unique libdirs.
case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in
*"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*)
;;
*)
hardcode_libdirs="$hardcode_libdirs$hardcode_libdir_separator$libdir"
;;
esac
fi
else
eval flag=\"$hardcode_libdir_flag_spec\"
rpath="$rpath $flag"
fi
elif test -n "$runpath_var"; then
case "$finalize_perm_rpath " in
*" $libdir "*) ;;
*) finalize_perm_rpath="$finalize_perm_rpath $libdir" ;;
esac
fi
done
# Substitute the hardcoded libdirs into the rpath.
if test -n "$hardcode_libdir_separator" &&
test -n "$hardcode_libdirs"; then
libdir="$hardcode_libdirs"
eval rpath=\" $hardcode_libdir_flag_spec\"
fi
finalize_rpath="$rpath"
# Generate a C symbol table for dlpreopened files, if possible.
dlsyms=
if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then
if test -n "$NM" && test -n "$global_symbol_pipe"; then
dlsyms="${outputname}S.c"
else
$echo "$modename: not configured to extract global symbols from dlpreopened files" 1>&2
fi
fi
if test -n "$dlsyms"; then
case $dlsyms in
"") ;;
*.c)
# Discover the nlist of each of the dlfiles.
nlist="$output_objdir/${outputname}.nm"
$show "$rm $nlist ${nlist}S ${nlist}T"
$run $rm "$nlist" "${nlist}S" "${nlist}T"
# Parse the name list into a source file.
$show "creating $output_objdir/$dlsyms"
test -z "$run" && $echo > "$output_objdir/$dlsyms" "\
/* $dlsyms - symbol resolution table for \`$outputname' dlsym emulation. */
/* Generated by $PROGRAM - GNU $PACKAGE $VERSION$TIMESTAMP */
#ifdef __cplusplus
extern \"C\" {
#endif
/* Prevent the only kind of declaration conflicts we can make. */
#define lt_preloaded_symbols some_other_symbol
/* External symbol declarations for the compiler. */\
"
if test "$dlself" = yes; then
$show "generating symbol list for \`$output'"
test -z "$run" && $echo ': @PROGRAM@ ' > "$nlist"
# Add our own program objects to the symbol list.
progfiles="$objs$old_deplibs"
for arg in $progfiles; do
$show "extracting global C symbols from \`$arg'"
$run eval "$NM $arg | $global_symbol_pipe >> '$nlist'"
done
if test -n "$exclude_expsyms"; then
$run eval 'egrep -v " ($exclude_expsyms)$" "$nlist" > "$nlist"T'
$run eval '$mv "$nlist"T "$nlist"'
fi
if test -n "$export_symbols_regex"; then
$run eval 'egrep -e "$export_symbols_regex" "$nlist" > "$nlist"T'
$run eval '$mv "$nlist"T "$nlist"'
fi
# Prepare the list of exported symbols
if test -z "$export_symbols"; then
export_symbols="$output_objdir/$output.exp"
$run $rm $export_symbols
$run eval "sed -n -e '/^: @PROGRAM@$/d' -e 's/^.* \(.*\)$/\1/p' "'< "$nlist" > "$export_symbols"'
else
$run eval "sed -e 's/\([][.*^$]\)/\\\1/g' -e 's/^/ /' -e 's/$/$/'"' < "$export_symbols" > "$output_objdir/$output.exp"'
$run eval 'grep -f "$output_objdir/$output.exp" < "$nlist" > "$nlist"T'
$run eval 'mv "$nlist"T "$nlist"'
fi
fi
for arg in $dlprefiles; do
$show "extracting global C symbols from \`$arg'"
name=`echo "$arg" | sed -e 's%^.*/%%'`
$run eval 'echo ": $name " >> "$nlist"'
$run eval "$NM $arg | $global_symbol_pipe >> '$nlist'"
done
if test -z "$run"; then
# Make sure we have at least an empty file.
test -f "$nlist" || : > "$nlist"
if test -n "$exclude_expsyms"; then
egrep -v " ($exclude_expsyms)$" "$nlist" > "$nlist"T
$mv "$nlist"T "$nlist"
fi
# Try sorting and uniquifying the output.
if grep -v "^: " < "$nlist" | sort +2 | uniq > "$nlist"S; then
:
else
grep -v "^: " < "$nlist" > "$nlist"S
fi
if test -f "$nlist"S; then
eval "$global_symbol_to_cdecl"' < "$nlist"S >> "$output_objdir/$dlsyms"'
else
echo '/* NONE */' >> "$output_objdir/$dlsyms"
fi
$echo >> "$output_objdir/$dlsyms" "\
#undef lt_preloaded_symbols
#if defined (__STDC__) && __STDC__
# define lt_ptr_t void *
#else
# define lt_ptr_t char *
# define const
#endif
/* The mapping between symbol names and symbols. */
const struct {
const char *name;
lt_ptr_t address;
}
lt_preloaded_symbols[] =
{\
"
sed -n -e 's/^: \([^ ]*\) $/ {\"\1\", (lt_ptr_t) 0},/p' \
-e 's/^. \([^ ]*\) \([^ ]*\)$/ {"\2", (lt_ptr_t) \&\2},/p' \
< "$nlist" >> "$output_objdir/$dlsyms"
$echo >> "$output_objdir/$dlsyms" "\
{0, (lt_ptr_t) 0}
};
/* This works around a problem in FreeBSD linker */
#ifdef FREEBSD_WORKAROUND
static const void *lt_preloaded_setup() {
return lt_preloaded_symbols;
}
#endif
#ifdef __cplusplus
}
#endif\
"
fi
pic_flag_for_symtable=
case $host in
# compiling the symbol table file with pic_flag works around
# a FreeBSD bug that causes programs to crash when -lm is
# linked before any other PIC object. But we must not use
# pic_flag when linking with -static. The problem exists in
# FreeBSD 2.2.6 and is fixed in FreeBSD 3.1.
*-*-freebsd2*|*-*-freebsd3.0*|*-*-freebsdelf3.0*)
case "$compile_command " in
*" -static "*) ;;
*) pic_flag_for_symtable=" $pic_flag -DFREEBSD_WORKAROUND";;
esac;;
*-*-hpux*)
case "$compile_command " in
*" -static "*) ;;
*) pic_flag_for_symtable=" $pic_flag";;
esac
esac
# Now compile the dynamic symbol file.
$show "(cd $output_objdir && $LTCC -c$no_builtin_flag$pic_flag_for_symtable \"$dlsyms\")"
$run eval '(cd $output_objdir && $LTCC -c$no_builtin_flag$pic_flag_for_symtable "$dlsyms")' || exit $?
# Clean up the generated files.
$show "$rm $output_objdir/$dlsyms $nlist ${nlist}S ${nlist}T"
$run $rm "$output_objdir/$dlsyms" "$nlist" "${nlist}S" "${nlist}T"
# Transform the symbol file into the correct name.
compile_command=`$echo "X$compile_command" | $Xsed -e "s%@SYMFILE@%$output_objdir/${outputname}S.${objext}%"`
finalize_command=`$echo "X$finalize_command" | $Xsed -e "s%@SYMFILE@%$output_objdir/${outputname}S.${objext}%"`
;;
*)
$echo "$modename: unknown suffix for \`$dlsyms'" 1>&2
exit 1
;;
esac
else
# We keep going just in case the user didn't refer to
# lt_preloaded_symbols. The linker will fail if global_symbol_pipe
# really was required.
# Nullify the symbol file.
compile_command=`$echo "X$compile_command" | $Xsed -e "s% @SYMFILE@%%"`
finalize_command=`$echo "X$finalize_command" | $Xsed -e "s% @SYMFILE@%%"`
fi
# AIX runtime linking requires linking programs with -Wl,-brtl and libs with -Wl,-G
# Also add -bnolibpath to the beginning of the link line, to clear the hardcoded runpath.
# Otherwise, things like the -L path to libgcc.a are accidentally hardcoded by ld.
# This does not apply on AIX for ia64, which uses a SysV linker.
case "$host" in
ia64-*-aix5*) ;;
*-*-aix4* | *-*-aix5*)
compile_command=`$echo "X$compile_command $wl-brtl" | $Xsed -e "s/\$CC/\$CC $wl-bnolibpath/1"`
finalize_command=`$echo "X$finalize_command $wl-brtl" | $Xsed -e "s/\$CC/\$CC $wl-bnolibpath/1"` ;;
esac
# With no uninstalled-library deps we can link the final binary
# right now and skip the wrapper machinery entirely.
if test $need_relink = no || test "$build_libtool_libs" != yes; then
# Replace the output file specification.
compile_command=`$echo "X$compile_command" | $Xsed -e 's%@OUTPUT@%'"$output"'%g'`
link_command="$compile_command$compile_rpath"
# We have no uninstalled library dependencies, so finalize right now.
$show "$link_command"
$run eval "$link_command"
status=$?
# Delete the generated files.
if test -n "$dlsyms"; then
$show "$rm $output_objdir/${outputname}S.${objext}"
$run $rm "$output_objdir/${outputname}S.${objext}"
fi
exit $status
fi
if test -n "$shlibpath_var"; then
# We should set the shlibpath_var
rpath=
for dir in $temp_rpath; do
case $dir in
[\\/]* | [A-Za-z]:[\\/]*)
# Absolute path.
rpath="$rpath$dir:"
;;
*)
# Relative path: add a thisdir entry.
rpath="$rpath\$thisdir/$dir:"
;;
esac
done
temp_rpath="$rpath"
fi
if test -n "$compile_shlibpath$finalize_shlibpath"; then
compile_command="$shlibpath_var=\"$compile_shlibpath$finalize_shlibpath\$$shlibpath_var\" $compile_command"
fi
if test -n "$finalize_shlibpath"; then
finalize_command="$shlibpath_var=\"$finalize_shlibpath\$$shlibpath_var\" $finalize_command"
fi
compile_var=
finalize_var=
if test -n "$runpath_var"; then
if test -n "$perm_rpath"; then
# We should set the runpath_var.
rpath=
for dir in $perm_rpath; do
rpath="$rpath$dir:"
done
compile_var="$runpath_var=\"$rpath\$$runpath_var\" "
fi
if test -n "$finalize_perm_rpath"; then
# We should set the runpath_var.
rpath=
for dir in $finalize_perm_rpath; do
rpath="$rpath$dir:"
done
finalize_var="$runpath_var=\"$rpath\$$runpath_var\" "
fi
fi
if test "$no_install" = yes; then
# We don't need to create a wrapper script.
link_command="$compile_var$compile_command$compile_rpath"
# Replace the output file specification.
link_command=`$echo "X$link_command" | $Xsed -e 's%@OUTPUT@%'"$output"'%g'`
# Delete the old output file.
$run $rm $output
# Link the executable and exit
$show "$link_command"
$run eval "$link_command" || exit $?
exit 0
fi
# Choose link vs. relink commands per the platform's hardcode and
# fast-install capabilities.
if test "$hardcode_action" = relink; then
# Fast installation is not supported
link_command="$compile_var$compile_command$compile_rpath"
relink_command="$finalize_var$finalize_command$finalize_rpath"
$echo "$modename: warning: this platform does not like uninstalled shared libraries" 1>&2
$echo "$modename: \`$output' will be relinked during installation" 1>&2
else
if test "$fast_install" != no; then
link_command="$finalize_var$compile_command$finalize_rpath"
if test "$fast_install" = yes; then
relink_command=`$echo "X$compile_var$compile_command$compile_rpath" | $Xsed -e 's%@OUTPUT@%\$progdir/\$file%g'`
else
# fast_install is set to needless
relink_command=
fi
else
link_command="$compile_var$compile_command$compile_rpath"
relink_command="$finalize_var$finalize_command$finalize_rpath"
fi
fi
# Replace the output file specification.
link_command=`$echo "X$link_command" | $Xsed -e 's%@OUTPUT@%'"$output_objdir/$outputname"'%g'`
# Delete the old output files.
$run $rm $output $output_objdir/$outputname $output_objdir/lt-$outputname
$show "$link_command"
$run eval "$link_command" || exit $?
# Now create the wrapper script.
$show "creating $output"
# Quote the relink command for shipping.
if test -n "$relink_command"; then
# Preserve any variables that may affect compiler behavior
for var in $variables_saved_for_relink; do
if eval test -z \"\${$var+set}\"; then
relink_command="{ test -z \"\${$var+set}\" || unset $var || { $var=; export $var; }; }; $relink_command"
elif eval var_value=\$$var; test -z "$var_value"; then
relink_command="$var=; export $var; $relink_command"
else
var_value=`$echo "X$var_value" | $Xsed -e "$sed_quote_subst"`
relink_command="$var=\"$var_value\"; export $var; $relink_command"
fi
done
relink_command="cd `pwd`; $relink_command"
relink_command=`$echo "X$relink_command" | $Xsed -e "$sed_quote_subst"`
fi
# Quote $echo for shipping.
if test "X$echo" = "X$SHELL $0 --fallback-echo"; then
case $0 in
[\\/]* | [A-Za-z]:[\\/]*) qecho="$SHELL $0 --fallback-echo";;
*) qecho="$SHELL `pwd`/$0 --fallback-echo";;
esac
qecho=`$echo "X$qecho" | $Xsed -e "$sed_quote_subst"`
else
qecho=`$echo "X$echo" | $Xsed -e "$sed_quote_subst"`
fi
# Only actually do things if our run command is non-null.
if test -z "$run"; then
# win32 will think the script is a binary if it has
# a .exe suffix, so we strip it off here.
case $output in
*.exe) output=`echo $output|sed 's,.exe$,,'` ;;
esac
# test for cygwin because mv fails w/o .exe extensions
case $host in
*cygwin*) exeext=.exe ;;
*) exeext= ;;
esac
$rm $output
trap "$rm $output; exit 1" 1 2 15
# Emit the wrapper-script header; everything between the quotes is
# literal wrapper content, so no comments may be inserted inside.
$echo > $output "\
#! $SHELL
# $output - temporary wrapper script for $objdir/$outputname
# Generated by $PROGRAM - GNU $PACKAGE $VERSION$TIMESTAMP
#
# The $output program cannot be directly executed until all the libtool
# libraries that it depends on are installed.
#
# This wrapper script should never be moved out of the build directory.
# If it is, it will not operate correctly.
# Sed substitution that helps us do robust quoting. It backslashifies
# metacharacters that are still active within double-quoted strings.
Xsed='sed -e 1s/^X//'
sed_quote_subst='$sed_quote_subst'
# The HP-UX ksh and POSIX shell print the target directory to stdout
# if CDPATH is set.
if test \"\${CDPATH+set}\" = set; then CDPATH=:; export CDPATH; fi
relink_command=\"$relink_command\"
# This environment variable determines our operation mode.
if test \"\$libtool_install_magic\" = \"$magic\"; then
# install mode needs the following variable:
uninst_deplibs='$uninst_deplibs'
else
# When we are sourced in execute mode, \$file and \$echo are already set.
if test \"\$libtool_execute_magic\" != \"$magic\"; then
echo=\"$qecho\"
file=\"\$0\"
# Make sure echo works.
if test \"X\$1\" = X--no-reexec; then
# Discard the --no-reexec flag, and continue.
shift
elif test \"X\`(\$echo '\t') 2>/dev/null\`\" = 'X\t'; then
# Yippee, \$echo works!
:
else
# Restart under the correct shell, and then maybe \$echo will work.
exec $SHELL \"\$0\" --no-reexec \${1+\"\$@\"}
fi
fi\
"
$echo >> $output "\
# Find the directory that this script lives in.
thisdir=\`\$echo \"X\$file\" | \$Xsed -e 's%/[^/]*$%%'\`
test \"x\$thisdir\" = \"x\$file\" && thisdir=.
# Follow symbolic links until we get to the real thisdir.
file=\`ls -ld \"\$file\" | sed -n 's/.*-> //p'\`
while test -n \"\$file\"; do
destdir=\`\$echo \"X\$file\" | \$Xsed -e 's%/[^/]*\$%%'\`
# If there was a directory component, then change thisdir.
if test \"x\$destdir\" != \"x\$file\"; then
case \"\$destdir\" in
[\\\\/]* | [A-Za-z]:[\\\\/]*) thisdir=\"\$destdir\" ;;
*) thisdir=\"\$thisdir/\$destdir\" ;;
esac
fi
file=\`\$echo \"X\$file\" | \$Xsed -e 's%^.*/%%'\`
file=\`ls -ld \"\$thisdir/\$file\" | sed -n 's/.*-> //p'\`
done
# Try to get the absolute directory name.
absdir=\`cd \"\$thisdir\" && pwd\`
test -n \"\$absdir\" && thisdir=\"\$absdir\"
"
if test "$fast_install" = yes; then
echo >> $output "\
program=lt-'$outputname'$exeext
progdir=\"\$thisdir/$objdir\"
if test ! -f \"\$progdir/\$program\" || \\
{ file=\`ls -1dt \"\$progdir/\$program\" \"\$progdir/../\$program\" 2>/dev/null | sed 1q\`; \\
test \"X\$file\" != \"X\$progdir/\$program\"; }; then
file=\"\$\$-\$program\"
if test ! -d \"\$progdir\"; then
$mkdir \"\$progdir\"
else
$rm \"\$progdir/\$file\"
fi"
echo >> $output "\
# relink executable if necessary
if test -n \"\$relink_command\"; then
if (eval \$relink_command); then :
else
$rm \"\$progdir/\$file\"
exit 1
fi
fi
$mv \"\$progdir/\$file\" \"\$progdir/\$program\" 2>/dev/null ||
{ $rm \"\$progdir/\$program\";
$mv \"\$progdir/\$file\" \"\$progdir/\$program\"; }
$rm \"\$progdir/\$file\"
fi"
else
echo >> $output "\
program='$outputname'
progdir=\"\$thisdir/$objdir\"
"
fi
echo >> $output "\
if test -f \"\$progdir/\$program\"; then"
# Export our shlibpath_var if we have one.
if test "$shlibpath_overrides_runpath" = yes && test -n "$shlibpath_var" && test -n "$temp_rpath"; then
$echo >> $output "\
# Add our own library path to $shlibpath_var
$shlibpath_var=\"$temp_rpath\$$shlibpath_var\"
# Some systems cannot cope with colon-terminated $shlibpath_var
# The second colon is a workaround for a bug in BeOS R4 sed
$shlibpath_var=\`\$echo \"X\$$shlibpath_var\" | \$Xsed -e 's/::*\$//'\`
export $shlibpath_var
"
fi
# fixup the dll searchpath if we need to.
if test -n "$dllsearchpath"; then
$echo >> $output "\
# Add the dll search path components to the executable PATH
PATH=$dllsearchpath:\$PATH
"
fi
$echo >> $output "\
if test \"\$libtool_execute_magic\" != \"$magic\"; then
# Run the actual program with our arguments.
"
case $host in
# win32 systems need to use the prog path for dll
# lookup to work
*-*-cygwin* | *-*-pw32*)
$echo >> $output "\
exec \$progdir/\$program \${1+\"\$@\"}
"
;;
# Backslashes separate directories on plain windows
*-*-mingw | *-*-os2*)
$echo >> $output "\
exec \$progdir\\\\\$program \${1+\"\$@\"}
"
;;
*)
$echo >> $output "\
# Export the path to the program.
PATH=\"\$progdir:\$PATH\"
export PATH
exec \$program \${1+\"\$@\"}
"
;;
esac
$echo >> $output "\
\$echo \"\$0: cannot exec \$program \${1+\"\$@\"}\"
exit 1
fi
else
# The program doesn't exist.
\$echo \"\$0: error: \$progdir/\$program does not exist\" 1>&2
\$echo \"This script is just a wrapper for \$program.\" 1>&2
echo \"See the $PACKAGE documentation for more information.\" 1>&2
exit 1
fi
fi\
"
chmod +x $output
fi
exit 0
;;
esac
# See if we need to build an old-fashioned archive.
for oldlib in $oldlibs; do
if test "$build_libtool_libs" = convenience; then
oldobjs="$libobjs_save"
addlibs="$convenience"
build_libtool_libs=no
else
if test "$build_libtool_libs" = module; then
oldobjs="$libobjs_save"
build_libtool_libs=no
else
oldobjs="$objs$old_deplibs $non_pic_objects"
fi
addlibs="$old_convenience"
fi
if test -n "$addlibs"; then
gentop="$output_objdir/${outputname}x"
$show "${rm}r $gentop"
$run ${rm}r "$gentop"
$show "$mkdir $gentop"
$run $mkdir "$gentop"
status=$?
if test $status -ne 0 && test ! -d "$gentop"; then
exit $status
fi
generated="$generated $gentop"
# Add in members from convenience archives.
for xlib in $addlibs; do
# Extract the objects.
case $xlib in
[\\/]* | [A-Za-z]:[\\/]*) xabs="$xlib" ;;
*) xabs=`pwd`"/$xlib" ;;
esac
xlib=`$echo "X$xlib" | $Xsed -e 's%^.*/%%'`
xdir="$gentop/$xlib"
$show "${rm}r $xdir"
$run ${rm}r "$xdir"
$show "$mkdir $xdir"
$run $mkdir "$xdir"
status=$?
if test $status -ne 0 && test ! -d "$xdir"; then
exit $status
fi
$show "(cd $xdir && $AR x $xabs)"
$run eval "(cd \$xdir && $AR x \$xabs)" || exit $?
oldobjs="$oldobjs "`find $xdir -name \*.${objext} -print | $NL2SP`
done
fi
# Do each command in the archive commands.
if test -n "$old_archive_from_new_cmds" && test "$build_libtool_libs" = yes; then
eval cmds=\"$old_archive_from_new_cmds\"
else
# # Ensure that we have .o objects in place in case we decided
# # not to build a shared library, and have fallen back to building
# # static libs even though --disable-static was passed!
# for oldobj in $oldobjs; do
# if test ! -f $oldobj; then
# xdir=`$echo "X$oldobj" | $Xsed -e 's%/[^/]*$%%'`
# if test "X$xdir" = "X$oldobj"; then
# xdir="."
# else
# xdir="$xdir"
# fi
# baseobj=`$echo "X$oldobj" | $Xsed -e 's%^.*/%%'`
# obj=`$echo "X$baseobj" | $Xsed -e "$o2lo"`
# $show "(cd $xdir && ${LN_S} $obj $baseobj)"
# $run eval '(cd $xdir && ${LN_S} $obj $baseobj)' || exit $?
# fi
# done
compiler_flags="$compiler_flags $add_flags"
eval cmds=\"$old_archive_cmds\"
if len=`expr "X$cmds" : ".*"` &&
test $len -le $max_cmd_len; then
:
else
# the command line is too long to link in one step, link in parts
$echo "using piecewise archive linking..."
save_RANLIB=$RANLIB
RANLIB=:
objlist=
concat_cmds=
save_oldobjs=$oldobjs
for obj in $save_oldobjs
do
oldobjs="$objlist $obj"
objlist="$objlist $obj"
eval test_cmds=\"$old_archive_cmds\"
if len=`expr "X$test_cmds" : ".*"` &&
test $len -le $max_cmd_len; then
:
else
# the above command should be used before it gets too long
oldobjs=$objlist
test -z "$concat_cmds" || concat_cmds=$concat_cmds~
eval concat_cmds=\"\${concat_cmds}$old_archive_cmds\"
objlist=
fi
done
RANLIB=$save_RANLIB
oldobjs=$objlist
eval cmds=\"\$concat_cmds~$old_archive_cmds\"
fi
fi
IFS="${IFS= }"; save_ifs="$IFS"; IFS='~'
for cmd in $cmds; do
IFS="$save_ifs"
$show "$cmd"
$run eval "$cmd" || exit $?
done
IFS="$save_ifs"
done
if test -n "$generated"; then
$show "${rm}r$generated"
$run ${rm}r$generated
fi
# Now create the libtool archive.
case $output in
*.la)
old_library=
test "$build_old_libs" = yes && old_library="$libname.$libext"
$show "creating $output"
# Preserve any variables that may affect compiler behavior
for var in $variables_saved_for_relink; do
if eval test -z \"\${$var+set}\"; then
relink_command="{ test -z \"\${$var+set}\" || unset $var || { $var=; export $var; }; }; $relink_command"
elif eval var_value=\$$var; test -z "$var_value"; then
relink_command="$var=; export $var; $relink_command"
else
var_value=`$echo "X$var_value" | $Xsed -e "$sed_quote_subst"`
relink_command="$var=\"$var_value\"; export $var; $relink_command"
fi
done
# Quote the link command for shipping.
relink_command="cd `pwd`; $SHELL $0 --mode=relink $libtool_args"
relink_command=`$echo "X$relink_command" | $Xsed -e "$sed_quote_subst"`
# Only create the output if not a dry run.
if test -z "$run"; then
for installed in no yes; do
if test "$installed" = yes; then
if test -z "$install_libdir"; then
break
fi
output="$output_objdir/$outputname"i
# Replace all uninstalled libtool libraries with the installed ones
newdependency_libs=
for deplib in $dependency_libs; do
case $deplib in
*.la)
name=`$echo "X$deplib" | $Xsed -e 's%^.*/%%'`
eval libdir=`sed -n -e 's/^libdir=\(.*\)$/\1/p' $deplib`
if test -z "$libdir"; then
$echo "$modename: \`$deplib' is not a valid libtool archive" 1>&2
exit 1
fi
newdependency_libs="$newdependency_libs $libdir/$name"
;;
*) newdependency_libs="$newdependency_libs $deplib" ;;
esac
done
dependency_libs="$newdependency_libs"
newdlfiles=
for lib in $dlfiles; do
name=`$echo "X$lib" | $Xsed -e 's%^.*/%%'`
eval libdir=`sed -n -e 's/^libdir=\(.*\)$/\1/p' $lib`
if test -z "$libdir"; then
$echo "$modename: \`$lib' is not a valid libtool archive" 1>&2
exit 1
fi
newdlfiles="$newdlfiles $libdir/$name"
done
dlfiles="$newdlfiles"
newdlprefiles=
for lib in $dlprefiles; do
name=`$echo "X$lib" | $Xsed -e 's%^.*/%%'`
eval libdir=`sed -n -e 's/^libdir=\(.*\)$/\1/p' $lib`
if test -z "$libdir"; then
$echo "$modename: \`$lib' is not a valid libtool archive" 1>&2
exit 1
fi
newdlprefiles="$newdlprefiles $libdir/$name"
done
dlprefiles="$newdlprefiles"
fi
$rm $output
# place dlname in correct position for cygwin
tdlname=$dlname
case $host,$output,$installed,$module,$dlname in
*cygwin*,*lai,yes,no,*.dll) tdlname=../bin/$dlname ;;
esac
$echo > $output "\
# $outputname - a libtool library file
# Generated by $PROGRAM - GNU $PACKAGE $VERSION$TIMESTAMP
#
# Please DO NOT delete this file!
# It is necessary for linking the library.
# The name that we can dlopen(3).
dlname='$tdlname'
# Names of this library.
library_names='$library_names'
# The name of the static archive.
old_library='$old_library'
# Libraries that this one depends upon.
dependency_libs='$dependency_libs'
# Version information for $libname.
current=$current
age=$age
revision=$revision
# Is this an already installed library?
installed=$installed
# Files to dlopen/dlpreopen
dlopen='$dlfiles'
dlpreopen='$dlprefiles'
# Directory that this library needs to be installed in:
libdir='$install_libdir'"
if test $hardcode_into_libs = all &&
test "$installed" = no && test $need_relink = yes; then
$echo >> $output "\
relink_command=\"$relink_command\""
fi
done
fi
# Do a symbolic link so that the libtool archive can be found in
# LD_LIBRARY_PATH before the program is installed.
$show "(cd $output_objdir && $rm $outputname && $LN_S ../$outputname $outputname)"
$run eval '(cd $output_objdir && $rm $outputname && $LN_S ../$outputname $outputname)' || exit $?
;;
esac
exit 0
;;
# libtool install mode
# Wraps a BSD-compatible install(1)/cp(1) command line: installs libtool
# libraries (.la), libtool objects (.lo) and wrapper-script executables,
# relinking or stripping where required, then defers static archives to a
# final pass so ranlib/postinstall commands run on the installed copies.
install)
modename="$modename: install"
# There may be an optional sh(1) argument at the beginning of
# install_prog (especially on Windows NT).
if test "$nonopt" = "$SHELL" || test "$nonopt" = /bin/sh ||
# Allow the use of GNU shtool's install command.
$echo "X$nonopt" | $Xsed | grep shtool > /dev/null; then
# Aesthetically quote it.
arg=`$echo "X$nonopt" | $Xsed -e "$sed_quote_subst"`
case $arg in
*[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*)
arg="\"$arg\""
;;
esac
install_prog="$arg "
arg="$1"
shift
else
install_prog=
arg="$nonopt"
fi
# The real first argument should be the name of the installation program.
# Aesthetically quote it.
arg=`$echo "X$arg" | $Xsed -e "$sed_quote_subst"`
case $arg in
*[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*)
arg="\"$arg\""
;;
esac
install_prog="$install_prog$arg"
# We need to accept at least all the BSD install flags.
# Scan the remaining arguments: options (and their values) are appended to
# $install_prog; the last non-option argument becomes $dest, earlier ones
# accumulate in $files.
dest=
files=
opts=
prev=
install_type=
isdir=no
stripme=
for arg
do
if test -n "$dest"; then
# A later non-option arg demotes the previous $dest to a source file.
files="$files $dest"
dest="$arg"
continue
fi
case $arg in
-d) isdir=yes ;;
-f) prev="-f" ;;
-g) prev="-g" ;;
-m) prev="-m" ;;
-o) prev="-o" ;;
-s)
# Strip is handled by libtool itself (see $stripme below), not passed on.
stripme=" -s"
continue
;;
-*) ;;
*)
# If the previous option needed an argument, then skip it.
if test -n "$prev"; then
prev=
else
dest="$arg"
continue
fi
;;
esac
# Aesthetically quote the argument.
arg=`$echo "X$arg" | $Xsed -e "$sed_quote_subst"`
case $arg in
*[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*)
arg="\"$arg\""
;;
esac
install_prog="$install_prog $arg"
done
if test -z "$install_prog"; then
$echo "$modename: you must specify an install program" 1>&2
$echo "$help" 1>&2
exit 1
fi
if test -n "$prev"; then
$echo "$modename: the \`$prev' option requires an argument" 1>&2
$echo "$help" 1>&2
exit 1
fi
if test -z "$files"; then
if test -z "$dest"; then
$echo "$modename: no file or destination specified" 1>&2
else
$echo "$modename: you must specify a destination" 1>&2
fi
$echo "$help" 1>&2
exit 1
fi
# Strip any trailing slash from the destination.
dest=`$echo "X$dest" | $Xsed -e 's%/$%%'`
# Check to see that the destination is a directory.
test -d "$dest" && isdir=yes
if test "$isdir" = yes; then
destdir="$dest"
destname=
else
destdir=`$echo "X$dest" | $Xsed -e 's%/[^/]*$%%'`
test "X$destdir" = "X$dest" && destdir=.
destname=`$echo "X$dest" | $Xsed -e 's%^.*/%%'`
# Not a directory, so check to see that there is only one file specified.
set dummy $files
if test $# -gt 2; then
$echo "$modename: \`$dest' is not a directory" 1>&2
$echo "$help" 1>&2
exit 1
fi
fi
# Installing anything but a .lo object requires an absolute destination,
# since .la processing sources files relative to $destdir.
case $destdir in
[\\/]* | [A-Za-z]:[\\/]*) ;;
*)
for file in $files; do
case $file in
*.lo) ;;
*)
$echo "$modename: \`$destdir' must be an absolute directory name" 1>&2
$echo "$help" 1>&2
exit 1
;;
esac
done
;;
esac
# This variable tells wrapper scripts just to set variables rather
# than running their programs.
libtool_install_magic="$magic"
staticlibs=
future_libdirs=
current_libdirs=
for file in $files; do
# Do each installation.
case $file in
*.$libext)
# Do the static libraries later.
staticlibs="$staticlibs $file"
;;
*.la)
# Check to see that this really is a libtool archive.
if (sed -e '2q' $file | egrep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then :
else
$echo "$modename: \`$file' is not a valid libtool archive" 1>&2
$echo "$help" 1>&2
exit 1
fi
library_names=
old_library=
relink_command=
# If there is no directory component, then add one.
# Sourcing the .la file sets libdir, library_names, old_library, etc.
case $file in
*/* | *\\*) . $file ;;
*) . ./$file ;;
esac
# Add the libdir to current_libdirs if it is the destination.
if test "X$destdir" = "X$libdir"; then
case "$current_libdirs " in
*" $libdir "*) ;;
*) current_libdirs="$current_libdirs $libdir" ;;
esac
else
# Note the libdir as a future libdir.
case "$future_libdirs " in
*" $libdir "*) ;;
*) future_libdirs="$future_libdirs $libdir" ;;
esac
fi
dir=`$echo "X$file" | $Xsed -e 's%/[^/]*$%%'`/
test "X$dir" = "X$file/" && dir=
dir="$dir$objdir"
if test "$hardcode_into_libs" = all && test -n "$relink_command"; then
$echo "$modename: warning: relinking \`$file'" 1>&2
$show "$relink_command"
if $run eval "$relink_command"; then :
else
$echo "$modename: error: relink \`$file' with the above command before installing it" 1>&2
continue
fi
fi
# See the names of the shared library.
set dummy $library_names
if test -n "$2"; then
realname="$2"
shift
shift
srcname="$realname"
# A relinked library was written with a "T" suffix; install that copy.
test "$hardcode_into_libs" = all && test -n "$relink_command" && srcname="$realname"T
# Install the shared library and build the symlinks.
$show "$install_prog $dir/$srcname $destdir/$realname"
$run eval "$install_prog $dir/$srcname $destdir/$realname" || exit $?
if test -n "$stripme" && test -n "$striplib"; then
$show "$striplib $destdir/$realname"
$run eval "$striplib $destdir/$realname" || exit $?
fi
if test $# -gt 0; then
# Delete the old symlinks, and create new ones.
for linkname
do
if test "$linkname" != "$realname"; then
$show "(cd $destdir && $rm $linkname && $LN_S $realname $linkname)"
$run eval "(cd $destdir && $rm $linkname && $LN_S $realname $linkname)"
fi
done
fi
# Do each command in the postinstall commands.
lib="$destdir/$realname"
eval cmds=\"$postinstall_cmds\"
# Commands are '~'-separated; IFS is restored inside the loop so each
# command is word-split normally when executed.
IFS="${IFS= }"; save_ifs="$IFS"; IFS='~'
for cmd in $cmds; do
IFS="$save_ifs"
$show "$cmd"
$run eval "$cmd" || exit $?
done
IFS="$save_ifs"
fi
# Install the pseudo-library for information purposes.
# The "i" suffix selects the installed-flavor .la generated at link time.
name=`$echo "X$file" | $Xsed -e 's%^.*/%%'`
instname="$dir/$name"i
$show "$install_prog $instname $destdir/$name"
$run eval "$install_prog $instname $destdir/$name" || exit $?
# Maybe install the static library, too.
test -n "$old_library" && staticlibs="$staticlibs $dir/$old_library"
;;
*.lo)
# Install (i.e. copy) a libtool object.
# Figure out destination file name, if it wasn't already specified.
if test -n "$destname"; then
destfile="$destdir/$destname"
else
destfile=`$echo "X$file" | $Xsed -e 's%^.*/%%'`
destfile="$destdir/$destfile"
fi
# Deduce the name of the destination old-style object file.
case $destfile in
*.lo)
staticdest=`$echo "X$destfile" | $Xsed -e "$lo2o"`
;;
*.$objext)
staticdest="$destfile"
destfile=
;;
*)
$echo "$modename: cannot copy a libtool object to \`$destfile'" 1>&2
$echo "$help" 1>&2
exit 1
;;
esac
# Install the libtool object if requested.
if test -n "$destfile"; then
$show "$install_prog $file $destfile"
$run eval "$install_prog $file $destfile" || exit $?
fi
# Install the old object if enabled.
if test "$build_old_libs" = yes; then
# Deduce the name of the old-style object file.
staticobj=`$echo "X$file" | $Xsed -e "$lo2o"`
$show "$install_prog $staticobj $staticdest"
$run eval "$install_prog \$staticobj \$staticdest" || exit $?
fi
# NOTE(review): this exits after installing the first .lo file, so any
# remaining entries in $files (and the $staticlibs pass below) are
# skipped -- confirm this matches the intended upstream behavior.
exit 0
;;
*)
# Figure out destination file name, if it wasn't already specified.
if test -n "$destname"; then
destfile="$destdir/$destname"
else
destfile=`$echo "X$file" | $Xsed -e 's%^.*/%%'`
destfile="$destdir/$destfile"
fi
# Do a test to see if this is really a libtool program.
if (sed -e '4q' $file | egrep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then
uninst_deplibs=
relink_command=
# If there is no directory component, then add one.
case $file in
*/* | *\\*) . $file ;;
*) . ./$file ;;
esac
# Check the variables that should have been set.
if test -z "$uninst_deplibs"; then
$echo "$modename: invalid libtool wrapper script \`$file'" 1>&2
exit 1
fi
finalize=yes
for lib in $uninst_deplibs; do
# Check to see that each library is installed.
libdir=
if test -f "$lib"; then
# If there is no directory component, then add one.
case $lib in
*/* | *\\*) . $lib ;;
*) . ./$lib ;;
esac
fi
libfile="$libdir/"`$echo "X$lib" | $Xsed -e 's%^.*/%%g'` ### testsuite: skip nested quoting test
if test -n "$libdir" && test ! -f "$libfile"; then
$echo "$modename: warning: \`$lib' has not been installed in \`$libdir'" 1>&2
finalize=no
fi
done
# Re-source the wrapper: the $lib loop above may have clobbered
# relink_command and other variables.
relink_command=
# If there is no directory component, then add one.
case $file in
*/* | *\\*) . $file ;;
*) . ./$file ;;
esac
outputname=
if test "$fast_install" = no && test -n "$relink_command"; then
if test "$finalize" = yes && test -z "$run"; then
tmpdir="/tmp"
test -n "$TMPDIR" && tmpdir="$TMPDIR"
tmpdir="$tmpdir/libtool-$$"
if $mkdir -p "$tmpdir" && chmod 700 "$tmpdir"; then :
else
$echo "$modename: error: cannot create temporary directory \`$tmpdir'" 1>&2
continue
fi
file=`$echo "X$file" | $Xsed -e 's%^.*/%%'`
outputname="$tmpdir/$file"
# Replace the output file specification.
relink_command=`$echo "X$relink_command" | $Xsed -e 's%@OUTPUT@%'"$outputname"'%g'`
$show "$relink_command"
if $run eval "$relink_command"; then :
else
$echo "$modename: error: relink \`$file' with the above command before installing it" 1>&2
${rm}r "$tmpdir"
continue
fi
file="$outputname"
else
$echo "$modename: warning: cannot relink \`$file'" 1>&2
fi
else
# Install the binary that we compiled earlier.
file=`$echo "X$file" | $Xsed -e "s%\([^/]*\)$%$objdir/\1%"`
fi
fi
# remove .exe since cygwin /usr/bin/install will append another
# one anyways
case $install_prog,$host in
*/usr/bin/install*,*cygwin*)
case $file:$destfile in
*.exe:*.exe)
# this is ok
;;
*.exe:*)
destfile=$destfile.exe
;;
*:*.exe)
destfile=`echo $destfile | sed -e 's,.exe$,,'`
;;
esac
;;
esac
$show "$install_prog$stripme $file $destfile"
$run eval "$install_prog\$stripme \$file \$destfile" || exit $?
# Clean up the temporary relink directory, if one was used.
test -n "$outputname" && ${rm}r "$tmpdir"
;;
esac
done
# Second pass: install the static archives collected above and run
# ranlib/postinstall commands on the installed copies.
for file in $staticlibs; do
name=`$echo "X$file" | $Xsed -e 's%^.*/%%'`
# Set up the ranlib parameters.
oldlib="$destdir/$name"
$show "$install_prog $file $oldlib"
$run eval "$install_prog \$file \$oldlib" || exit $?
if test -n "$stripme" && test -n "$striplib"; then
$show "$old_striplib $oldlib"
$run eval "$old_striplib $oldlib" || exit $?
fi
# Do each command in the postinstall commands.
eval cmds=\"$old_postinstall_cmds\"
IFS="${IFS= }"; save_ifs="$IFS"; IFS='~'
for cmd in $cmds; do
IFS="$save_ifs"
$show "$cmd"
$run eval "$cmd" || exit $?
done
IFS="$save_ifs"
done
if test -n "$future_libdirs"; then
$echo "$modename: warning: remember to run \`$progname --finish$future_libdirs'" 1>&2
fi
if test -n "$current_libdirs"; then
# Maybe just do a dry run.
test -n "$run" && current_libdirs=" -n$current_libdirs"
exec $SHELL $0 --finish$current_libdirs
exit 1
fi
exit 0
;;
# libtool finish mode
# Completes the installation of libtool libraries in each LIBDIR given as a
# MODE-ARG: runs the configured $finish_cmds (e.g. ldconfig) and/or the
# single $finish_eval per directory, then prints guidance on how to link
# against and run programs using the installed libraries.
finish)
modename="$modename: finish"
# $nonopt holds the first LIBDIR; remaining positional args are appended.
libdirs="$nonopt"
# Commands that failed (typically for lack of privilege) are collected here
# so they can be reported for the system administrator to run.
admincmds=
if test -n "$finish_cmds$finish_eval" && test -n "$libdirs"; then
for dir
do
libdirs="$libdirs $dir"
done
for libdir in $libdirs; do
if test -n "$finish_cmds"; then
# Do each command in the finish commands.
eval cmds=\"$finish_cmds\"
# Commands are '~'-separated; IFS is restored inside the loop so each
# command is word-split normally when executed.
IFS="${IFS= }"; save_ifs="$IFS"; IFS='~'
for cmd in $cmds; do
IFS="$save_ifs"
$show "$cmd"
$run eval "$cmd" || admincmds="$admincmds
$cmd"
done
IFS="$save_ifs"
fi
if test -n "$finish_eval"; then
# Do the single finish_eval.
eval cmds=\"$finish_eval\"
$run eval "$cmds" || admincmds="$admincmds
$cmds"
fi
done
fi
# Exit here if they wanted silent mode.
# BUGFIX: this was an unconditional `exit 0', which made the informational
# report below unreachable dead code.  Only exit early under --silent,
# in which case $show is the `:' no-op command.
test "$show" = : && exit 0
echo "----------------------------------------------------------------------"
echo "Libraries have been installed in:"
for libdir in $libdirs; do
echo " $libdir"
done
echo
echo "If you ever happen to want to link against installed libraries"
echo "in a given directory, LIBDIR, you must either use libtool, and"
echo "specify the full pathname of the library, or use \`-LLIBDIR'"
echo "flag during linking and do at least one of the following:"
if test -n "$shlibpath_var"; then
echo " - add LIBDIR to the \`$shlibpath_var' environment variable"
echo " during execution"
fi
if test -n "$runpath_var"; then
echo " - add LIBDIR to the \`$runpath_var' environment variable"
echo " during linking"
fi
if test -n "$hardcode_libdir_flag_spec"; then
libdir=LIBDIR
eval flag=\"$hardcode_libdir_flag_spec\"
echo " - use the \`$flag' linker flag"
fi
if test -n "$admincmds"; then
echo " - have your system administrator run these commands:$admincmds"
fi
if test -f /etc/ld.so.conf; then
echo " - have your system administrator add LIBDIR to \`/etc/ld.so.conf'"
fi
echo
echo "See any operating system documentation about shared libraries for"
echo "more information, such as the ld(1) and ld.so(8) manual pages."
echo "----------------------------------------------------------------------"
exit 0
;;
# libtool execute mode
# Runs COMMAND [ARGS] with the library path set up so that uninstalled
# libtool libraries named via -dlopen can be found; any ARGS that are
# libtool wrapper scripts are translated to their real uninstalled binaries.
execute)
modename="$modename: execute"
# The first argument is the command name.
cmd="$nonopt"
if test -z "$cmd"; then
$echo "$modename: you must specify a COMMAND" 1>&2
$echo "$help"
exit 1
fi
# Handle -dlopen flags immediately.
for file in $execute_dlfiles; do
if test ! -f "$file"; then
$echo "$modename: \`$file' is not a file" 1>&2
$echo "$help" 1>&2
exit 1
fi
dir=
case $file in
*.la)
# Check to see that this really is a libtool archive.
if (sed -e '2q' $file | egrep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then :
else
# BUGFIX: the message used to interpolate $lib, which is unset (or
# stale from an earlier mode) here; the file being validated is $file.
$echo "$modename: \`$file' is not a valid libtool archive" 1>&2
$echo "$help" 1>&2
exit 1
fi
# Read the libtool library.
dlname=
library_names=
# If there is no directory component, then add one.
case $file in
*/* | *\\*) . $file ;;
*) . ./$file ;;
esac
# Skip this library if it cannot be dlopened.
if test -z "$dlname"; then
# Warn if it was a shared library.
test -n "$library_names" && $echo "$modename: warning: \`$file' was not linked with \`-export-dynamic'"
continue
fi
dir=`$echo "X$file" | $Xsed -e 's%/[^/]*$%%'`
test "X$dir" = "X$file" && dir=.
if test -f "$dir/$objdir/$dlname"; then
dir="$dir/$objdir"
else
$echo "$modename: cannot find \`$dlname' in \`$dir' or \`$dir/$objdir'" 1>&2
exit 1
fi
;;
*.lo)
# Just add the directory containing the .lo file.
dir=`$echo "X$file" | $Xsed -e 's%/[^/]*$%%'`
test "X$dir" = "X$file" && dir=.
;;
*)
$echo "$modename: warning \`-dlopen' is ignored for non-libtool libraries and objects" 1>&2
continue
;;
esac
# Get the absolute pathname.
absdir=`cd "$dir" && pwd`
test -n "$absdir" && dir="$absdir"
# Now add the directory to shlibpath_var.
if eval "test -z \"\$$shlibpath_var\""; then
eval "$shlibpath_var=\"\$dir\""
else
eval "$shlibpath_var=\"\$dir:\$$shlibpath_var\""
fi
done
# This variable tells wrapper scripts just to set shlibpath_var
# rather than running their programs.
libtool_execute_magic="$magic"
# Check if any of the arguments is a wrapper script.
args=
for file
do
case $file in
-*) ;;
*)
# Do a test to see if this is really a libtool program.
if (sed -e '4q' $file | egrep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then
# If there is no directory component, then add one.
# Sourcing the wrapper sets $progdir and $program (execute magic is on).
case $file in
*/* | *\\*) . $file ;;
*) . ./$file ;;
esac
# Transform arg to wrapped name.
file="$progdir/$program"
fi
;;
esac
# Quote arguments (to preserve shell metacharacters).
file=`$echo "X$file" | $Xsed -e "$sed_quote_subst"`
args="$args \"$file\""
done
if test -z "$run"; then
if test -n "$shlibpath_var"; then
# Export the shlibpath_var.
eval "export $shlibpath_var"
fi
# Restore saved enviroment variables
if test "${save_LC_ALL+set}" = set; then
LC_ALL="$save_LC_ALL"; export LC_ALL
fi
if test "${save_LANG+set}" = set; then
LANG="$save_LANG"; export LANG
fi
# Now actually exec the command.
eval "exec \$cmd$args"
# Only reached if exec failed.
$echo "$modename: cannot exec \$cmd$args"
exit 1
else
# Display what would be done.
if test -n "$shlibpath_var"; then
eval "\$echo \"\$shlibpath_var=\$$shlibpath_var\""
$echo "export $shlibpath_var"
fi
$echo "$cmd$args"
exit 0
fi
;;
# libtool clean and uninstall mode
# Removes libtool-generated files with the user-supplied RM program.  In
# clean mode, files are removed from the build tree (including the $objdir
# copies); in uninstall mode, from the installation directory, running the
# configured postuninstall commands for libraries.
clean | uninstall)
modename="$modename: $mode"
# The first non-option arg is the RM program; further -options are
# appended to it, everything else is a file to remove.
rm="$nonopt"
files=
# This variable tells wrapper scripts just to set variables rather
# than running their programs.
libtool_install_magic="$magic"
for arg
do
case $arg in
-*) rm="$rm $arg" ;;
*) files="$files $arg" ;;
esac
done
if test -z "$rm"; then
$echo "$modename: you must specify an RM program" 1>&2
$echo "$help" 1>&2
exit 1
fi
rmdirs=
for file in $files; do
dir=`$echo "X$file" | $Xsed -e 's%/[^/]*$%%'`
if test "X$dir" = "X$file"; then
dir=.
# NOTE(review): this self-assignment is a no-op; it presumably just
# mirrors the else-branch for symmetry -- confirm against upstream.
objdir="$objdir"
else
objdir="$dir/$objdir"
fi
name=`$echo "X$file" | $Xsed -e 's%^.*/%%'`
# When uninstalling, the files live beside $file, not under $objdir.
test $mode = uninstall && objdir="$dir"
# Remember objdir for removal later, being careful to avoid duplicates
if test $mode = clean; then
case " $rmdirs " in
*" $objdir "*) ;;
*) rmdirs="$rmdirs $objdir" ;;
esac
fi
rmfiles="$file"
case $name in
*.la)
# Possibly a libtool archive, so verify it.
if (sed -e '2q' $file | egrep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then
. $dir/$name
# Delete the libtool libraries and symlinks.
for n in $library_names; do
rmfiles="$rmfiles $objdir/$n"
done
test -n "$old_library" && rmfiles="$rmfiles $objdir/$old_library"
# The ${name}i file is the installed-flavor .la kept in $objdir.
test $mode = clean && rmfiles="$rmfiles $objdir/$name $objdir/${name}i"
if test $mode = uninstall; then
if test -n "$library_names"; then
# Do each command in the postuninstall commands.
eval cmds=\"$postuninstall_cmds\"
# Commands are '~'-separated; IFS is restored inside the loop.
IFS="${IFS= }"; save_ifs="$IFS"; IFS='~'
for cmd in $cmds; do
IFS="$save_ifs"
$show "$cmd"
$run eval "$cmd"
done
IFS="$save_ifs"
fi
if test -n "$old_library"; then
# Do each command in the old_postuninstall commands.
eval cmds=\"$old_postuninstall_cmds\"
IFS="${IFS= }"; save_ifs="$IFS"; IFS='~'
for cmd in $cmds; do
IFS="$save_ifs"
$show "$cmd"
$run eval "$cmd"
done
IFS="$save_ifs"
fi
# FIXME: should reinstall the best remaining shared library.
fi
fi
;;
*.lo)
# Possibly a libtool object, so verify it.
if (sed -e '2q' $file | egrep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then
# Read the .lo file
. $dir/$name
# Add PIC object to the list of files to remove.
if test -n "$pic_object" \
&& test "$pic_object" != none; then
rmfiles="$rmfiles $dir/$pic_object"
fi
# Add non-PIC object to the list of files to remove.
if test -n "$non_pic_object" \
&& test "$non_pic_object" != none; then
rmfiles="$rmfiles $dir/$non_pic_object"
fi
fi
;;
*)
# Do a test to see if this is a libtool program.
if test $mode = clean &&
(sed -e '4q' $file | egrep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then
relink_command=
. $dir/$file
rmfiles="$rmfiles $objdir/$name $objdir/${name}S.${objext}"
if test "$fast_install" = yes && test -n "$relink_command"; then
# Fast-install keeps a relinked lt-NAME copy in $objdir.
rmfiles="$rmfiles $objdir/lt-$name"
fi
fi
;;
esac
$show "$rm $rmfiles"
$run $rm $rmfiles
done
# Try to remove the ${objdir}s in the directories where we deleted files
for dir in $rmdirs; do
if test -d "$dir"; then
$show "rmdir $dir"
$run rmdir $dir >/dev/null 2>&1
fi
done
exit 0
;;
"")
# No mode given and none could be inferred: report and show generic help.
$echo "$modename: you must specify a MODE" 1>&2
$echo "$generic_help" 1>&2
exit 1
;;
esac
$echo "$modename: invalid operation mode \`$mode'" 1>&2
$echo "$generic_help" 1>&2
exit 1
fi # test -z "$show_help"
# We need to display help for each of the modes.
# NOTE: $echo, $modename, $mode and $help are initialized earlier in this
# script; each case arm prints the usage text for one operation mode, and
# the "") arm handles generic --help output with no mode selected.
case $mode in
"") $echo \
"Usage: $modename [OPTION]... [MODE-ARG]...
Provide generalized library-building support services.
--config show all configuration variables
--debug enable verbose shell tracing
-n, --dry-run display commands without modifying any files
--features display basic configuration information and exit
--finish same as \`--mode=finish'
--help display this help message and exit
--mode=MODE use operation mode MODE [default=inferred from MODE-ARGS]
--quiet same as \`--silent'
--silent don't print informational messages
--tag=TAG use configuration variables from tag TAG
--version print version information
MODE must be one of the following:
clean remove files from the build directory
compile compile a source file into a libtool object
execute automatically set library path, then run a program
finish complete the installation of libtool libraries
install install libraries or executables
link create a library or an executable
uninstall remove libraries from an installed directory
MODE-ARGS vary depending on the MODE. Try \`$modename --help --mode=MODE' for
a more detailed description of MODE."
exit 0
;;
# Per-mode help text below; each arm falls through to the shared
# "Try \`$modename --help' ..." footer after the esac.
clean)
$echo \
"Usage: $modename [OPTION]... --mode=clean RM [RM-OPTION]... FILE...
Remove files from the build directory.
RM is the name of the program to use to delete files associated with each FILE
(typically \`/bin/rm'). RM-OPTIONS are options (such as \`-f') to be passed
to RM.
If FILE is a libtool library, object or program, all the files associated
with it are deleted. Otherwise, only FILE itself is deleted using RM."
;;
compile)
$echo \
"Usage: $modename [OPTION]... --mode=compile COMPILE-COMMAND... SOURCEFILE
Compile a source file into a libtool library object.
This mode accepts the following additional options:
-o OUTPUT-FILE set the output file name to OUTPUT-FILE
-static always build a \`.o' file suitable for static linking
COMPILE-COMMAND is a command to be used in creating a \`standard' object file
from the given SOURCEFILE.
The output file name is determined by removing the directory component from
SOURCEFILE, then substituting the C source code suffix \`.c' with the
library object suffix, \`.lo'."
;;
execute)
$echo \
"Usage: $modename [OPTION]... --mode=execute COMMAND [ARGS]...
Automatically set library path, then run a program.
This mode accepts the following additional options:
-dlopen FILE add the directory containing FILE to the library path
This mode sets the library path environment variable according to \`-dlopen'
flags.
If any of the ARGS are libtool executable wrappers, then they are translated
into their corresponding uninstalled binary, and any of their required library
directories are added to the library path.
Then, COMMAND is executed, with ARGS as arguments."
;;
finish)
$echo \
"Usage: $modename [OPTION]... --mode=finish [LIBDIR]...
Complete the installation of libtool libraries.
Each LIBDIR is a directory that contains libtool libraries.
The commands that this mode executes may require superuser privileges. Use
the \`--dry-run' option if you just want to see what would be executed."
;;
install)
$echo \
"Usage: $modename [OPTION]... --mode=install INSTALL-COMMAND...
Install executables or libraries.
INSTALL-COMMAND is the installation command. The first component should be
either the \`install' or \`cp' program.
The rest of the components are interpreted as arguments to that command (only
BSD-compatible install options are recognized)."
;;
link)
$echo \
"Usage: $modename [OPTION]... --mode=link LINK-COMMAND...
Link object files or libraries together to form another library, or to
create an executable program.
LINK-COMMAND is a command using the C compiler that you would use to create
a program from several object files.
The following components of LINK-COMMAND are treated specially:
-all-static do not do any dynamic linking at all
-avoid-version do not add a version suffix if possible
-dlopen FILE \`-dlpreopen' FILE if it cannot be dlopened at runtime
-dlpreopen FILE link in FILE and add its symbols to lt_preloaded_symbols
-export-dynamic allow symbols from OUTPUT-FILE to be resolved with dlsym(3)
-export-symbols SYMFILE
try to export only the symbols listed in SYMFILE
-export-symbols-regex REGEX
try to export only the symbols matching REGEX
-LLIBDIR search LIBDIR for required installed libraries
-lNAME OUTPUT-FILE requires the installed library libNAME
-module build a library that can dlopened
-no-fast-install disable the fast-install mode
-no-install link a not-installable executable
-no-undefined declare that a library does not refer to external symbols
-o OUTPUT-FILE create OUTPUT-FILE from the specified objects
-objectlist FILE Use a list of object files found in FILE to specify objects
-release RELEASE specify package release information
-rpath LIBDIR the created library will eventually be installed in LIBDIR
-R[ ]LIBDIR add LIBDIR to the runtime path of programs and libraries
-static do not do any dynamic linking of libtool libraries
-version-info CURRENT[:REVISION[:AGE]]
specify library version info [each variable defaults to 0]
All other options (arguments beginning with \`-') are ignored.
Every other argument is treated as a filename. Files ending in \`.la' are
treated as uninstalled libtool libraries, other files are standard or library
object files.
If the OUTPUT-FILE ends in \`.la', then a libtool library is created,
only library objects (\`.lo' files) may be specified, and \`-rpath' is
required, except when creating a convenience library.
If OUTPUT-FILE ends in \`.a' or \`.lib', then a standard library is created
using \`ar' and \`ranlib', or on Windows using \`lib'.
If OUTPUT-FILE ends in \`.lo' or \`.${objext}', then a reloadable object file
is created, otherwise an executable program is created."
;;
uninstall)
$echo \
"Usage: $modename [OPTION]... --mode=uninstall RM [RM-OPTION]... FILE...
Remove libraries from an installation directory.
RM is the name of the program to use to delete files associated with each FILE
(typically \`/bin/rm'). RM-OPTIONS are options (such as \`-f') to be passed
to RM.
If FILE is a libtool library, all the files associated with it are deleted.
Otherwise, only FILE itself is deleted using RM."
;;
*)
# Unknown mode: report the error and the generic help text on stderr.
$echo "$modename: invalid operation mode \`$mode'" 1>&2
$echo "$help" 1>&2
exit 1
;;
esac
# Shared footer for all valid modes: blank line plus pointer to
# per-mode help.
echo
$echo "Try \`$modename --help' for more information about other modes."
exit 0
# The TAGs below are defined such that we never get into a situation
# in which we disable both kinds of libraries. Given conflicting
# choices, we go for a static library, that is the most portable,
# since we can't tell whether shared libraries were disabled because
# the user asked for that or because the platform doesn't support
# them. This is particularly important on AIX, because we don't
# support having both static and shared libraries enabled at the same
# time on that platform, so we default to a shared-only configuration.
# If a disable-shared tag is given, we'll fallback to a static-only
# configuration. But we'll never go from static-only to shared-only.
# NOTE: the BEGIN/END marker lines below are parsed by libtool to
# extract tag configurations; do not alter their wording.
### BEGIN LIBTOOL TAG CONFIG: disable-shared
build_libtool_libs=no
build_old_libs=yes
### END LIBTOOL TAG CONFIG: disable-shared
### BEGIN LIBTOOL TAG CONFIG: disable-static
# Invert the current setting: build static archives only when shared
# libraries are enabled, so at least one kind of library is produced.
build_old_libs=`case $build_libtool_libs in yes) echo no;; *) echo yes;; esac`
### END LIBTOOL TAG CONFIG: disable-static
# Local Variables:
# mode:shell-script
# sh-indentation:2
# End:
|
doxxx/katy
|
admin/old-ltmain.sh
|
Shell
|
gpl-2.0
| 153,798 |
#!/bin/bash
# Bioconda build script for the UCSC pslPairs utility.
# Builds the kent source libraries pslPairs depends on, then installs
# the resulting binary into the conda environment's bin directory.

# Abort on the first failed command: previously a failed make inside a
# subshell was silently ignored and the script could copy a stale or
# missing binary into $PREFIX/bin.
set -euo pipefail

mkdir -p "$PREFIX/bin"

# The kent makefiles consult MACHTYPE, BINDIR, and L (extra link flags).
export MACHTYPE=x86_64
export BINDIR="$(pwd)/bin"
export L="${LDFLAGS}"
mkdir -p "$BINDIR"

# Build the support libraries first, then the tool itself; each make
# runs in a subshell so the working directory is restored afterwards.
(cd kent/src/lib && make)
(cd kent/src/htslib && make)
(cd kent/src/jkOwnLib && make)
(cd kent/src/hg/lib && make)
(cd kent/src/hg/pslPairs && make)

cp bin/pslPairs "$PREFIX/bin"
chmod +x "$PREFIX/bin/pslPairs"
|
ostrokach/bioconda-recipes
|
recipes/ucsc-pslpairs/build.sh
|
Shell
|
mit
| 335 |
#!/bin/sh
#
# Copyright (c) 2018 Jiang Xin
#
test_description='Test git pack-redundant
In order to test git-pack-redundant, we will create a number of objects and
packs in the repository `main.git`. The relationship between packs (P1-P8)
and objects (T, A-R) is showed in the following chart. Objects of a pack will
be marked with letter x, while objects of redundant packs will be marked with
exclamation point, and redundant pack itself will be marked with asterisk.
| T A B C D E F G H I J K L M N O P Q R
----+--------------------------------------
P1 | x x x x x x x x
P2* | ! ! ! ! ! ! !
P3 | x x x x x x
P4* | ! ! ! ! !
P5 | x x x x
P6* | ! ! !
P7 | x x
P8* | !
----+--------------------------------------
ALL | x x x x x x x x x x x x x x x x x x x
Another repository `shared.git` has unique objects (X-Z), while other objects
(marked with letter s) are shared through alt-odb (of `main.git`). The
relationship between packs and objects is as follows:
| T A B C D E F G H I J K L M N O P Q R X Y Z
----+----------------------------------------------
Px1 | s s s x x x
Px2 | s s s x x x
'
. ./test-lib.sh
# Repositories used throughout the file: main.git holds objects T, A-R;
# shared.git reaches them through objects/info/alternates.
main_repo=main.git
shared_repo=shared.git
# Single place to attach the acknowledgement flag that pack-redundant
# requires, so every invocation below stays consistent.
git_pack_redundant='git pack-redundant --i-still-use-this'
# Create commits in <repo> and assign each commit's oid to shell variables
# given in the arguments (A, B, and C). E.g.:
#
# create_commits_in <repo> A B C
#
# NOTE: Avoid calling this function from a subshell since variable
# assignments will disappear when subshell exits.
create_commits_in () {
repo="$1" &&
# Use the repo's current HEAD as the first parent when it already has
# commits; otherwise start an initial (parentless) commit.
if ! parent=$(git -C "$repo" rev-parse HEAD^{} 2>/dev/null)
then
parent=
fi &&
# All commits share the same (empty) tree; only the message differs.
T=$(git -C "$repo" write-tree) &&
shift &&
while test $# -gt 0
do
name=$1 &&
test_tick &&
if test -z "$parent"
then
oid=$(echo $name | git -C "$repo" commit-tree $T)
else
oid=$(echo $name | git -C "$repo" commit-tree -p $parent $T)
fi &&
# Export the new commit oid under the caller-supplied variable name
# and chain the next commit onto it.
eval $name=$oid &&
parent=$oid &&
shift ||
return 1
done &&
git -C "$repo" update-ref refs/heads/main $oid
}
# Create pack in <repo> and assign pack id to variable given in the 2nd argument
# (<name>). Commits in the pack will be read from stdin. E.g.:
#
# create_pack_in <repo> <name> <<-EOF
# ...
# EOF
#
# NOTE: commits from stdin should be given using heredoc, not using pipe, and
# avoid calling this function from a subshell since variable assignments will
# disappear when subshell exits.
create_pack_in () {
repo="$1" &&
name="$2" &&
# pack-objects prints the new pack's id on stdout.
pack=$(git -C "$repo/objects/pack" pack-objects -q pack) &&
# Record both directions: $name -> pack id, and P<pack id> -> "name:id"
# (the reverse mapping is what format_packfiles looks up).
eval $name=$pack &&
eval P$pack=$name:$pack
}
format_packfiles () {
	# Normalize pack-file paths (*.idx / *.pack) read from stdin into
	# bare pack ids, drop duplicates, then print "<name>:<id>" for any
	# pack registered via a P<id> shell variable, or the raw id when no
	# such variable exists. A final sort keeps output order stable.
	sed -e "s#.*/pack-\(.*\)\.idx#\1#" \
	    -e "s#.*/pack-\(.*\)\.pack#\1#" |
	sort -u |
	while read id
	do
		label=$(eval echo \${P$id})
		if test -n "$label"
		then
			echo "$label"
		else
			echo $id
		fi
	done |
	sort
}
test_expect_success 'setup main repo' '
git init --bare "$main_repo" &&
create_commits_in "$main_repo" A B C D E F G H I J K L M N O P Q R
'
test_expect_success 'main: pack-redundant works with no packfile' '
(
cd "$main_repo" &&
cat >expect <<-EOF &&
fatal: Zero packs found!
EOF
test_must_fail $git_pack_redundant --all >actual 2>&1 &&
test_cmp expect actual
)
'
#############################################################################
# Chart of packs and objects for this test case
#
# | T A B C D E F G H I J K L M N O P Q R
# ----+--------------------------------------
# P1 | x x x x x x x x
# ----+--------------------------------------
# ALL | x x x x x x x x
#
#############################################################################
test_expect_success 'main: pack-redundant works with one packfile' '
create_pack_in "$main_repo" P1 <<-EOF &&
$T
$A
$B
$C
$D
$E
$F
$R
EOF
(
cd "$main_repo" &&
$git_pack_redundant --all >out &&
test_must_be_empty out
)
'
#############################################################################
# Chart of packs and objects for this test case
#
# | T A B C D E F G H I J K L M N O P Q R
# ----+--------------------------------------
# P1 | x x x x x x x x
# P2 | x x x x x x x
# P3 | x x x x x x
# ----+--------------------------------------
# ALL | x x x x x x x x x x x x x x x
#
#############################################################################
test_expect_success 'main: no redundant for pack 1, 2, 3' '
create_pack_in "$main_repo" P2 <<-EOF &&
$B
$C
$D
$E
$G
$H
$I
EOF
create_pack_in "$main_repo" P3 <<-EOF &&
$F
$I
$J
$K
$L
$M
EOF
(
cd "$main_repo" &&
$git_pack_redundant --all >out &&
test_must_be_empty out
)
'
#############################################################################
# Chart of packs and objects for this test case
#
# | T A B C D E F G H I J K L M N O P Q R
# ----+--------------------------------------
# P1 | x x x x x x x x
# P2 | x x x x x x x
# P3* | ! ! ! ! ! !
# P4 | x x x x x
# P5 | x x x x
# ----+--------------------------------------
# ALL | x x x x x x x x x x x x x x x x x x
#
#############################################################################
test_expect_success 'main: one of pack-2/pack-3 is redundant' '
create_pack_in "$main_repo" P4 <<-EOF &&
$J
$K
$L
$M
$P
EOF
create_pack_in "$main_repo" P5 <<-EOF &&
$G
$H
$N
$O
EOF
(
cd "$main_repo" &&
cat >expect <<-EOF &&
P3:$P3
EOF
$git_pack_redundant --all >out &&
format_packfiles <out >actual &&
test_cmp expect actual
)
'
#############################################################################
# Chart of packs and objects for this test case
#
# | T A B C D E F G H I J K L M N O P Q R
# ----+--------------------------------------
# P1 | x x x x x x x x
# P2* | ! ! ! ! ! ! !
# P3 | x x x x x x
# P4* | ! ! ! ! !
# P5 | x x x x
# P6* | ! ! !
# P7 | x x
# ----+--------------------------------------
# ALL | x x x x x x x x x x x x x x x x x x x
#
#############################################################################
test_expect_success 'main: pack 2, 4, and 6 are redundant' '
create_pack_in "$main_repo" P6 <<-EOF &&
$N
$O
$Q
EOF
create_pack_in "$main_repo" P7 <<-EOF &&
$P
$Q
EOF
(
cd "$main_repo" &&
cat >expect <<-EOF &&
P2:$P2
P4:$P4
P6:$P6
EOF
$git_pack_redundant --all >out &&
format_packfiles <out >actual &&
test_cmp expect actual
)
'
#############################################################################
# Chart of packs and objects for this test case
#
# | T A B C D E F G H I J K L M N O P Q R
# ----+--------------------------------------
# P1 | x x x x x x x x
# P2* | ! ! ! ! ! ! !
# P3 | x x x x x x
# P4* | ! ! ! ! !
# P5 | x x x x
# P6* | ! ! !
# P7 | x x
# P8* | !
# ----+--------------------------------------
# ALL | x x x x x x x x x x x x x x x x x x x
#
#############################################################################
test_expect_success 'main: pack-8 (subset of pack-1) is also redundant' '
create_pack_in "$main_repo" P8 <<-EOF &&
$A
EOF
(
cd "$main_repo" &&
cat >expect <<-EOF &&
P2:$P2
P4:$P4
P6:$P6
P8:$P8
EOF
$git_pack_redundant --all >out &&
format_packfiles <out >actual &&
test_cmp expect actual
)
'
test_expect_success 'main: clean loose objects' '
(
cd "$main_repo" &&
git prune-packed &&
find objects -type f | sed -e "/objects\/pack\//d" >out &&
test_must_be_empty out
)
'
test_expect_success 'main: remove redundant packs and pass fsck' '
(
cd "$main_repo" &&
$git_pack_redundant --all | xargs rm &&
git fsck &&
$git_pack_redundant --all >out &&
test_must_be_empty out
)
'
# The following test cases will execute inside `shared.git`, instead of
# inside `main.git`.
test_expect_success 'setup shared.git' '
git clone --mirror "$main_repo" "$shared_repo" &&
(
cd "$shared_repo" &&
printf "../../$main_repo/objects\n" >objects/info/alternates
)
'
test_expect_success 'shared: all packs are redundant, but no output without --alt-odb' '
(
cd "$shared_repo" &&
$git_pack_redundant --all >out &&
test_must_be_empty out
)
'
#############################################################################
# Chart of packs and objects for this test case
#
# ================= main.git ================
# | T A B C D E F G H I J K L M N O P Q R <----------+
# ----+-------------------------------------- |
# P1 | x x x x x x x x |
# P3 | x x x x x x |
# P5 | x x x x |
# P7 | x x |
# ----+-------------------------------------- |
# ALL | x x x x x x x x x x x x x x x x x x x |
# |
# |
# ================ shared.git =============== |
# | T A B C D E F G H I J K L M N O P Q R <objects/info/alternates>
# ----+--------------------------------------
# P1* | s s s s s s s s
# P3* | s s s s s s
# P5* | s s s s
# P7* | s s
# ----+--------------------------------------
# ALL | x x x x x x x x x x x x x x x x x x x
#
#############################################################################
test_expect_success 'shared: show redundant packs in stderr for verbose mode' '
(
cd "$shared_repo" &&
cat >expect <<-EOF &&
P1:$P1
P3:$P3
P5:$P5
P7:$P7
EOF
$git_pack_redundant --all --verbose >out 2>out.err &&
test_must_be_empty out &&
grep "pack$" out.err | format_packfiles >actual &&
test_cmp expect actual
)
'
test_expect_success 'shared: remove redundant packs, no packs left' '
(
cd "$shared_repo" &&
cat >expect <<-EOF &&
fatal: Zero packs found!
EOF
$git_pack_redundant --all --alt-odb | xargs rm &&
git fsck &&
test_must_fail $git_pack_redundant --all --alt-odb >actual 2>&1 &&
test_cmp expect actual
)
'
test_expect_success 'shared: create new objects and packs' '
create_commits_in "$shared_repo" X Y Z &&
create_pack_in "$shared_repo" Px1 <<-EOF &&
$X
$Y
$Z
$A
$B
$C
EOF
create_pack_in "$shared_repo" Px2 <<-EOF
$X
$Y
$Z
$D
$E
$F
EOF
'
test_expect_success 'shared: no redundant without --alt-odb' '
(
cd "$shared_repo" &&
$git_pack_redundant --all >out &&
test_must_be_empty out
)
'
#############################################################################
# Chart of packs and objects for this test case
#
# ================= main.git ================
# | T A B C D E F G H I J K L M N O P Q R <----------------+
# ----+-------------------------------------- |
# P1 | x x x x x x x x |
# P3 | x x x x x x |
# P5 | x x x x |
# P7 | x x |
# ----+-------------------------------------- |
# ALL | x x x x x x x x x x x x x x x x x x x |
# |
# |
# ================ shared.git ======================= |
# | T A B C D E F G H I J K L M N O P Q R X Y Z <objects/info/alternates>
# ----+----------------------------------------------
# Px1 | s s s x x x
# Px2*| s s s ! ! !
# ----+----------------------------------------------
# ALL | s s s s s s s s s s s s s s s s s s s x x x
#
#############################################################################
test_expect_success 'shared: one pack is redundant with --alt-odb' '
(
cd "$shared_repo" &&
$git_pack_redundant --all --alt-odb >out &&
format_packfiles <out >actual &&
test_line_count = 1 actual
)
'
#############################################################################
# Chart of packs and objects for this test case
#
# ================= main.git ================
# | T A B C D E F G H I J K L M N O P Q R <----------------+
# ----+-------------------------------------- |
# P1 | x x x x x x x x |
# P3 | x x x x x x |
# P5 | x x x x |
# P7 | x x |
# ----+-------------------------------------- |
# ALL | x x x x x x x x x x x x x x x x x x x |
# |
# |
# ================ shared.git ======================= |
# | T A B C D E F G H I J K L M N O P Q R X Y Z <objects/info/alternates>
# ----+----------------------------------------------
# Px1*| s s s i i i
# Px2*| s s s i i i
# ----+----------------------------------------------
# ALL | s s s s s s s s s s s s s s s s s s s i i i
# (ignored objects, marked with i)
#
#############################################################################
test_expect_success 'shared: ignore unique objects and all two packs are redundant' '
(
cd "$shared_repo" &&
cat >expect <<-EOF &&
Px1:$Px1
Px2:$Px2
EOF
$git_pack_redundant --all --alt-odb >out <<-EOF &&
$X
$Y
$Z
EOF
format_packfiles <out >actual &&
test_cmp expect actual
)
'
test_done
|
abg1979/git
|
t/t5323-pack-redundant.sh
|
Shell
|
gpl-2.0
| 14,899 |
#!/usr/bin/env bash
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This script tests the Hadoop cloud scripts by running through a minimal
# sequence of steps to start a persistent (EBS) cluster, run a job, then
# shutdown the cluster.
#
# Example usage:
# HADOOP_HOME=~/dev/hadoop-0.20.1/ ./persistent-cluster.sh
#
# Poll until no EBS volume for $CLUSTER is reported as 'attached'.
# Globals: HADOOP_CLOUD_SCRIPT, CONFIG_DIR, CLUSTER (read).
# Error/trace modes are relaxed inside the loop because grep legitimately
# exits non-zero once nothing matches, and the polling would be noisy
# under 'set -x'; both modes are restored on return to match the
# script-wide 'set -e; set -x'.
function wait_for_volume_detachment() {
  set +e
  set +x
  while true; do
    # Modernized from backticks to $() for readability/nesting.
    attached=$($HADOOP_CLOUD_SCRIPT list-storage --config-dir=$CONFIG_DIR \
      $CLUSTER | awk '{print $6}' | grep 'attached')
    # Check before sleeping: the previous version slept 5 extra seconds
    # after detachment had already been observed.
    if [ -z "$attached" ]; then
      break
    fi
    sleep 5
  done
  set -e
  set -x
}
# Fail fast and trace every command for easier CI debugging.
set -e
set -x
bin=`dirname "$0"`
bin=`cd "$bin"; pwd`
# All knobs below are overridable from the environment; defaults target
# an EC2 EBS test cluster.
WORKSPACE=${WORKSPACE:-`pwd`}
CONFIG_DIR=${CONFIG_DIR:-$WORKSPACE/.hadoop-cloud}
CLUSTER=${CLUSTER:-hadoop-cloud-ebs-$USER-test-cluster}
IMAGE_ID=${IMAGE_ID:-ami-6159bf08} # default to Fedora 32-bit AMI
AVAILABILITY_ZONE=${AVAILABILITY_ZONE:-us-east-1c}
KEY_NAME=${KEY_NAME:-$USER}
AUTO_SHUTDOWN=${AUTO_SHUTDOWN:-15}
LOCAL_HADOOP_VERSION=${LOCAL_HADOOP_VERSION:-0.20.1}
HADOOP_HOME=${HADOOP_HOME:-$WORKSPACE/hadoop-$LOCAL_HADOOP_VERSION}
HADOOP_CLOUD_HOME=${HADOOP_CLOUD_HOME:-$bin/../py}
HADOOP_CLOUD_PROVIDER=${HADOOP_CLOUD_PROVIDER:-ec2}
SSH_OPTIONS=${SSH_OPTIONS:-"-i ~/.$HADOOP_CLOUD_PROVIDER/id_rsa-$KEY_NAME \
-o StrictHostKeyChecking=no"}
HADOOP_CLOUD_SCRIPT=$HADOOP_CLOUD_HOME/hadoop-$HADOOP_CLOUD_PROVIDER
export HADOOP_CONF_DIR=$CONFIG_DIR/$CLUSTER
# Install Hadoop locally
if [ ! -d $HADOOP_HOME ]; then
wget http://archive.apache.org/dist/hadoop/core/hadoop-\
$LOCAL_HADOOP_VERSION/hadoop-$LOCAL_HADOOP_VERSION.tar.gz
tar zxf hadoop-$LOCAL_HADOOP_VERSION.tar.gz -C $WORKSPACE
rm hadoop-$LOCAL_HADOOP_VERSION.tar.gz
fi
# Create storage
$HADOOP_CLOUD_SCRIPT create-storage --config-dir=$CONFIG_DIR \
--availability-zone=$AVAILABILITY_ZONE $CLUSTER nn 1 \
$bin/ebs-storage-spec.json
$HADOOP_CLOUD_SCRIPT create-storage --config-dir=$CONFIG_DIR \
--availability-zone=$AVAILABILITY_ZONE $CLUSTER dn 1 \
$bin/ebs-storage-spec.json
# Launch a cluster
# NOTE(review): CLIENT_CIDRS and ENVS are expected to come from the
# environment; they expand to nothing when unset -- confirm intended.
$HADOOP_CLOUD_SCRIPT launch-cluster --config-dir=$CONFIG_DIR \
--image-id=$IMAGE_ID --key-name=$KEY_NAME --auto-shutdown=$AUTO_SHUTDOWN \
--availability-zone=$AVAILABILITY_ZONE $CLIENT_CIDRS $ENVS $CLUSTER 1
# Run a proxy and save its pid in HADOOP_CLOUD_PROXY_PID
eval `$HADOOP_CLOUD_SCRIPT proxy --config-dir=$CONFIG_DIR \
--ssh-options="$SSH_OPTIONS" $CLUSTER`
# Run a job and check it works
$HADOOP_HOME/bin/hadoop fs -mkdir input
$HADOOP_HOME/bin/hadoop fs -put $HADOOP_HOME/LICENSE.txt input
$HADOOP_HOME/bin/hadoop jar $HADOOP_HOME/hadoop-*-examples.jar grep \
input output Apache
# following returns a non-zero exit code if no match
$HADOOP_HOME/bin/hadoop fs -cat 'output/part-00000' | grep Apache
# Shutdown the cluster
kill $HADOOP_CLOUD_PROXY_PID
$HADOOP_CLOUD_SCRIPT terminate-cluster --config-dir=$CONFIG_DIR --force $CLUSTER
sleep 5 # wait for termination to take effect
# Relaunch the cluster
$HADOOP_CLOUD_SCRIPT launch-cluster --config-dir=$CONFIG_DIR \
--image-id=$IMAGE_ID --key-name=$KEY_NAME --auto-shutdown=$AUTO_SHUTDOWN \
--availability-zone=$AVAILABILITY_ZONE $CLIENT_CIDRS $ENVS $CLUSTER 1
# Run a proxy and save its pid in HADOOP_CLOUD_PROXY_PID
eval `$HADOOP_CLOUD_SCRIPT proxy --config-dir=$CONFIG_DIR \
--ssh-options="$SSH_OPTIONS" $CLUSTER`
# Check output is still there (i.e. EBS storage persisted the data
# across the cluster restart)
$HADOOP_HOME/bin/hadoop fs -cat 'output/part-00000' | grep Apache
# Shutdown the cluster
kill $HADOOP_CLOUD_PROXY_PID
$HADOOP_CLOUD_SCRIPT terminate-cluster --config-dir=$CONFIG_DIR --force $CLUSTER
sleep 5 # wait for termination to take effect
# Cleanup
$HADOOP_CLOUD_SCRIPT delete-cluster --config-dir=$CONFIG_DIR $CLUSTER
wait_for_volume_detachment
$HADOOP_CLOUD_SCRIPT delete-storage --config-dir=$CONFIG_DIR --force $CLUSTER
|
ZhangXFeng/hadoop
|
src/hadoop-mapreduce1-project/src/contrib/cloud/src/integration-test/persistent-cluster.sh
|
Shell
|
apache-2.0
| 4,539 |
#!/bin/sh
#
# Copyright (C) 2011-2012 OpenWrt.org
#
_ubootenv_add_uci_config() {
	# Append one section of the given type to the ubootenv UCI config,
	# describing where a U-Boot environment copy lives on flash
	# (device, byte offset, environment size, sector size, sector count).
	local cfgtype dev offset envsize secsize numsec
	cfgtype="$1"
	dev="$2"
	offset="$3"
	envsize="$4"
	secsize="$5"
	numsec="$6"
	uci batch <<EOF
add ubootenv $cfgtype
set ubootenv.@$cfgtype[-1].dev='$dev'
set ubootenv.@$cfgtype[-1].offset='$offset'
set ubootenv.@$cfgtype[-1].envsize='$envsize'
set ubootenv.@$cfgtype[-1].secsize='$secsize'
set ubootenv.@$cfgtype[-1].numsec='$numsec'
EOF
	uci commit ubootenv
}
# Public wrappers: register a UCI section of type "ubootenv" or
# "ubootsys"; remaining arguments (dev, offset, envsize, secsize,
# numsec) are passed straight through.
ubootenv_add_uci_config() {
	_ubootenv_add_uci_config "ubootenv" "$@"
}
ubootenv_add_uci_sys_config() {
	_ubootenv_add_uci_config "ubootsys" "$@"
}
ubootenv_add_app_config() {
	# Translate one UCI section (named by $1) into a line of
	# /etc/fw_env.config or /etc/fw_sys.config -- "${cfgtype#uboot}"
	# maps "ubootenv" -> "env" and "ubootsys" -> "sys". These files are
	# presumably consumed by the U-Boot fw_printenv/fw_setenv tools.
	local cfgtype
	local dev
	local offset
	local envsize
	local secsize
	local numsec
	config_get cfgtype "$1" TYPE
	config_get dev "$1" dev
	config_get offset "$1" offset
	config_get envsize "$1" envsize
	config_get secsize "$1" secsize
	config_get numsec "$1" numsec
	# Append only if no entry for the same device+offset exists yet,
	# keeping the config file idempotent across reruns.
	grep -q "^[[:space:]]*${dev}[[:space:]]*${offset}" "/etc/fw_${cfgtype#uboot}.config" || echo "$dev $offset $envsize $secsize $numsec" >>"/etc/fw_${cfgtype#uboot}.config"
}
|
the2masters/openwrt
|
package/boot/uboot-envtools/files/uboot-envtools.sh
|
Shell
|
gpl-2.0
| 1,087 |
#!/bin/sh
################################################################################
## ##
## Copyright (c) International Business Machines Corp., 2001,2005 ##
## ##
## This program is free software; you can redistribute it and#or modify ##
## it under the terms of the GNU General Public License as published by ##
## the Free Software Foundation; either version 2 of the License, or ##
## (at your option) any later version. ##
## ##
## This program is distributed in the hope that it will be useful, but ##
## WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY ##
## or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ##
## for more details. ##
## ##
## You should have received a copy of the GNU General Public License ##
## along with this program; if not, write to the Free Software ##
## Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA ##
## ##
################################################################################
# File: runltplite
#
# Description: This script can be used to run a subset the tests in the LTP test suite
# This script is typically used as a quick test to check an install base.
#
# Authors: Manoj Iyer - [email protected]
# Robbie Williamson - [email protected]
# Marty Ridgeway - [email protected]
#
# History: Created runltplite script to run a subset of the LTP testsuite
#
#
#
#
#
#
#
#
# NOTE(review): sourcing runltp pulls in shared helpers used below
# (e.g. set_block_device); verify it does not also execute runltp's
# own main logic as a side effect.
. "$(dirname $0)/runltp"
setup()
{
    # Move to the LTP install root (the directory holding this script),
    # export the environment the test drivers expect, and verify that
    # the testcase binaries and the ltp-pan driver are installed.
    if ! cd $(dirname $0)
    then
        echo "FATAL: unable to change directory to $(dirname $0)"
        exit 1
    fi
    export LTPROOT=${PWD}
    export TMPBASE="/tmp"
    export TMP="${TMPBASE}/ltp-$$"
    export PATH="${PATH}:${LTPROOT}/testcases/bin"
    export LTP_DEV=""
    export LTP_DEV_FS_TYPE="ext2"
    if [ ! -d $LTPROOT/testcases/bin ]
    then
        echo "FATAL: LTP not installed correctly"
        echo "INFO: Follow directions in INSTALL!"
        exit 1
    fi
    if [ ! -e $LTPROOT/bin/ltp-pan ]
    then
        echo "FATAL: Test suite driver 'ltp-pan' not found"
        echo "INFO: Follow directions in INSTALL!"
        exit 1
    fi
}
usage()
{
# Print the option summary to stderr, then exit successfully.
cat <<-EOF >&2
usage: ${0##*/} -c [-d TMPDIR] [-i # (in Mb)]
[ -l LOGFILE ] [ -o OUTPUTFILE ] [ -m # (in Mb)] -N -q
[ -r LTPROOT ] -v
-c NUM_PROCS Run LTP under additional background CPU load.
-d TMPDIR Directory where temporary files will be created.
-h Help. Prints all available options.
-i # (in Mb) Run LTP with a _min_ IO load of # Mb in background.
-l LOGFILE Log results of test in a logfile.
-m # (in Mb) Run LTP with a _min_ memory load of # Mb in background.
-N Run all the networking tests.
-o OUTPUTFILE Redirect test output to a file.
-p Human readable format logfiles.
-q Print less verbose output to screen.
-r LTPROOT Fully qualified path where testsuite is installed.
-b DEVICE Some tests require an unmounted block device to run
correctly.
-B LTP_DEV_FS_TYPE The file system of test block devices.
example: ${0##*/} -i 1024 -m 128 -p -q -l /tmp/resultlog.$$ -d ${PWD}
EOF
exit 0
}
main()
{
# Drive one runltplite run: parse options, optionally start background
# load generators, assemble the testcase list, then execute ltp-pan.
local CMDFILE="ltplite"
local PRETTY_PRT=""
local ALT_DIR=0
local RUN_NETEST=0
local QUIET_MODE=""
local VERBOSE_MODE=""
local NETPIPE=0
local GENLOAD=0
local MEMSIZE=0
local DURATION=""
local BYTESIZE=0
local LOGFILE=""
# NOTE(review): PRETTY_PRT is declared twice (also above); harmless but
# redundant.
local PRETTY_PRT=""
local TAG_RESTRICT_STRING=""
local PAN_COMMAND=""
local scenfile=""
while getopts c:d:hi:l:m:No:pqr:b:B: arg
do case $arg in
c)
NUM_PROCS=$(($OPTARG))
$LTPROOT/testcases/bin/genload --cpu $NUM_PROCS >/dev/null 2>&1 &
GENLOAD=1 ;;
d) # append $$ to TMP, as it is recursively
# removed at end of script.
TMPBASE=$OPTARG
TMP="${TMPBASE}/ltp-$$"
export TMPDIR="$TMP";;
h) usage;;
i)
BYTESIZE=$(($OPTARG * 1024 * 1024))
$LTPROOT/testcases/bin/genload --io 1 >/dev/null 2>&1 &
$LTPROOT/testcases/bin/genload --hdd 0 --hdd-bytes $BYTESIZE \
>/dev/null 2>&1 &
GENLOAD=1 ;;
l)
[ ! -d $LTPROOT/results ] && \
{
echo "INFO: creating $LTPROOT/results directory"
mkdir -p $LTPROOT/results || \
{
echo "ERROR: failed to create $LTPROOT/results"
exit 1
}
}
# Absolute logfile paths are used as-is; relative ones land in
# $LTPROOT/results and set ALT_DIR for the final hint message.
case $OPTARG in
/*)
LOGFILE="-l $OPTARG" ;;
*)
LOGFILE="-l $LTPROOT/results/$OPTARG"
ALT_DIR=1 ;;
esac ;;
m)
MEMSIZE=$(($OPTARG * 1024 * 1024))
$LTPROOT/testcases/bin/genload --vm 0 --vm-bytes $MEMSIZE \
>/dev/null 2>&1 &
GENLOAD=1;;
N) RUN_NETEST=1;;
o) OUTPUTFILE="-o $OPTARG" ;;
p) PRETTY_PRT=" -p ";;
q) QUIET_MODE=" -q ";;
r) LTPROOT=$OPTARG;;
b) DEVICE=$OPTARG;;
B) LTP_DEV_FS_TYPE=$OPTARG;;
\?) usage;;
esac
done
mkdir -p $TMP || \
{
echo "FATAL: Unable to make temporary directory $TMP"
exit 1
}
cd $TMP || \
{
echo "could not cd ${TMP} ... exiting"
exit 1
}
# Run Networking tests ?
[ "$RUN_NETEST" -eq 1 ] && \
{
[ -z "$RHOST" ] || [ -z "$PASSWD" ] && \
{
[ -z "$RHOST" ] && \
{
echo \
"INFO: Enter RHOST = 'name of the remote host machine'"
echo -n "-> "
read RHOST
}
[ -z "$PASSWD" ] && \
{
echo " "
echo \
"INFO: Enter PASSWD = 'root passwd of the remote host machine'"
echo -n "-> "
read PASSWD
}
export RHOST=$RHOST
export PASSWD=$PASSWD
echo "WARNING: security of $RHOST may be compromised"
}
}
# If user does not provide a command file select a default set of testcases
# to execute.
# NOTE(review): because the '||' branch is an assignment (which always
# succeeds), the body runs whether or not ./ltplite exists locally --
# confirm this resolution order is intended.
if [ -f $CMDFILE ] || \
CMDFILE="$LTPROOT/runtest/$CMDFILE"
then
cat $CMDFILE > ${TMP}/alltests || \
{
echo "FATAL: Unable to create command file"
exit 1
}
fi
if [ "$RUN_NETEST" -eq 1 ]; then
SCENARIO_LISTS="$SCENARIO_LISTS $LTPROOT/scenario_groups/network"
fi
# DO NOT INDENT/DEDENT!
if [ -n "$SCENARIO_LISTS" ]; then
# Insurance to make sure that the first element in the pipe
# completed successfully.
cat_ok_sentinel=$TMP/cat_ok.$$
(cat $SCENARIO_LISTS && touch "$cat_ok_sentinel") | \
while read scenfile; do
scenfile=${LTPROOT}/runtest/$scenfile
# Skip over non-existent scenario files; things are
# robust enough now that the build will fail if these
# files don't exist.
[ -f "$scenfile" ] || continue
cat $scenfile >> "$TMP/alltests" || {
echo "FATAL: unable to append to command file"
rm -Rf "$TMP"
rm -f "$cat_ok_sentinel"
exit 1
}
done
rm -f "$cat_ok_sentinel"
fi
# ^^DO NOT INDENT/DEDENT!^^
# The fsx-linux tests use the SCRATCHDEV environment variable as a location
# that can be reformatted and run on. Set SCRATCHDEV if you want to run
# these tests. As a safeguard, this is disabled.
unset SCRATCHDEV
[ -n "$SCRATCHDEV" ] && \
{
cat ${LTPROOT}/runtest/fsx >> ${TMP}/alltests ||
{
echo "FATAL: unable to create fsx-linux tests command file"
exit 1
}
}
# check for required users and groups
${LTPROOT}/IDcheck.sh &>/dev/null || \
{
echo "WARNING: required users and groups not present"
echo "WARNING: some test cases may fail"
}
[ -n "$CMDFILES" ] && \
{
for scenfile in `echo "$CMDFILES" | tr ',' ' '`
do
[ -f "$scenfile" ] || scenfile="$LTPROOT/runtest/$scenfile"
cat "$scenfile" >> ${TMP}/alltests || \
{
echo "FATAL: unable to create command file"
rm -Rf "$TMP"
exit 1
}
done
}
# display versions of installed software
[ -z "$QUIET_MODE" ] && \
{
${LTPROOT}/ver_linux || \
{
echo "WARNING: unable to display versions of software installed"
exit 1
}
}
# set_block_device is provided by the sourced runltp script.
set_block_device
[ ! -z "$QUIET_MODE" ] && { echo "INFO: Test start time: $(date)" ; }
PAN_COMMAND="${LTPROOT}/bin/ltp-pan $QUIET_MODE -e -S $INSTANCES $DURATION -a $$ \
-n $$ $PRETTY_PRT -f ${TMP}/alltests $LOGFILE $OUTPUTFILE"
if [ ! -z "$VERBOSE_MODE" ] ; then
echo "COMMAND: $PAN_COMMAND"
if [ ! -z "$TAG_RESTRICT_STRING" ] ; then
echo "INFO: Restricted to $TAG_RESTRICT_STRING"
fi
fi
#$PAN_COMMAND #Duplicated code here, because otherwise if we fail, only "PAN_COMMAND" gets output
# Some tests need to run inside the "bin" directory.
cd "${LTPROOT}/testcases/bin"
${LTPROOT}/bin/ltp-pan $QUIET_MODE -e -S $INSTANCES $DURATION -a $$ \
-n $$ $PRETTY_PRT -f ${TMP}/alltests $LOGFILE $OUTPUTFILE
if [ $? -eq 0 ]; then
echo "INFO: ltp-pan reported all tests PASS"
VALUE=0
else
echo "INFO: ltp-pan reported some tests FAIL"
VALUE=1
fi
cd ..
[ ! -z "$QUIET_MODE" ] && { echo "INFO: Test end time: $(date)" ; }
# Tear down any background load generators started via -c/-i/-m.
[ "$GENLOAD" -eq 1 ] && { killall -9 genload ; }
[ "$NETPIPE" -eq 1 ] && { killall -9 NPtcp ; }
[ "$ALT_DIR" -eq 1 ] && \
{
cat <<-EOF >&1
###############################################################"
Done executing testcases."
result log is in the $LTPROOT/results directory"
###############################################################"
EOF
}
exit $VALUE
}
cleanup()
{
    # Remove the per-run scratch directory. Quote $TMP (the old unquoted
    # form word-split paths with spaces) and guard against an unset or
    # empty value so the EXIT trap can never run an unanchored 'rm -rf'.
    if [ -n "${TMP:-}" ]
    then
        rm -rf -- "${TMP}"
    fi
}
# Remove the scratch directory on any exit path, then run the script.
trap "cleanup" 0
setup
main "$@"
#vim: syntax=sh
|
liaoqingwei/ltp
|
runltplite.sh
|
Shell
|
gpl-2.0
| 11,062 |
#!/bin/bash
# Run a Nutch crawl of the JSP pages for the spacewalk search server,
# storing the crawl database under ./data/crawl_jsp.

# Fall back to the distribution JVM when the caller did not set JAVA_HOME.
if [ "${JAVA_HOME}" = "" ]; then
    export JAVA_HOME=/usr/lib/jvm/java
    echo "Setting JAVA_HOME to: ${JAVA_HOME}"
fi

export NUTCH_HOME=/usr/share/nutch
export NUTCH_CONF_DIR=`pwd`/crawl_jsp/conf
# Bug fix: this previously interpolated the undefined ${NUTCH_CONF},
# which made the logging config path degenerate to "/logging.properties";
# NUTCH_CONF_DIR (set above) is the intended variable.
export NUTCH_OPTS=-Djava.util.logging.config.file="${NUTCH_CONF_DIR}/logging.properties"
export NUTCH_LOG_DIR=`pwd`/logs
export NUTCH_LOG_FILE=$0.log
export OUTPUT_DIR=`pwd`/data/crawl_jsp

# Echo the effective configuration so crawl runs are reproducible.
echo "NUTCH_HOME = ${NUTCH_HOME}"
echo "NUTCH_CONF_DIR = ${NUTCH_CONF_DIR}"
echo "NUTCH_OPTS = ${NUTCH_OPTS}"
echo "NUTCH_LOG_DIR = ${NUTCH_LOG_DIR}"
echo "NUTCH_LOG_FILE = ${NUTCH_LOG_FILE}"
echo "OUTPUT_DIR = ${OUTPUT_DIR}"

if [ ! -d ${NUTCH_LOG_DIR} ]; then
    echo "Creating log directory ${NUTCH_LOG_DIR}"
    mkdir ${NUTCH_LOG_DIR}
fi

if [ ! -d ${OUTPUT_DIR} ]; then
    echo "Creating output directory ${OUTPUT_DIR}"
    mkdir -p ${OUTPUT_DIR}
fi

#Need to adjust nutch RPM so it's scripts under bin are executable by all
${NUTCH_HOME}/bin/nutch crawl ${NUTCH_CONF_DIR}/../urls -dir ${OUTPUT_DIR} -depth 10 -threads 50
|
moio/spacewalk
|
search-server/spacewalk-search/nutch/crawl_jsp.sh
|
Shell
|
gpl-2.0
| 1,030 |
# fMBT, free Model Based Testing tool
# Copyright (c) 2011, Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU Lesser General Public License,
# version 2.1, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
# more details.
#
# You should have received a copy of the GNU Lesser General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
# This file includes test reporting functions for test scripts.
teststep() {
    # Start a verbose test step: print the padded step name on stdout
    # immediately and write a banner into $LOGFILE.
    TESTSTEP_DESCRIPTION=""
    TESTSTEP_PROGRESS="1"
    printf "%-50s" "$1"
    {
        echo "##########################################"
        echo "# $1"
    } >>$LOGFILE
}
teststep_quiet() {
    # Start a quiet test step: the padded banner is stashed in
    # TESTSTEP_DESCRIPTION and only shown later if the step fails.
    TESTSTEP_DESCRIPTION=$(printf "%-50s" "$1")
    TESTSTEP_PROGRESS="1"
    {
        echo "##########################################"
        echo "# $1"
    } >>$LOGFILE
}
testpassed() {
    # Report success for the step in progress; no-op otherwise.
    if [ -z "$TESTSTEP_PROGRESS" ]; then
        return
    fi
    # Quiet steps (non-empty stashed description) stay silent on success.
    case "$TESTSTEP_DESCRIPTION" in
        "") printf "passed.\n" ;;
    esac
    echo "# passed." >>$LOGFILE
    TESTSTEP_PROGRESS=""
}
testfailed() {
    # Report failure for the step in progress, dump the whole log to
    # stdout, and abort the test run with status 1.
    if [ -z "$TESTSTEP_PROGRESS" ]; then
        return
    fi
    if [ -n "$TESTSTEP_DESCRIPTION" ]; then
        # Quiet step: emit the deferred banner before the verdict.
        printf "$TESTSTEP_DESCRIPTION"
    fi
    printf "failed, see $LOGFILE\n"
    echo "# failed." >>$LOGFILE
    echo "### $LOGFILE CONTENTS ###"
    cat "$LOGFILE"
    echo "### END OF $LOGFILE ###"
    TESTSTEP_PROGRESS=""
    exit 1
}
testskipped() {
    # Report that the step in progress was skipped (not a failure).
    if [ -z "$TESTSTEP_PROGRESS" ]; then
        return
    fi
    if [ -n "$TESTSTEP_DESCRIPTION" ]; then
        # Quiet step: emit the deferred banner first.
        printf "$TESTSTEP_DESCRIPTION"
    fi
    printf "skipped.\n"
    echo "# skipped." >>$LOGFILE
    TESTSTEP_PROGRESS=""
}
check_file_exists() {
    # Fail the current test step unless $1 is a regular file.
    # (FILENAME is intentionally a global, matching the file's style.)
    FILENAME=$1
    [ -f $FILENAME ] && return
    echo "$FILENAME does not exist." >> $LOGFILE
    testfailed
}
check_minimum_num_of_lines() {
    # Fail the current test step unless file $1 exists and contains at
    # least $2 lines (as counted by `wc -l`).
    FILENAME=$1
    MINIMUM_NUMOFLINES=$2
    check_file_exists $FILENAME
    FOUND_LINES=$(wc -l $FILENAME | awk '{print $1}')
    [ $FOUND_LINES -ge $MINIMUM_NUMOFLINES ] && return
    {
        echo "$FILENAME too short."
        echo " $MINIMUM_NUMOFLINES lines required,"
        echo " $FOUND_LINES lines found."
    } >> $LOGFILE
    testfailed
}
# When invoked as "<script> installed", verify that the fmbt on PATH is
# the installed one and skip the source-tree PATH checks below.
# NOTE(review): `==` inside `[ ]` is a bash extension — confirm the
# sourcing scripts always run under bash.
if [ "$1" == "installed" ]; then
    SKIP_PATH_CHECKS=1
    teststep_quiet "check that PATH already includes fmbt..."
    WHICH_FMBT="$(which fmbt)"
    echo "using installed fmbt: $WHICH_FMBT" >> $LOGFILE
    if [ "$(basename "$WHICH_FMBT")" != "fmbt" ]; then
        testfailed
    fi
    testpassed
fi
# Default mode: the binaries under test must come from the source tree
# (utils/fmbt-gt and src/fmbt), not from an installed copy.
if [ -z "$SKIP_PATH_CHECKS" ]; then
    teststep_quiet "check that utils/fmbt-gt is used..."
    # Keep only the trailing "utils/..." part of the resolved path.
    dirandfmbtgt=$(which fmbt-gt | sed 's:.*\(utils/.*\):\1:')
    echo "fmbt-gt: $dirandfmbtgt" >> $LOGFILE
    if [ "$dirandfmbtgt" != "utils/fmbt-gt" ]; then
        testfailed
    fi
    testpassed
    teststep_quiet "check that fmbt is used from source tree..."
    dirandfmbt=$(which fmbt | sed 's:.*\(src/.*\):\1:')
    echo "using: $dirandfmbt" >> $LOGFILE
    echo "fmbt: $dirandfmbt" >> $LOGFILE
    if [ "$dirandfmbt" != "src/fmbt" ] && [ "$dirandfmbt" != "./fmbt" ]; then
        testfailed
    fi
    testpassed
    teststep_quiet "check working python version..."
    # NOTE(review): this is a lexical string comparison on the version —
    # it would misjudge e.g. "2.10"; acceptable for 2.x-era pythons.
    pyver=$(/usr/bin/env python --version 2>&1 | awk '{if ($2 >= "2.6") print "ok"}')
    if [ "$pyver" != "ok" ]; then
        echo "Python >= 2.6 required, you run $(/usr/bin/env python --version 2>&1)" >> $LOGFILE
        testfailed
    fi
    testpassed
fi
|
pyykkis/fMBT
|
test/functions.sh
|
Shell
|
lgpl-2.1
| 3,987 |
#!/bin/bash
# Conda build script for TransComb: configure with cmake against the
# Boost and bamtools shipped in the build environment, then install the
# produced binaries into $PREFIX/bin.
set -x -e
mkdir -p "${PREFIX}/bin"
# Point the toolchain at the conda environment's headers and libraries.
export INCLUDE_PATH="${PREFIX}/include/:${PREFIX}/include/bamtools/"
export LIBRARY_PATH="${PREFIX}/lib"
export LD_LIBRARY_PATH="${PREFIX}/lib"
export BOOST_INCLUDE_DIR="${PREFIX}/include"
export BOOST_LIBRARY_DIR="${PREFIX}/lib"
export LIBS='-lboost_regex -lboost_system -lboost_program_options -lboost_filesystem -lboost_timer'
# NOTE(review): bamtools paths use BUILD_PREFIX (build env) while Boost
# uses PREFIX (host env) — presumably intentional for cross-compilation;
# confirm against the recipe's requirements.
export BAMTOOLS_INCLUDE_DIR="${BUILD_PREFIX}/include/bamtools/"
export BAMTOOLS_LIBRARY_DIR="${BUILD_PREFIX}/lib/"
export CXXFLAGS="$CXXFLAGS -DUSE_BOOST -I${BOOST_INCLUDE_DIR} -L${BOOST_LIBRARY_DIR}"
export LDFLAGS="$LDFLAGS -L${BOOST_LIBRARY_DIR} -lboost_regex -lboost_filesystem -lboost_system"
# Replace the hard-coded g++ with the conda compiler.
sed -i.bak "s#g++#$CXX#g" src/CMakeLists.txt
mkdir build
cd build
cmake ../src -DCMAKE_INSTALL_PREFIX=$PREFIX -DCMAKE_CXX_COMPILER=${CXX} -DCMAKE_CXX_FLAGS="$CXXFLAGS"
make CXX=$CXX
cd ..
# Install the pre-built driver plus the binaries produced above.
cp TransComb $PREFIX/bin
cp build/Assemble $PREFIX/bin
cp build/CorrectName $PREFIX/bin
cp build/Pre_Alignment $PREFIX/bin
|
cokelaer/bioconda-recipes
|
recipes/transcomb/build.sh
|
Shell
|
mit
| 994 |
#!/bin/bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Provision the kube-apiserver on a CentOS master: write its environment
# file and systemd unit, then enable and (re)start the service.
# Positional parameters (all optional, with demo defaults):
#   $1 master address, $2 etcd server list, $3 service cluster IP range,
#   $4 admission-control plug-in list.
MASTER_ADDRESS=${1:-"8.8.8.18"}
ETCD_SERVERS=${2:-"http://8.8.8.18:2379"}
SERVICE_CLUSTER_IP_RANGE=${3:-"10.10.10.0/24"}
ADMISSION_CONTROL=${4:-""}

# Environment file consumed by the systemd unit below.  The heredoc is
# unquoted on purpose so the ${...} parameters above are expanded now.
cat <<EOF >/opt/kubernetes/cfg/kube-apiserver
# --logtostderr=true: log to standard error instead of files
KUBE_LOGTOSTDERR="--logtostderr=true"
# --v=0: log level for V logs
KUBE_LOG_LEVEL="--v=4"
# --etcd-servers=[]: List of etcd servers to watch (http://ip:port),
# comma separated. Mutually exclusive with -etcd-config
KUBE_ETCD_SERVERS="--etcd-servers=${ETCD_SERVERS}"
# --insecure-bind-address=127.0.0.1: The IP address on which to serve the --insecure-port.
KUBE_API_ADDRESS="--insecure-bind-address=0.0.0.0"
# --insecure-port=8080: The port on which to serve unsecured, unauthenticated access.
KUBE_API_PORT="--insecure-port=8080"
# --kubelet-port=10250: Kubelet port
NODE_PORT="--kubelet-port=10250"
# --advertise-address=<nil>: The IP address on which to advertise
# the apiserver to members of the cluster.
KUBE_ADVERTISE_ADDR="--advertise-address=${MASTER_ADDRESS}"
# --allow-privileged=false: If true, allow privileged containers.
KUBE_ALLOW_PRIV="--allow-privileged=false"
# --service-cluster-ip-range=<nil>: A CIDR notation IP range from which to assign service cluster IPs.
# This must not overlap with any IP ranges assigned to nodes for pods.
KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=${SERVICE_CLUSTER_IP_RANGE}"
# --admission-control="AlwaysAdmit": Ordered list of plug-ins
# to do admission control of resources into cluster.
# Comma-delimited list of:
#   LimitRanger, AlwaysDeny, SecurityContextDeny, NamespaceExists,
#   NamespaceLifecycle, NamespaceAutoProvision,
#   AlwaysAdmit, ServiceAccount, ResourceQuota, DefaultStorageClass
KUBE_ADMISSION_CONTROL="--admission-control=${ADMISSION_CONTROL}"
# --client-ca-file="": If set, any request presenting a client certificate signed
# by one of the authorities in the client-ca-file is authenticated with an identity
# corresponding to the CommonName of the client certificate.
KUBE_API_CLIENT_CA_FILE="--client-ca-file=/srv/kubernetes/ca.crt"
# --tls-cert-file="": File containing x509 Certificate for HTTPS.  (CA cert, if any,
# concatenated after server cert). If HTTPS serving is enabled, and --tls-cert-file
# and --tls-private-key-file are not provided, a self-signed certificate and key are
# generated for the public address and saved to /var/run/kubernetes.
KUBE_API_TLS_CERT_FILE="--tls-cert-file=/srv/kubernetes/server.cert"
# --tls-private-key-file="": File containing x509 private key matching --tls-cert-file.
KUBE_API_TLS_PRIVATE_KEY_FILE="--tls-private-key-file=/srv/kubernetes/server.key"
EOF

# Command line assembled from the environment file; the \${...} escapes
# keep these as literal variable references for systemd to expand from
# the EnvironmentFile at service start.
KUBE_APISERVER_OPTS="   \${KUBE_LOGTOSTDERR}         \\
                        \${KUBE_LOG_LEVEL}           \\
                        \${KUBE_ETCD_SERVERS}        \\
                        \${KUBE_API_ADDRESS}         \\
                        \${KUBE_API_PORT}            \\
                        \${NODE_PORT}                \\
                        \${KUBE_ADVERTISE_ADDR}      \\
                        \${KUBE_ALLOW_PRIV}          \\
                        \${KUBE_SERVICE_ADDRESSES}   \\
                        \${KUBE_ADMISSION_CONTROL}   \\
                        \${KUBE_API_CLIENT_CA_FILE}  \\
                        \${KUBE_API_TLS_CERT_FILE}   \\
                        \${KUBE_API_TLS_PRIVATE_KEY_FILE}"

# systemd unit; ${KUBE_APISERVER_OPTS} expands now, the inner \${...}
# references above survive into the unit file.
cat <<EOF >/usr/lib/systemd/system/kube-apiserver.service
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-apiserver
ExecStart=/opt/kubernetes/bin/kube-apiserver ${KUBE_APISERVER_OPTS}
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

# Activate the service.
systemctl daemon-reload
systemctl enable kube-apiserver
systemctl restart kube-apiserver
|
wongma7/efs-provisioner
|
vendor/k8s.io/kubernetes/cluster/centos/master/scripts/apiserver.sh
|
Shell
|
apache-2.0
| 4,424 |
#
# Copyright (c) 2003, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
#
# @test
# @bug 4858522
# @summary
# @author Steve Bohne
#
# @run shell GetMaxFileDescriptorCount.sh
#
#Set appropriate jdk
# TESTJAVA is exported by the jtreg harness; fail fast when the script
# is run standalone without it.
if [ ! -z "${TESTJAVA}" ] ; then
     jdk="$TESTJAVA"
else
     echo "--Error: TESTJAVA must be defined as the pathname of a jdk to test."
     exit 1
fi
# Compile the named test class from $TESTSRC into $TESTCLASSES, then run
# it with any extra arguments.  Exits 2 on compile failure, 3 on test
# failure.
runOne()
{
   echo "runOne $@"
   # BUGFIX: the checked-in line read "$TESTSRC/[email protected]" — an
   # email-obfuscation artifact; the source file to compile is
   # "$TESTSRC/$@.java" (first arg = test class name).
   $TESTJAVA/bin/javac -d $TESTCLASSES $TESTSRC/$@.java || exit 2
   $TESTJAVA/bin/java -classpath $TESTCLASSES $@ || exit 3
}
# Test GetMaxFileDescriptorCount if we are running on Unix
# (the MXBean attribute under test only exists on Solaris/Linux).
case `uname -s` in
     SunOS | Linux )
       runOne GetMaxFileDescriptorCount
       ;;
     * )
       echo "Ignore test when not run on Solaris or Linux"
       exit 0
       ;;
esac
exit 0
|
greghaskins/openjdk-jdk7u-jdk
|
test/com/sun/management/UnixOperatingSystemMXBean/GetMaxFileDescriptorCount.sh
|
Shell
|
gpl-2.0
| 1,736 |
#!/bin/bash
# Replace the old eZ professional licence URL with the new one in every
# PHP file below the current directory.
OLD_URL='http://ez.no/home/licences/professional'
NEW_URL='http://ez.no/products/licences/professional'
# BUGFIX: the original ran `rgrep -R '*.php' -i -l '<url>' .`, which made
# '*.php' the search *pattern* and the licence URL a file operand, so no
# PHP file was ever found.  Restrict the recursive search to *.php files
# and search for the old URL itself.
grep -R -i -l --include='*.php' -- "$OLD_URL" . | while read -r i; do
	echo "Fixing $i"
	mv "$i" "$i.tmp"
	# The .tmp copy is intentionally left behind as a backup, matching
	# the original script's behaviour.
	sed "s#$OLD_URL#$NEW_URL#" "$i.tmp" > "$i"
done
|
Mapsred/ezplatform_formation
|
ezpublish_legacy/bin/shell/fixlicense.sh
|
Shell
|
gpl-2.0
| 262 |
#!/bin/sh -eux
# Install Consul 0.5.2 with an upstart service and dnsmasq forwarding of
# the .consul DNS domain to the local Consul agent.

# Install unzip since Consul is distributed as a zip
apt-get install -y unzip

# Download Consul (version pinned to 0.5.2, linux/amd64)
cd /tmp
wget https://dl.bintray.com/mitchellh/consul/0.5.2_linux_amd64.zip -O consul.zip

# Install Consul
unzip consul.zip >/dev/null
rm consul.zip
chmod +x consul
mv consul /usr/local/bin/consul
mkdir -p /etc/consul.d
mkdir -p /mnt/consul
mkdir -p /etc/service

# Install the upstart service.  The backslash escapes (\`, \$) keep those
# expansions literal so they are evaluated when upstart runs the job,
# not while this heredoc is written.
cat >/etc/init/consul.conf <<EOF
description "Consul agent"
start on runlevel [2345]
stop on runlevel [!2345]
respawn
# This is to avoid Upstart re-spawning the process upon \`consul leave\`
normal exit 0 INT
# stop consul will not mark node as failed but left
kill signal INT
script
  if [ -f "/etc/service/consul" ]; then
    . /etc/service/consul
  fi
  # Make sure to use all our CPUs, because Consul can block a scheduler thread
  export GOMAXPROCS=\`nproc\`
  # Get the public IP
  BIND=\`ifconfig eth0 | grep "inet addr" | awk '{ print substr(\$2,6) }'\`
  exec /usr/local/bin/consul agent \\
    -config-dir="/etc/consul.d" \\
    -bind=\$BIND \\
    \${CONSUL_FLAGS} \\
    >>/var/log/consul.log 2>&1
end script
EOF

# Install dnsmasq and forward the .consul domain to Consul's DNS port.
apt-get install -y dnsmasq
echo "server=/consul/127.0.0.1#8600" > /etc/dnsmasq.d/10-consul
/etc/init.d/dnsmasq restart
|
jeremiahyan/otto
|
images/scripts/common/consul.sh
|
Shell
|
mpl-2.0
| 1,278 |
#!/bin/bash
# Re-fetch the pinned SystemJS 0.19.17 distribution files into
# jspm_packages (main loader, polyfills, and CSP-production builds,
# each with its source map and unminified source).
cd jspm_packages
base=https://raw.githubusercontent.com/systemjs/systemjs/0.19.17/dist
for f in \
    system.js \
    system.js.map \
    system.src.js \
    system-polyfills.js \
    system-polyfills.js.map \
    system-polyfills.src.js \
    system-csp-production.js \
    system-csp-production.js.map \
    system-csp-production.src.js
do
    curl -O "$base/$f"
done
|
ChrisBauer/cb.io
|
client/aurelia/reset-system.sh
|
Shell
|
isc
| 877 |
#!/bin/sh
# Run the test suites of all sibling mcap repositories.
# BUGFIX: the original always exited with the status of the *last*
# `npm test`, so earlier failures were silently dropped; run each suite
# in a subshell (all repos are siblings of this one) and aggregate.
status=0
for repo in mct-core generator-m generator-mcap generator-m-server \
            mcap-cli mcap-deploy mcap-log mcap-application-validation
do
    ( cd "../$repo" && npm test ) || status=1
done
exit $status
|
mwaylabs/mcap-developer-utils
|
test.sh
|
Shell
|
mit
| 256 |
# Restart the fwrite uwsgi service listening on :11000.
export PYTHONPATH=/opt
# BUGFIX/idiom: replace the fragile `ps|grep|grep -v grep|awk|xargs kill -9`
# pipeline (which ran `kill -9` with no arguments when no process matched)
# with pkill matching the same full command line.
pkill -9 -f 'uwsgi -s :11000' || true
uwsgi -s :11000 -w index -p 2 -d /data/log/www/uwsgi_fwrite.log -M -p 4 -t 30 -R 11000
|
fooying/fwrite
|
restart.sh
|
Shell
|
mit
| 189 |
#!/bin/bash -xv
# Wiggle the robot: alternate 90-degree turns using the `turn` helper
# provided by the sibling "actions" script.
source "$(dirname $(readlink -f $0))/actions"
for n in 90 -90 90 -90 ; do
	turn $n
	sleep 0.5
done
|
ryuichiueda/RPiM
|
201510/run.2.bash
|
Shell
|
mit
| 117 |
#!/usr/bin/env bash
# Format all Nix files and refresh every flake in this dotfiles repo,
# then stage the result for commit.

# Directory containing this script (assumed to be the repo root).
DOTFILES="$(pwd)/$(dirname "$0")"
cd "$DOTFILES"

# zsh is used here for its recursive ** globbing.
zsh -c 'nixfmt **/*.nix'

# Wrapper enabling the experimental flakes/nix-command features.
# BUGFIX: forward arguments as "$@" so none are re-split by the shell.
nix_cmd() {
  nix --extra-experimental-features flakes --extra-experimental-features nix-command "$@"
}

nix_cmd flake update "$DOTFILES/nix"
nix_cmd flake update

# BUGFIX/idiom: `if $(cmd)` executes the *output* of cmd (empty here);
# test the pipeline's exit status directly instead.
if uname -a | grep -q "Darwin"; then
  nix_cmd flake update "$DOTFILES/private/flakes/darwin"
else
  nix_cmd flake update "$DOTFILES/private/flakes/nixos"
fi

git add .
|
macalinao/dotfiles
|
update-flakes.sh
|
Shell
|
mit
| 419 |
#!/bin/sh
# Open files in an existing Emacs X frame when one is available,
# otherwise create a new frame.  Also supports reading from stdin when
# called as `<cmd> --no-wait -`.

_emacsfun()
{
    # Ask the Emacs server which X displays it already owns.
    x=`emacsclient --alternate-editor '' --eval '(x-display-list)' 2>/dev/null`
    if [ -z "$x" ] || [ "$x" = "nil" ] ;then
        # Create one if there is no X window yet.
        emacsclient --alternate-editor "" --create-frame "$@"
    else
        # prevent creating another X frame if there is at least one present.
        emacsclient --alternate-editor "" "$@"
    fi
}

# adopted from https://github.com/davidshepherd7/emacs-read-stdin/blob/master/emacs-read-stdin.sh
# If the second argument is - then write stdin to a tempfile and open the
# tempfile. (first argument will be `--no-wait` passed in by the plugin.zsh)
# BUGFIX: the original used the bash-only `function` keyword and `[[ ]]`
# under a #!/bin/sh shebang, and passed $tempfile unquoted; this version
# is POSIX sh compatible and quotes the temp file path.
if [ $# -ge 2 ] && [ "$2" = - ]; then
    tempfile="$(mktemp "emacs-stdin-$USER.XXXXXXX" --tmpdir)"
    cat - > "$tempfile"
    _emacsfun --no-wait "$tempfile"
else
    _emacsfun "$@"
fi
|
yoshw/oh-my-zsh
|
plugins/emacs/emacsclient.sh
|
Shell
|
mit
| 890 |
# qpushd, qpopd: quiet alternatives to pushd, popd
# BUGFIX: quote "$@" so directory names containing spaces or glob
# characters are passed through intact.
function qpushd() {
    pushd "$@" > /dev/null
}

function qpopd() {
    popd "$@" > /dev/null
}
# update_master_repo: Updates our copy of the master git repo
# Args: path_to_repo_dir
function update_master_repo() {
    if [ -z "$1" ]; then
        echo "update_master_repo: missing path to repository"
        exit 1
    fi
    # First run: no local copy yet, clone fresh from upstream.
    if [ ! -d "$1" ]; then
        git clone https://github.com/Nuand/bladeRF.git $1
        return
    fi
    # Existing clone: sync it to the upstream master branch.
    qpushd $1
    git fetch --all
    git checkout origin/master
    qpopd
}
# get_commit_id: resolves a revision to a single commit id for the repo.
# Args: repo_dir [revision]  (defaults to the global $REVISION)
# The commit id is returned via the global $_result.
function get_commit_id() {
    if [ -z "$1" ]; then
        echo "get_commit_id: missing directory"
        exit 1
    fi
    # Fall back to the global $REVISION when no revision was passed.
    _rev=${2:-${REVISION}}
    qpushd $1
    _result=$(git rev-list ${_rev} -n 1)
    qpopd
}
# clone_build_dir: Clones a build dir from the master
# Args: master_dir revbuilds_dir target_subdir revid
function clone_build_dir() {
    # All four arguments are mandatory.
    for _cbd_arg in "$1" "$2" "$3" "$4"; do
        if [ -z "$_cbd_arg" ]; then
            echo "clone_build_dir: missing args"
            exit 1
        fi
    done
    _master_dir=$1
    _revbuilds_dir=$2
    _target_subdir=$3
    _revid=$4
    # Clone from the local master mirror, then pin the requested revision.
    qpushd ${_revbuilds_dir}
    git clone ${_master_dir} ${_target_subdir}
    qpushd ${_target_subdir}
    git checkout ${_revid}
    qpopd
    qpopd
}
# prep_build: Prepares a build subdirectory within the source tree.
# args: [buildtype [gitrevision [cmake arguments ...]]]
#   buildtype    defaults to "Release"
#   gitrevision  defaults to "unknown" (embedded in the version string)
#   remaining args are forwarded verbatim to cmake
function prep_build() {
    if [ -z "$1" ]; then
        _build_type="Release"
    else
        _build_type="$1"
        shift
    fi
    if [ -z "$1" ]; then
        _git_revision="unknown"
    else
        _git_revision="$1"
        shift
    fi
    # Hack CMakeLists as required
    # (kept for reference; previously rewrote CMAKE_CURRENT_LIST_DIR and
    # relaxed cmake_minimum_required versions in-place)
    #for dir in host fx3_firmware host/libraries/libbladeRF
    #do
    #    qpushd $dir
    #    _CMAKE_CLD=`pwd`    # remember where we are...
    #    sed --in-place=.bak --expression="s:\${CMAKE_CURRENT_LIST_DIR}:${_CMAKE_CLD}:g" \
    #        --expression="s:cmake_minimum_required(VERSION 2.8.3):cmake_minimum_required(VERSION 2.8):g" \
    #        --expression="s:include(GNUInstallDirs):\#include(GNUInstallDirs):g" \
    #        --expression="s:cmake_minimum_required(VERSION 2.8.5):cmake_minimum_required(VERSION 2.8):g" \
    #        CMakeLists.txt
    #    qpopd
    #done
    # We're running the Lunix here
    qpushd fx3_firmware
    # XXX: is the sed still necessary?
    # Generate toolchain.mk from the sample with the Windows flag off.
    sed 's/HOST_IS_WINDOWS := y/HOST_IS_WINDOWS := n/' make/toolchain.mk.sample > make/toolchain.mk
    qpopd
    # Create the build dir and run cmake
    # (version string is overridden so artifacts identify this bot).
    mkdir build
    qpushd build
    cmake -DCMAKE_INSTALL_LIBDIR=lib \
          -DGIT_EXECUTABLE=/usr/bin/git \
          -DGIT_FOUND=True \
          -DCMAKE_BUILD_TYPE=${_build_type} \
          -DVERSION_INFO_OVERRIDE:STRING=git-${_git_revision}-buildomatic \
          -DBUILD_DOCUMENTATION=YES \
          -DENABLE_FX3_BUILD=ON $* \
          ../
    qpopd
}
# build_bladerf_fpga: Builds an FPGA image
# Args: revision size
# Returns the path of the output artifact via $_result, or nothing if it failed
function build_bladerf_fpga() {
    if [ -z "$1" ] || [ -z "$2" ]; then
        echo "build_bladerf_fpga: missing args"
        exit 1
    fi
    _revision=$1
    _size=$2
    # BUGFIX: the two lines below referenced the undefined $revision/$size
    # instead of the $_revision/$_size assigned above, so the build was
    # invoked with empty -r/-s values and no artifact was ever found.
    ${QUARTUS_PATH}/nios2eds/nios2_command_shell.sh ./build_bladerf.sh -r ${_revision} -s ${_size}
    _result=$(ls ${_revision}x${_size}*/${_revision}x${_size}.rbf | head -1)
}
# build_bladerf_firmware: builds firmware for the fx3
# Optional arg: image_type, which can be Debug or something else
# On success $_result holds the firmware image path, else "".
function build_bladerf_firmware() {
    qpushd build
    case "$1" in
        Debug) DEBUG=yes make fx3_firmware ;;
        *)     make fx3_firmware ;;
    esac
    # Publish the artifact path only if make actually produced the image.
    _result=""
    if [ -f "fx3_firmware/build/bladeRF.img" ]; then
        _result=build/fx3_firmware/build/bladeRF.img
    fi
    qpopd
}
# build_bladerf_doxygen: builds a doxygen tree for libbladeRF
# On success $_result holds the generated html directory, else "".
function build_bladerf_doxygen()
{
    qpushd build
    make libbladeRF-doxygen
    # Publish the artifact path only when doxygen produced output.
    _result=""
    if [ -d "host/libraries/libbladeRF/doc/doxygen/html" ]; then
        _result=build/host/libraries/libbladeRF/doc/doxygen/html
    fi
    qpopd
}
# run_bladerf_clangscan: runs clang's scan-build static analyzer over a
# fresh cmake tree; $_result holds the report directory, or "" on failure.
function run_bladerf_clangscan() {
    mkdir clang_scan
    qpushd clang_scan
    cmake -DCMAKE_C_COMPILER=/usr/share/clang/scan-build/ccc-analyzer \
          ../
    /usr/share/clang/scan-build/scan-build -analyze-headers -maxloop 100 -o ./report make
    # scan-build only creates ./report when it has findings/output.
    _result=""
    if [ -d "./report" ]; then
        _result="clang_scan/report"
    fi
    qpopd
}
# build_coverity_tarball: builds a coverity scan tarball from the build
# tree; $_result holds its path, or "" on failure.
function build_coverity_tarball()
{
    # BUGFIX: $_oldpath was saved but never restored, so every call
    # appended $COVERITY_PATH to PATH again; restore it before returning.
    _oldpath=$PATH
    PATH=$PATH:$COVERITY_PATH
    cov-configure --comptype gcc --compiler /opt/cypress/fx3_sdk/arm-2011.03/bin/arm-none-eabi-gcc
    qpushd build
    make clean
    rm -rf cov-int/
    cov-build --dir cov-int/ make
    tar -czvf bladeRF_coverity.tgz cov-int/
    if [ -f "bladeRF_coverity.tgz" ]
    then
        _result=build/bladeRF_coverity.tgz
    else
        _result=""
    fi
    qpopd
    PATH=$_oldpath
}
# consider_latest_symlink: updates the "latest" symlink to point at this
# revision, if certain conditions are met
# Args: builds_root revid
function consider_latest_symlink()
{
    if [ -z "$1" ] || [ -z "$2" ]; then
        echo "consider_latest_symlink: missing args"
        exit 1
    fi
    _builds_root=$1
    _revid=$2
    _artifacts_dir=${_builds_root}/${_revid}/artifacts
    qpushd $_artifacts_dir
    # Promote the build only when every expected artifact is present:
    # both FPGA images, the firmware image, and the doxygen tree.
    if [ ! -f "hostedx40.rbf" ] || [ ! -f "hostedx115.rbf" ] || \
       [ ! -f "firmware.img" ] || [ ! -d "libbladeRF_doxygen" ]; then
        echo "Did NOT update 'latest' due to missing artifacts on ${_revid}!"
    else
        echo "Pointing 'latest' at build ${_revid}"
        qpushd $_builds_root
        # Replace any existing symlink before re-creating it.
        [ -h "latest" ] && rm -f latest
        ln -s ${_revid} latest
        qpopd
    fi
    qpopd
}
|
rtucker/bladeRF-buildbot
|
docker-autobuild/inc/autobuild_inc.sh
|
Shell
|
mit
| 6,378 |
#!/bin/sh
# Push DNS record updates to BIND via nsupdate, signing with the
# per-domain TSIG key; all output is appended to the configure log.
(
# Exit on errors
set -e
# NOTE(review): ${domain} must be provided by the environment (ComodIT
# renders/executes this with it set) — confirm before running manually.
nsupdate -k /var/named/${domain}.key /var/lib/comodit/applications/bind-server/nsupdate.conf
echo -----
) >> /var/log/comodit/openshift-bind-server/configure.log 2>&1
|
comodit/demos
|
openshift/openshift-bind-server/files/configure.sh
|
Shell
|
mit
| 205 |
# config: install a freshly shipped config file NEW ("<name>.new").
#   - no existing config            -> move NEW into place
#   - existing config is identical -> drop the redundant NEW
#   - otherwise                    -> leave NEW for the admin to merge
config() {
  NEW="$1"
  OLD="$(dirname "$NEW")/$(basename "$NEW" .new)"
  # BUGFIX: quote all path expansions so filenames containing spaces or
  # glob characters survive; feed md5sum directly instead of via cat.
  # If there's no config file by that name, mv it over:
  if [ ! -r "$OLD" ]; then
    mv "$NEW" "$OLD"
  elif [ "$(md5sum < "$OLD")" = "$(md5sum < "$NEW")" ]; then
    # toss the redundant copy
    rm "$NEW"
  fi
  # Otherwise, we leave the .new copy for the admin to consider...
}
# Install every shipped .new config via config().
# BUGFIX: quote the -name pattern so the shell cannot glob-expand
# '*.new' against files in the current directory before find sees it.
find etc/nagiosgraph/ -type f -name '*.new' \
  | while read cfg ; do config $cfg ; done
# Create rrd directory and add log files
( cd var/nagios; mkdir -p rrd; chown nagios rrd;
  touch nagiosgraph-cgi.log nagiosgraph.log;
  chown apache nagiosgraph-cgi.log nagiosgraph.log )
|
panosmdma/SlackOnly-SlackBuilds
|
network/nagiosgraph/doinst.sh
|
Shell
|
mit
| 625 |
#! /bin/sh
# Configure a proxy for Vagrant: on first run create a sample
# config/vagrant_proxy.conf; on later runs load it and install the
# vagrant-proxyconf plugin through that proxy.
set -e
cd `dirname $0`/../..
CONF_FILE=config/vagrant_proxy.conf
if [ -f $CONF_FILE ]; then
  # BUGFIX: `source` is a bashism under #!/bin/sh; use the POSIX `.`.
  . $CONF_FILE
  # BUGFIX: the unquoted parentheses in the original echo lines were a
  # shell *syntax error* that aborted the whole script; quote any echo
  # argument containing metacharacters.
  echo 'Proxy Configuration (sandbox/config/vagrant_proxy.conf)'
  echo ----------------------------------------------------------------
  echo "HTTP:     $http_proxy"
  echo "HTTPS:    $https_proxy"
  echo "NO Proxy: $no_proxy"
  echo ----------------------------------------------------------------
  echo Installing vagrant-proxyconf plugin via proxy.
  echo If installation fails, edit config file and execute use-proxy.command again.
  vagrant plugin install vagrant-proxyconf
else
  echo 'export http_proxy="http://user:[email protected]:8080"' > $CONF_FILE
  echo 'export https_proxy="http://user:[email protected]:8080"' >> $CONF_FILE
  echo 'export no_proxy="localhost,127.0.0.1"' >> $CONF_FILE
  echo ----------------------------------------------------------------
  echo Sample proxy configuration file created.
  echo Edit sandbox/config/vagrant_proxy.conf, and execute
  echo use-proxy.command again.
  echo ----------------------------------------------------------------
fi
|
koseki/vagrant-layout
|
sandbox/osx/manage/use-proxy.command
|
Shell
|
mit
| 1,130 |
#!/bin/bash -e
# Build the Dockerfile in the current directory and tag the image.
docker build --tag mariusrumpf/buildnginx .
|
MariusRumpf/docker-rpi-nginx
|
build/build.sh
|
Shell
|
mit
| 60 |
#!/bin/bash
# Start the app from the project root with the bundled Node runtime
# (runtime/bin) taking precedence over any system-wide node/npm.
cd "$( dirname "${BASH_SOURCE[0]}" )"
cd ..
export PATH="$(pwd)/runtime/bin:$PATH"
npm start
|
istvan-antal/overseer
|
scripts/start.sh
|
Shell
|
mit
| 104 |
#!/bin/bash
# For every timepoint listed in timepoints.rc: ground the fluent queries
# at that timepoint, assemble a ProbLog knowledge base, and run problog,
# writing one result log per timepoint.
queries="../queries"
results="../results/fluent_queries"
mkdir -p "$results"
while read i
do
	echo -n "$i..."
	# Substitute the concrete timepoint $i for the T placeholder in
	# every ",T)" argument of the query file.
	sed s/,T\)/,"$i"\)/g "$queries/fluent_queries.pl" > ground_fluent_queries.pl
	# Knowledge base = grounded queries + the static theory files.
	cat ground_fluent_queries.pl ../event_narrative.pl ../effect_clauses.pl ../initial_beliefs.pl ../inference_rules.pl ../sec_axioms.pl > kb.pl
	problog kb.pl --output "$results/kb-$i.log"
	rm ground_fluent_queries.pl
	rm kb.pl
	echo "done."
done < timepoints.rc
|
kevinmcareavey/secproblog
|
scripts/run-fluent_queries.sh
|
Shell
|
mit
| 461 |
#!/bin/bash
# Auto-categorize newly created wiki pages mentioning Scotland.
# Expects $NEWPAGES plus the debug_start/debug_end/categorize helpers to
# be provided by the calling autocat framework.
KEYWORDS_SCOTLAND="Scotland"
KEYWORDS_SCOTLAND_ALL="$KEYWORDS_SCOTLAND"
if [ "$1" == "" ]; #Normal operation
then
  debug_start "Scotland"
  # Case-insensitive match of the keyword list against the new pages.
  SCOTLAND=$(egrep -i "$KEYWORDS_SCOTLAND" "$NEWPAGES")
  categorize "SCOTLAND" "Scotland"
  debug_end "Scotland"
fi
|
MW-autocat-script/MW-autocat-script
|
catscripts/Government/Countries/United_Kingdom/Scotland/Scotland.sh
|
Shell
|
mit
| 274 |
#! /bin/bash
# Setup Build Directorys and Brunch Build the Application
# Then: timestamp the generated assets for cache busting, gzip them, and
# rewrite index.html to reference the timestamped filenames.
set -e
echo 'building application...'
mkdir public
npm install .
brunch build -o
# UTC timestamp shared by all renamed assets.
timestamp=`date -u +%Y-%m-%d_%H-%M-%S`
# Add timestamps
echo 'build timestamps...'
mv public/javascripts/app.js "public/javascripts/app-$timestamp.js"
mv public/javascripts/vendor.js "public/javascripts/vendor-$timestamp.js"
mv public/stylesheets/app.css "public/stylesheets/app-$timestamp.css"
mv public/stylesheets/vendor.css "public/stylesheets/vendor-$timestamp.css"
# Compress Assests (keep the uncompressed originals alongside)
echo 'compress assets...'
gzip -9 -c "public/javascripts/app-$timestamp.js" > "public/javascripts/app-$timestamp.js.gz"
gzip -9 -c "public/javascripts/vendor-$timestamp.js" > "public/javascripts/vendor-$timestamp.js.gz"
gzip -9 -c "public/stylesheets/app-$timestamp.css" > "public/stylesheets/app-$timestamp.css.gz"
gzip -9 -c "public/stylesheets/vendor-$timestamp.css" > "public/stylesheets/vendor-$timestamp.css.gz"
# Substitue timestamped files: rewrite app.* / vendor.* references in
# index.html to the timestamped names.
mv public/index.html public/index.old.html
sed "s/app\.\([a-z]*\)/app-$timestamp.\1/g;s/vendor\.\([a-z]*\)/vendor-$timestamp.\1/g" <public/index.old.html > public/index.html
rm public/index.old.html
echo 'build successful!'
|
edpaget/Reno
|
build.sh
|
Shell
|
mit
| 1,221 |
#!/bin/bash
# Run the Flow type checker when available; otherwise report its absence.
if ! command -v flow > /dev/null; then
  echo 'Flow not found.'
else
  flow
fi
|
foss-haas/rotunda
|
bin/flow.sh
|
Shell
|
mit
| 89 |
# Convert every Guitar Pro file under ./gp5 to MIDI under ./midi,
# capping each conversion at 10 seconds (gtimeout = GNU timeout on mac).
for file in ./gp5/*; do
	name=$(basename "$file")
	name="${name%.*}"   # strip the extension for the output filename
	gtimeout 10 java -jar converter/TabConverter.jar "$file" midi "./midi/$name.midi"
	echo "$name"
done
|
mmathys/songsterr-crawler
|
convertGP_mac.sh
|
Shell
|
mit
| 194 |
#!/bin/bash
# Fetch the latest assessor data dump, convert the fixed-format files to
# CSV, build and populate the database, then clean up.
echo "fetching latest data file"
# NOTE(review): "GenericFiles:[email protected]" looks like an
# email-obfuscation artifact of a "user:password@host" URL — verify the
# real FTP credentials/host before relying on this line.
wget ftp://GenericFiles:[email protected]/GNRC/generic.zip
echo "unzipping"
unzip generic.zip
echo "building csv files"
python ./python/convert_to_csv.py ./GENERIC/Data\ Files/Property.txt
python ./python/convert_to_csv.py ./GENERIC/Data\ Files/Areas.txt
python ./python/convert_to_csv.py ./GENERIC/Data\ Files/Zone.txt
python ./python/convert_to_csv.py ./GENERIC/Data\ Files/Building.txt
python ./python/convert_to_csv.py ./GENERIC/Data\ Files/Sales.txt
# BUGFIX: "echco" was a typo that failed with "command not found".
echo "flattening"
python ./schema/flatten.py ./GENERIC/Support\ Tables/
echo "building database"
python ./schema/build.py
echo "populating database"
python ./schema/load.py ./
echo "cleaning up"
rm *.csv
rm -rf ./GENERIC
rm generic.zip
|
hack4reno2013/housemind
|
build.sh
|
Shell
|
mit
| 747 |
#!/bin/sh
# Run PHP-FPM 7.0 in the foreground for a process supervisor; `exec`
# replaces this shell so signals reach php-fpm directly.
exec /usr/sbin/php-fpm7.0 --nodaemonize --fpm-config /etc/php/7.0/fpm/php-fpm.conf
|
velaluqa/docker-iredmail
|
build/services/php-fpm.sh
|
Shell
|
mit
| 94 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for CESA-2011:0153
#
# Security announcement date: 2011-04-14 23:47:57 UTC
# Script generation date:     2017-01-01 21:10:11 UTC
#
# Operating System: CentOS 5
# Architecture: x86_64
#
# Vulnerable packages fix on version:
# - exim.x86_64:4.63-5.el5_6.2
# - exim-mon.x86_64:4.63-5.el5_6.2
# - exim-sa.x86_64:4.63-5.el5_6.2
#
# Last versions recommended by security team:
# - exim.x86_64:4.63-5.el5_6.2
# - exim-mon.x86_64:4.63-5.el5_6.2
# - exim-sa.x86_64:4.63-5.el5_6.2
#
# CVE List:
# - CVE-2010-4345
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
# NOTE(review): these yum arguments pin only the upstream version
# (4.63), not the patched release (-5.el5_6.2); yum resolves to the
# newest matching package available in the configured repositories.
sudo yum install exim.x86_64-4.63 -y
sudo yum install exim-mon.x86_64-4.63 -y
sudo yum install exim-sa.x86_64-4.63 -y
|
Cyberwatch/cbw-security-fixes
|
CentOS_5/x86_64/2011/CESA-2011:0153.sh
|
Shell
|
mit
| 822 |
# CYBERWATCH SAS - 2016
#
# Security fix for RHSA-2013:0848
#
# Security announcement date: 2013-05-21 19:35:21 UTC
# Script generation date:     2016-01-11 19:14:11 UTC
#
# Operating System: Red Hat 6
# Architecture: x86_64
#
# Installs the spacewalk-backend packages recommended by the security
# team: 2.3.3 (-23.el6sat) for the current packages, plus the two
# legacy packages that remain on their older streams
# (spacewalk-backend-upload-server 1.2.13, spacewalk-backend-xp 1.7.38).
#
# CVE List:
# - CVE-2013-2056
#
# More details:
# - https://www.cyberwatch.fr/notices/RHSA-2013:0848
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE

# BUGFIX: the generated script issued every `yum install` twice (once
# per vulnerable package stream, with identical arguments); install each
# package exactly once instead.
for pkg in \
    spacewalk-backend-2.3.3 \
    spacewalk-backend-app-2.3.3 \
    spacewalk-backend-applet-2.3.3 \
    spacewalk-backend-config-files-2.3.3 \
    spacewalk-backend-config-files-common-2.3.3 \
    spacewalk-backend-config-files-tool-2.3.3 \
    spacewalk-backend-iss-2.3.3 \
    spacewalk-backend-iss-export-2.3.3 \
    spacewalk-backend-libs-2.3.3 \
    spacewalk-backend-package-push-server-2.3.3 \
    spacewalk-backend-server-2.3.3 \
    spacewalk-backend-sql-2.3.3 \
    spacewalk-backend-sql-oracle-2.3.3 \
    spacewalk-backend-tools-2.3.3 \
    spacewalk-backend-upload-server-1.2.13 \
    spacewalk-backend-xml-export-libs-2.3.3 \
    spacewalk-backend-xmlrpc-2.3.3 \
    spacewalk-backend-xp-1.7.38
do
    sudo yum install "$pkg" -y
done
|
Cyberwatch/cbw-security-fixes
|
Red_Hat_6/x86_64/2013/RHSA-2013:0848.sh
|
Shell
|
mit
| 6,372 |
#!/bin/bash
# MYSQL (Mariadb)
# Sanity check: if the MySQL client binary is missing from /usr/bin,
# append a warning line to the dev error log.
mysql_path=/usr/bin/mysql
error=~/.sanity/.logs/.dev-error.txt
# Check that the MySQL database is installed (original comment was
# Portuguese: "Verificando a existência do Mysql").
[ -e $mysql_path ] || echo "# Mysql - O banco de dados Mysql não está instalado." >> $error
|
igormoraisn/sanity
|
modules/development/mysql.sh
|
Shell
|
mit
| 240 |
# Expose the shared ../audio directory inside static/ via a symlink.
ln -s ../audio/ static/audio
|
tiagoft/macunaima
|
src/configure_directories.sh
|
Shell
|
mit
| 30 |
#!/bin/sh -ve
# Re-install the local ShortSands Cordova plugins (AWS, VideoPlayer,
# AudioPlayer) from the source tree and re-prepare both platforms.
# Commented-out lines are kept as a record of occasionally-needed steps.
#cordova plugin remove cordova-sqlite-ext
cordova plugin remove com-shortsands-videoplayer
cordova plugin remove com-shortsands-audioplayer
cordova plugin remove com-shortsands-aws --force
#cordova plugin remove com-shortsands-utility
#cordova platform remove ios
#cordova platform add ios
cordova plugin add $HOME/ShortSands/BibleApp/Plugins/AWS
cordova plugin add $HOME/ShortSands/BibleApp/Plugins/VideoPlayer
cordova plugin add $HOME/ShortSands/BibleApp/Plugins/AudioPlayer
#cordova plugin add $HOME/ShortSands/BibleApp/Plugins/Utility --nofetch
#cp plugins/com-shortsands-videoplayer/src/android/build-extras.gradle platforms/android/
#cordova plugin add cordova-sqlite-ext --save
cordova prepare ios
cordova prepare android
|
garygriswold/Bible.js
|
OBSOLETE/YourBible/Temp.sh
|
Shell
|
mit
| 748 |
#!/bin/bash
# Provision the Vagrant VM: install Java 8 and deploy the Spring Boot .deb.

# install java 8
sudo apt-get update
sudo apt-get install openjdk-8-jre-headless --yes --no-install-recommends

# install the spring boot app
# Expand the glob via `set --` so the existence check works even when several
# .deb files are present ('[ -f a.deb b.deb ]' is a test syntax error, which
# silently took the error branch with the original '[ -f /vagrant/target/*.deb ]').
set -- /vagrant/target/*.deb
if [ -f "$1" ]
then
    sudo dpkg -i "$@"
else
    echo "ERROR: No *.deb file found. Did you run 'mvn package' already?" >&2
    exit 1
fi

echo "app is up and running under http://192.168.23.42:8080/health"
|
hennr/springBootAsDebianPackageDemo
|
provision-vm.sh
|
Shell
|
mit
| 388 |
#!/bin/bash
# Regenerates src/prelude.c: embeds src/prelude.scm as a C string constant
# (prelude_source) so the interpreter can compile it in.

# Always work relative to this script's directory; bail if that fails.
cd "$(dirname "$0")" || exit 1
SRC=src/prelude.scm
DST=src/prelude.c
HEADER=prelude.h

echo '// Copyright 2016 Mitchell Kember. Subject to the MIT License.' > "$DST"
# printf replaces the non-portable `echo -e` for the blank lines around the
# #include (same bytes emitted: \n, the include line, then a blank line).
printf '\n#include "%s"\n\n' "$HEADER" >> "$DST"
echo 'const char *const prelude_source =' >> "$DST"
# Drop Scheme comment lines (leading ';') and blank lines, escape backslashes
# and double quotes, wrap each line in a C string literal, and terminate the
# final line with ';'.
sed -e '/^;.*$/d;/^$/d;s/\\/\\\\/g;s/"/\\"/g;s/^/ "/;s/$/"/;$s/$/;/' \
    < "$SRC" >> "$DST"
|
mk12/eva
|
generate.sh
|
Shell
|
mit
| 352 |
#!/usr/bin/env bash
# Create the directory layout the rest of the dotfiles scripts expect.
set -euo pipefail

DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"

. "$DIR/base.sh"
. "$DIR/ansi"

ansi --yellow "Ensuring required directories exist.."

# $HOME is quoted so a home path containing spaces does not word-split.
mkdir -pv "$HOME/workspace/opt"
mkdir -pv "$HOME/workspace/go"

. "$DIR/minimalcheck.sh"

mkdir -pv "$HOME/mail/personal"
mkdir -pv "$HOME/mail/work"
mkdir -pv "$HOME/mail/.attachments"
|
benmezger/dotfiles
|
scripts/ensure_directories.sh
|
Shell
|
mit
| 369 |
# Copyright (c) 2013 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Traffic shaping for the Aibcoin p2p port (5223): HTB classes on ${IF} plus
# iptables packet marking cap outbound protocol traffic at ${LIMIT}, while
# traffic to/from ${LOCALNET} stays unshaped. Must be run as root.
#network interface on which to limit traffic
IF="eth0"
#limit of the network interface in question
LINKCEIL="1gbit"
#limit outbound Aibcoin protocol traffic to this rate
LIMIT="160kbit"
#defines the address space for which you wish to disable rate limiting
LOCALNET="192.168.0.0/16"
#delete existing rules
#NOTE(review): this returns non-zero when no root qdisc exists yet; harmless
#here, but append "|| true" if the script is ever run under `set -e`.
tc qdisc del dev ${IF} root
#add root class
tc qdisc add dev ${IF} root handle 1: htb default 10
#add parent class
tc class add dev ${IF} parent 1: classid 1:1 htb rate ${LINKCEIL} ceil ${LINKCEIL}
#add our two classes. one unlimited, another limited
tc class add dev ${IF} parent 1:1 classid 1:10 htb rate ${LINKCEIL} ceil ${LINKCEIL} prio 0
tc class add dev ${IF} parent 1:1 classid 1:11 htb rate ${LIMIT} ceil ${LIMIT} prio 1
#add handles to our classes so packets marked with <x> go into the class with "... handle <x> fw ..."
tc filter add dev ${IF} parent 1: protocol ip prio 1 handle 1 fw classid 1:10
tc filter add dev ${IF} parent 1: protocol ip prio 2 handle 2 fw classid 1:11
#delete any existing rules
#disable for now
#ret=0
#while [ $ret -eq 0 ]; do
#	iptables -t mangle -D OUTPUT 1
#	ret=$?
#done
#limit outgoing traffic to and from port 5223. but not when dealing with a host on the local network
#  (defined by $LOCALNET)
#  --set-mark marks packages matching these criteria with the number "2"
#  these packages are filtered by the tc filter with "handle 2"
#  this filter sends the packages into the 1:11 class, and this class is limited to ${LIMIT}
iptables -t mangle -A OUTPUT -p tcp -m tcp --dport 5223 ! -d ${LOCALNET} -j MARK --set-mark 0x2
iptables -t mangle -A OUTPUT -p tcp -m tcp --sport 5223 ! -d ${LOCALNET} -j MARK --set-mark 0x2
|
iobond/aib
|
contrib/qos/tc.sh
|
Shell
|
mit
| 1,857 |
#!/usr/bin/env zsh
# Verbose mode is enabled here rather than on the shebang: on Linux, `env`
# receives "zsh -v" as a single program name and the script fails to start.
set -v

# Build dependencies for compiling Ruby via rbenv/ruby-build.
sudo apt-get install -y git-core curl zlib1g-dev build-essential libssl-dev libreadline-dev libyaml-dev libsqlite3-dev sqlite3 libxml2-dev libxslt1-dev libcurl4-openssl-dev python-software-properties

# Clone rbenv into ./.rbenv. GitHub no longer serves the unauthenticated
# git:// protocol, so clone over https instead.
git clone https://github.com/sstephenson/rbenv.git .rbenv
|
NinoScript/ServerConfigurator
|
5.rbenv.sh
|
Shell
|
mit
| 294 |
# Wire fasd's shell hooks into this session, but only when the fasd
# executable can actually be found on $PATH.
if command -v fasd > /dev/null 2> /dev/null
then
  eval "$(fasd --init auto)"
fi
|
perdian/dotfiles
|
zsh/profiles/default/zshrc.d/fasd.zsh
|
Shell
|
mit
| 70 |
#!/bin/bash
# Bring every installed package up to date; the first provisioning run of a
# fresh box can take a while.
yum -y update
|
fuzziebrain/vagrant-orclapex-centos-7.0
|
shell/bootstrap.sh
|
Shell
|
mit
| 73 |
#!/bin/bash
# Launches the HAB flight controller, logging stdout/stderr to timestamped files.

# Bail out if the data directory is missing rather than logging into whatever
# directory the script happened to be started from.
cd /home/russell/Desktop/russell_j/HAB_project/flight_data_2.0/ || exit 1

#This function returns a time/date string that is file-name friendly.
# (spaces -> underscores, colons -> dashes; the unquoted $(date) also
# collapses any run of spaces into a single separator)
getDate()
{
	echo $(date) | sed -e "s/ /_/g" | sed -e "s/:/-/g"
}

#This assigns the output of the "getDate()" function to the variable "DATE"
DATE=$(getDate)

#This runs the flight controller and writes the outputs to timestamped files.
# Redirection targets are quoted in case the date string ever contains
# shell-special characters.
./flight_controller_2.py 1> "fc.out_$DATE" 2> "fc.err_$DATE"
|
duwaar/BalloonSat-Flight-Controller
|
takeoff.sh
|
Shell
|
mit
| 448 |
#!/bin/bash
# bash (not plain sh) is required: this script uses arrays
# (RSYNC_PROTECT_TMP_FILES), [[ ]], the `function` keyword and an ERR trap,
# none of which are guaranteed under a POSIX /bin/sh.
set -e
set -u
set -o pipefail

function on_error {
  echo "$(realpath -mq "${0}"):$1: error: Unexpected failure"
}
trap 'on_error $LINENO' ERR

if [ -z ${FRAMEWORKS_FOLDER_PATH+x} ]; then
  # If FRAMEWORKS_FOLDER_PATH is not set, then there's nowhere for us to copy
  # frameworks to, so exit 0 (signalling the script phase was successful).
  exit 0
fi

echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"

COCOAPODS_PARALLEL_CODE_SIGN="${COCOAPODS_PARALLEL_CODE_SIGN:-false}"
SWIFT_STDLIB_PATH="${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}"
BCSYMBOLMAP_DIR="BCSymbolMaps"

# This protects against multiple targets copying the same framework dependency at the same time. The solution
# was originally proposed here: https://lists.samba.org/archive/rsync/2008-February/020158.html
RSYNC_PROTECT_TMP_FILES=(--filter "P .*.??????")
# Copies and strips a vendored framework
#
# Arguments:
#   $1 - framework path or name; resolved under BUILT_PRODUCTS_DIR (first as
#        given, then by basename), falling back to $1 itself.
# Side effects:
#   - rsyncs the framework into TARGET_BUILD_DIR/FRAMEWORKS_FOLDER_PATH
#   - installs any bundled .bcsymbolmap files and removes them from the copy
#   - strips architectures not being built (see strip_invalid_archs)
#   - re-signs the copied framework (see code_sign_if_enabled)
# NOTE(review): if none of the three -r checks match, "source" is never set
# and the "${source}" expansion below aborts under `set -u` -- confirm
# callers always pass an existing framework.
install_framework()
{
  if [ -r "${BUILT_PRODUCTS_DIR}/$1" ]; then
    local source="${BUILT_PRODUCTS_DIR}/$1"
  elif [ -r "${BUILT_PRODUCTS_DIR}/$(basename "$1")" ]; then
    local source="${BUILT_PRODUCTS_DIR}/$(basename "$1")"
  elif [ -r "$1" ]; then
    local source="$1"
  fi

  local destination="${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"

  if [ -L "${source}" ]; then
    echo "Symlinked..."
    source="$(readlink "${source}")"
  fi

  if [ -d "${source}/${BCSYMBOLMAP_DIR}" ]; then
    # Locate and install any .bcsymbolmaps if present, and remove them from the .framework before the framework is copied
    find "${source}/${BCSYMBOLMAP_DIR}" -name "*.bcsymbolmap"|while read f; do
      echo "Installing $f"
      install_bcsymbolmap "$f" "$destination"
      rm "$f"
    done
    rmdir "${source}/${BCSYMBOLMAP_DIR}"
  fi

  # Use filter instead of exclude so missing patterns don't throw errors.
  echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --links --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${destination}\""
  rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --links --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${destination}"

  local basename
  basename="$(basename -s .framework "$1")"
  binary="${destination}/${basename}.framework/${basename}"

  if ! [ -r "$binary" ]; then
    binary="${destination}/${basename}"
  elif [ -L "${binary}" ]; then
    echo "Destination binary is symlinked..."
    dirname="$(dirname "${binary}")"
    binary="${dirname}/$(readlink "${binary}")"
  fi

  # Strip invalid architectures so "fat" simulator / device frameworks work on device
  if [[ "$(file "$binary")" == *"dynamically linked shared library"* ]]; then
    strip_invalid_archs "$binary"
  fi

  # Resign the code if required by the build settings to avoid unstable apps
  code_sign_if_enabled "${destination}/$(basename "$1")"

  # Embed linked Swift runtime libraries. No longer necessary as of Xcode 7.
  if [ "${XCODE_VERSION_MAJOR}" -lt 7 ]; then
    local swift_runtime_libs
    swift_runtime_libs=$(xcrun otool -LX "$binary" | grep --color=never @rpath/libswift | sed -E s/@rpath\\/\(.+dylib\).*/\\1/g | uniq -u)
    for lib in $swift_runtime_libs; do
      echo "rsync -auv \"${SWIFT_STDLIB_PATH}/${lib}\" \"${destination}\""
      rsync -auv "${SWIFT_STDLIB_PATH}/${lib}" "${destination}"
      code_sign_if_enabled "${destination}/${lib}"
    done
  fi
}
# Copies and strips a vendored dSYM
#
# Arguments:
#   $1 - path to the .dSYM bundle
#   $2 - warn_missing_arch flag passed through to strip_invalid_archs
#        (default "true")
# Copies the dSYM into DERIVED_FILES_DIR, strips architectures not being
# built, then moves the result into DWARF_DSYM_FOLDER_PATH (or touches a
# placeholder so Xcode's input/output tracking does not re-run this script).
install_dsym() {
  local source="$1"
  warn_missing_arch=${2:-true}
  if [ -r "$source" ]; then
    # Copy the dSYM into the targets temp dir.
    echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${DERIVED_FILES_DIR}\""
    rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${DERIVED_FILES_DIR}"

    local basename
    basename="$(basename -s .dSYM "$source")"
    binary_name="$(ls "$source/Contents/Resources/DWARF")"
    binary="${DERIVED_FILES_DIR}/${basename}.dSYM/Contents/Resources/DWARF/${binary_name}"

    # Strip invalid architectures from the dSYM.
    if [[ "$(file "$binary")" == *"Mach-O "*"dSYM companion"* ]]; then
      strip_invalid_archs "$binary" "$warn_missing_arch"
    fi
    if [[ $STRIP_BINARY_RETVAL == 0 ]]; then
      # Move the stripped file into its final destination.
      # NOTE(review): the echoed command says "${basename}.framework.dSYM" but
      # the executed rsync below uses "${basename}.dSYM" -- the log line looks
      # stale; the executed form appears to be the correct one.
      echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --links --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${DERIVED_FILES_DIR}/${basename}.framework.dSYM\" \"${DWARF_DSYM_FOLDER_PATH}\""
      rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --links --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${DERIVED_FILES_DIR}/${basename}.dSYM" "${DWARF_DSYM_FOLDER_PATH}"
    else
      # The dSYM was not stripped at all, in this case touch a fake folder so the input/output paths from Xcode do not reexecute this script because the file is missing.
      touch "${DWARF_DSYM_FOLDER_PATH}/${basename}.dSYM"
    fi
  fi
}
# Used as a return value for each invocation of `strip_invalid_archs` function.
STRIP_BINARY_RETVAL=0

# Strip invalid architectures
#
# Arguments:
#   $1 - path to a fat Mach-O binary
#   $2 - whether to warn when no architecture matches (default "true")
# Result protocol: sets STRIP_BINARY_RETVAL to 0 when the binary now only
# contains architectures listed in ARCHS, or 1 when the binary shares no
# architecture with the current build (in which case nothing is stripped).
strip_invalid_archs() {
  binary="$1"
  warn_missing_arch=${2:-true}
  # Get architectures for current target binary
  binary_archs="$(lipo -info "$binary" | rev | cut -d ':' -f1 | awk '{$1=$1;print}' | rev)"
  # Intersect them with the architectures we are building for
  intersected_archs="$(echo ${ARCHS[@]} ${binary_archs[@]} | tr ' ' '\n' | sort | uniq -d)"
  # If there are no archs supported by this binary then warn the user
  if [[ -z "$intersected_archs" ]]; then
    if [[ "$warn_missing_arch" == "true" ]]; then
      echo "warning: [CP] Vendored binary '$binary' contains architectures ($binary_archs) none of which match the current build architectures ($ARCHS)."
    fi
    STRIP_BINARY_RETVAL=1
    return
  fi
  stripped=""
  for arch in $binary_archs; do
    if ! [[ "${ARCHS}" == *"$arch"* ]]; then
      # Strip non-valid architectures in-place
      lipo -remove "$arch" -output "$binary" "$binary"
      stripped="$stripped $arch"
    fi
  done
  if [[ "$stripped" ]]; then
    echo "Stripped $binary of architectures:$stripped"
  fi
  STRIP_BINARY_RETVAL=0
}
# Copies the bcsymbolmap files of a vendored framework
#
# Arguments:
#   $1 - path to a .bcsymbolmap file
# Copies it into BUILT_PRODUCTS_DIR via rsync (with the shared
# RSYNC_PROTECT_TMP_FILES filter against concurrent target copies).
install_bcsymbolmap() {
    local bcsymbolmap_path="$1"
    local destination="${BUILT_PRODUCTS_DIR}"
    # The inner quotes in the log line are escaped (as in the other functions
    # of this script); previously they closed/reopened the string, so the
    # echoed command dropped its quoting and word-split the filter arguments.
    echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${bcsymbolmap_path}\" \"${destination}\""
    rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${bcsymbolmap_path}" "${destination}"
}
# Signs a framework with the provided identity
#
# Arguments:
#   $1 - path to the framework/bundle to sign
# Reads: EXPANDED_CODE_SIGN_IDENTITY(_NAME), CODE_SIGNING_REQUIRED,
#   CODE_SIGNING_ALLOWED, OTHER_CODE_SIGN_FLAGS, COCOAPODS_PARALLEL_CODE_SIGN.
# When parallel signing is enabled, '&' is appended and the eval backgrounds
# the codesign job; the script-level `wait` at the bottom of the file reaps it.
code_sign_if_enabled() {
  if [ -n "${EXPANDED_CODE_SIGN_IDENTITY:-}" -a "${CODE_SIGNING_REQUIRED:-}" != "NO" -a "${CODE_SIGNING_ALLOWED}" != "NO" ]; then
    # Use the current code_sign_identity
    echo "Code Signing $1 with Identity ${EXPANDED_CODE_SIGN_IDENTITY_NAME}"
    local code_sign_cmd="/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} ${OTHER_CODE_SIGN_FLAGS:-} --preserve-metadata=identifier,entitlements '$1'"

    if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
      code_sign_cmd="$code_sign_cmd &"
    fi
    echo "$code_sign_cmd"
    eval "$code_sign_cmd"
  fi
}
# Embed the vendored frameworks for the active build configuration; Debug and
# Release install the same set of pods.
case "$CONFIGURATION" in
  Debug|Release)
    install_framework "${BUILT_PRODUCTS_DIR}/Alamofire/Alamofire.framework"
    install_framework "${BUILT_PRODUCTS_DIR}/BDTests/BDTests.framework"
    install_framework "${BUILT_PRODUCTS_DIR}/OHHTTPStubs/OHHTTPStubs.framework"
    ;;
esac

# If signing was backgrounded (parallel mode), block until every job is done.
if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
  wait
fi
|
bytedissident/BDTests
|
Example/BDTestsExample/Pods/Target Support Files/Pods-BDTestsExample/Pods-BDTestsExample-frameworks.sh
|
Shell
|
mit
| 8,750 |
#!/bin/sh
# CYBERWATCH SAS - 2016
#
# Security fix for RHSA-2013:0671
#
# Security announcement date: 2013-03-21 18:33:14 UTC
# Script generation date:     2016-05-12 18:11:19 UTC
#
# Operating System: Red Hat 6
# Architecture: x86_64
#
# Vulnerable packages fix on version:
#    - openstack-packstack.noarch:2012.2.3-0.1.dev454.el6ost
#
# Last versions recommended by security team:
#    - openstack-packstack.noarch:2014.1.1-0.46.dev1280.el6ost
#
# CVE List:
#    - CVE-2013-1815
#
# More details:
#    - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
# NOTE(review): "openstack-packstack.noarch-2014.1.1" mixes yum's name.arch
# and name-version spec forms; verify yum resolves it, or use
# "openstack-packstack-2014.1.1" instead.
sudo yum install openstack-packstack.noarch-2014.1.1 -y
|
Cyberwatch/cbw-security-fixes
|
Red_Hat_6/x86_64/2013/RHSA-2013:0671.sh
|
Shell
|
mit
| 669 |
#!/bin/bash
# bash (not plain sh) is required: later functions in this script rely on
# [[ ]] and ${PIPESTATUS[0]}, which are bashisms.
set -e

echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"

SWIFT_STDLIB_PATH="${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}"
# Copies and strips a vendored framework
#
# Arguments:
#   $1 - framework path or name; resolved under BUILT_PRODUCTS_DIR (first as
#        given, then by basename), falling back to $1 itself.
# Side effects: rsyncs the framework into
#   TARGET_BUILD_DIR/FRAMEWORKS_FOLDER_PATH, strips architectures not in
#   VALID_ARCHS, and re-signs the copy (see code_sign_if_enabled).
install_framework()
{
  if [ -r "${BUILT_PRODUCTS_DIR}/$1" ]; then
    local source="${BUILT_PRODUCTS_DIR}/$1"
  elif [ -r "${BUILT_PRODUCTS_DIR}/$(basename "$1")" ]; then
    local source="${BUILT_PRODUCTS_DIR}/$(basename "$1")"
  elif [ -r "$1" ]; then
    local source="$1"
  fi

  local destination="${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"

  if [ -L "${source}" ]; then
    echo "Symlinked..."
    source="$(readlink "${source}")"
  fi

  # use filter instead of exclude so missing patterns don't throw errors
  echo "rsync -av --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${destination}\""
  rsync -av --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${destination}"

  local basename
  basename="$(basename -s .framework "$1")"
  binary="${destination}/${basename}.framework/${basename}"
  if ! [ -r "$binary" ]; then
    binary="${destination}/${basename}"
  fi

  # Strip invalid architectures so "fat" simulator / device frameworks work on device
  if [[ "$(file "$binary")" == *"dynamically linked shared library"* ]]; then
    strip_invalid_archs "$binary"
  fi

  # Resign the code if required by the build settings to avoid unstable apps
  code_sign_if_enabled "${destination}/$(basename "$1")"

  # Embed linked Swift runtime libraries. No longer necessary as of Xcode 7.
  if [ "${XCODE_VERSION_MAJOR}" -lt 7 ]; then
    local swift_runtime_libs
    # NOTE(review): `&& exit ${PIPESTATUS[0]}` runs inside the command
    # substitution's subshell, so it cannot terminate this script -- it only
    # influences the substitution's exit status; confirm this is intentional.
    swift_runtime_libs=$(xcrun otool -LX "$binary" | grep --color=never @rpath/libswift | sed -E s/@rpath\\/\(.+dylib\).*/\\1/g | uniq -u && exit ${PIPESTATUS[0]})
    for lib in $swift_runtime_libs; do
      echo "rsync -auv \"${SWIFT_STDLIB_PATH}/${lib}\" \"${destination}\""
      rsync -auv "${SWIFT_STDLIB_PATH}/${lib}" "${destination}"
      code_sign_if_enabled "${destination}/${lib}"
    done
  fi
}
# Signs a framework with the provided identity
#
# Arguments:
#   $1 - path to the framework to sign
# Reads: EXPANDED_CODE_SIGN_IDENTITY(_NAME), CODE_SIGNING_REQUIRED,
#   CODE_SIGNING_ALLOWED, OTHER_CODE_SIGN_FLAGS.
# NOTE(review): ${EXPANDED_CODE_SIGN_IDENTITY} and ${OTHER_CODE_SIGN_FLAGS}
# are expanded unquoted in the codesign invocation, so values containing
# spaces would word-split -- confirm the Xcode-provided values are safe here.
code_sign_if_enabled() {
  if [ -n "${EXPANDED_CODE_SIGN_IDENTITY}" -a "${CODE_SIGNING_REQUIRED}" != "NO" -a "${CODE_SIGNING_ALLOWED}" != "NO" ]; then
    # Use the current code_sign_identity
    echo "Code Signing $1 with Identity ${EXPANDED_CODE_SIGN_IDENTITY_NAME}"
    echo "/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} ${OTHER_CODE_SIGN_FLAGS} --preserve-metadata=identifier,entitlements \"$1\""
    /usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} ${OTHER_CODE_SIGN_FLAGS} --preserve-metadata=identifier,entitlements "$1"
  fi
}
# Strip invalid architectures
#
# Arguments:
#   $1 - path to a fat Mach-O binary
# Removes (via lipo, in place) every architecture slice not listed in the
# VALID_ARCHS build setting; exits the script if lipo fails.
strip_invalid_archs() {
  binary="$1"
  # Get architectures for current file
  archs="$(lipo -info "$binary" | rev | cut -d ':' -f1 | rev)"
  stripped=""
  for arch in $archs; do
    if ! [[ "${VALID_ARCHS}" == *"$arch"* ]]; then
      # Strip non-valid architectures in-place
      lipo -remove "$arch" -output "$binary" "$binary" || exit 1
      stripped="$stripped $arch"
    fi
  done
  if [[ "$stripped" ]]; then
    echo "Stripped $binary of architectures:$stripped"
  fi
}
# Embed the KFWatchKitAnimations pod for the active configuration; the pod
# set is identical in Debug and Release.
case "$CONFIGURATION" in
  Debug|Release)
    install_framework "$BUILT_PRODUCTS_DIR/KFWatchKitAnimations/KFWatchKitAnimations.framework"
    ;;
esac
|
kiavashfaisali/KFWatchKitAnimations
|
Example/Pods/Target Support Files/Pods-KFWatchKitAnimations_Example/Pods-KFWatchKitAnimations_Example-frameworks.sh
|
Shell
|
mit
| 3,657 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.