code | repo_name | path | language | license | size
---|---|---|---|---|---|
#!/bin/bash
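# Container entrypoint: bring up PHP-FPM and nginx, then restart cron.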
service php7.0-fpm start
service nginx start
service cron restart
|
vrkansagara/mwop.net
|
.aws/application-start.sh
|
Shell
|
bsd-2-clause
| 78 |
#!/bin/bash
# launch from /srv/skaraman
#export LD_LIBRARY_PATH=/srv/skaraman/anaconda/envs/memex-weapons/lib/:$LD_LIBRARY_PATH
#export LD_LIBRARY_PATH="/srv/skaraman/caffe/build/lib:/usr/local/cuda-6.5/lib64:/srv/skaraman/anaconda/envs/memex-weapons/lib/:$LD_LIBRARY_PATH"
base_deep_dir=/srv/skaraman/weapons/memex-weapons/deepmodels
#export LD_LIBRARY_PATH="/srv/skaraman/caffe/build/lib:/usr/local/cuda-6.5/lib64:/usr/lib/x86_64-linux-gnu/:/srv/skaraman/anaconda/envs/memex-weapons/lib/:$LD_LIBRARY_PATH"
export LD_LIBRARY_PATH="/srv/skaraman/anaconda/envs/memex-weapons/lib/:$LD_LIBRARY_PATH"
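# Train the network with the annotated-LMDB solver, initializing from the pretrained CNN_20K weights.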
./tools/caffe train --solver=$base_deep_dir/CNN_20K_solver_ukhack_annotated_lmdb.txt --weights=$base_deep_dir/CNN_20K.caffemodel
|
svebk/memex-weapons
|
deepmodels/train_annotated_lmdb.sh
|
Shell
|
bsd-2-clause
| 728 |
#!/bin/bash
# this file is mostly meant to be used by the author himself.
root=`pwd`
home=~
version=$1
force=$2
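# Build nginx with SSL and the ndk/echo/lua/set-misc modules (plus this module), with mail and several stock http modules disabled and debugging enabled.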
#--with-cc=gcc46 \
ngx-build $force $version \
--with-http_ssl_module \
--with-cc-opt="-I$OPENSSL_INC -I$PCRE_INC" \
--with-ld-opt="-L$OPENSSL_LIB -L$PCRE_LIB -Wl,-rpath,$OPENSSL_LIB:$PCRE_LIB:$LIBDRIZZLE_LIB" \
--without-mail_pop3_module \
--without-mail_imap_module \
--without-mail_smtp_module \
--without-http_upstream_ip_hash_module \
--without-http_empty_gif_module \
--without-http_memcached_module \
--without-http_referer_module \
--without-http_autoindex_module \
--without-http_auth_basic_module \
--without-http_userid_module \
--add-module=$root/../ndk-nginx-module \
--add-module=$root/../echo-nginx-module \
--add-module=$root/../lua-nginx-module \
--add-module=$root/../set-misc-nginx-module \
--add-module=$root \
--with-debug
#--add-module=$home/work/ndk \
#--without-http_ssi_module # we cannot disable ssi because echo_location_async depends on it (i dunno why?!)
|
LomoX-Offical/nginx-openresty-windows
|
src/encrypted-session-nginx-module-0.06/util/build.sh
|
Shell
|
bsd-2-clause
| 1,242 |
#!/bin/bash
cd pytition
echo "Creating 3 Organizations (RAP, Greenpeace and Attac)..."
python3 ./cli_pytition.py gen_orga --orga RAP
python3 ./cli_pytition.py gen_orga --orga Greenpeace
python3 ./cli_pytition.py gen_orga --orga Attac
echo "Creating 3 users (john, max, julia)..."
python3 ./cli_pytition.py gen_user --username john --first-name John --last-name Smith -p john
python3 ./cli_pytition.py gen_user --username max --first-name Max --last-name More -p max
python3 ./cli_pytition.py gen_user --username julia --first-name Julia --last-name Steven -p julia
echo "Make John join RAP and Greenpeace..."
python3 ./cli_pytition.py join_org --orga RAP --user john
python3 ./cli_pytition.py join_org --orga Greenpeace --user john
echo "Make Julia join Greenpeace and Attac"
python3 ./cli_pytition.py join_org --orga Attac --user julia
python3 ./cli_pytition.py join_org --orga Greenpeace --user julia
echo "Make Max join Attac and RAP..."
python3 ./cli_pytition.py join_org --orga Attac --user max
python3 ./cli_pytition.py join_org --orga RAP --user max
echo "Creating petitions for each user and each organization..."
python3 ./cli_pytition.py generate_petitions -n 10 --orga RAP
python3 ./cli_pytition.py generate_petitions -n 10 --orga Greenpeace
python3 ./cli_pytition.py generate_petitions -n 10 --orga Attac
python3 ./cli_pytition.py generate_petitions -n 10 --user john
python3 ./cli_pytition.py generate_petitions -n 10 --user max
python3 ./cli_pytition.py generate_petitions -n 10 --user julia
|
fallen/Pytition
|
dev/prepopulate.sh
|
Shell
|
bsd-3-clause
| 1,512 |
#!/bin/bash
###########################################################
# Copyright (c) 2015-2017, Blockbridge Networks LLC. All rights reserved.
# Use of this source code is governed by a BSD-style license, found
# in the LICENSE file.
###########################################################
###########################################################
# Blockbridge docker volume driver entrypoint
###########################################################
# trap signals
trap '' TERM
# setup environment
export RUBYLIB=/usr/lib/blockbridge-ruby:/usr/lib/blockbridge-ruby/bundle:/usr/lib/blockbridge-ruby/lib/ruby/2.3.0:/usr/lib/blockbridge-ruby/lib/ruby/2.3.0/x86_64-linux
export PATH=/usr/lib/blockbridge-ruby/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/bb/bin
# run blockbridge docker volume driver
ROOTDIR=$(dirname $0)
ruby -rbundler/setup $ROOTDIR/volume_driver.rb -e production -S /run/docker/plugins/blockbridge.sock
|
blockbridge/blockbridge-docker-volume
|
volume_driver.sh
|
Shell
|
bsd-3-clause
| 958 |
### __common-func-2.sh
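# Shared helpers for the build scripts: colored output, source-archive unpacking, VCS syncing, and configure/make/install wrappers.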
CURBUILDAPP=
__dep()
{
echo Dependent Apps: $@
}
__echo-setcol-green()
{
COLGREEN=$'\e[1;32;1m'
echo "$COLGREEN"$@
}
__echo-setcol-red()
{
COLRED=$'\e[0;31;1m'
echo "$COLRED"$@
}
__echo-setcol-def()
{
COLDEF=$'\e[0m'
echo "$COLDEF"$@
}
__echo-g()
{
__echo-setcol-green $@
__echo-setcol-def
}
__err()
{
__echo-setcol-red "cur build app : " $CURBUILDAPP
echo $@
__echo-setcol-def
exit
}
__mes()
{
__echo-g "------------------------------"
__echo-g $1
__echo-g "------------------------------"
__echo-g $2
}
__wait()
{
__echo-g "<<< Prease enter key >>>"
# read
}
__cd()
{
CURBUILDAPP=$1
__mes $1 "Are you sure you want to build?"
cd $BASE_DIR/$1
if [ $? -ne 0 ]
then
__err "not directory error!!"
fi
__git-pull
__wait
}
__decord()
{
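# Find $1's source archive in $SRC_DIR and unpack it into $BASE_DIR (.tgz/.gz/.bz2/.xz).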
__mes $1 "Are you sure you want to decode?"
__wait
cd $SRC_DIR
BN=$(ls $1*.tar.*)
if [ $? -ne 0 ]
then
BN=$(ls $1*.tgz)
fi
__echo-g $BN
cd $BASE_DIR
case $BN in
*.tgz) gzip -dc $SRC_DIR/$BN | tar xvf - ;;
*.gz) gzip -dc $SRC_DIR/$BN | tar xvf - ;;
*.bz2) bzip2 -dc $SRC_DIR/$BN | tar xvf - ;;
*.xz) xz -dc $SRC_DIR/$BN | tar xvf - ;;
esac
}
__dcd()
{
__decord $1
__cd $1
}
__cdbt()
{
BLDTMP=$BASE_DIR/__bldtmp
rm $BLDTMP -rf
mkdir -v $BLDTMP
cd $BLDTMP
}
__mk()
{
__echo-g $CURBUILDAPP "[ make" $@ "]"
make $@
if [ $? -ne 0 ]
then
__err "make error!!"
fi
}
__mkinst()
{
__echo-g $CURBUILDAPP "[ sudo make install" $@ "]"
sudo make install $@
if [ $? -ne 0 ]
then
__err "sudo make install error!!"
fi
sudo ldconfig
}
__lsdir()
{
ls -F | grep / | sed -e "s/\/$//g"
}
__wget()
{
__mes "wget ["$1"]"
wget -c --directory-prefix=${SRC_DIR} $1
# if [ $? -ne 0 ]
# then
# __err "wget error!! ["$1"]"
# fi
}
__patch()
{
patch -p1 < $@
if [ $? -eq 2 ]
then
__err "patch error!! ["$@"]"
fi
}
__git-pull()
{
ls .git > /dev/null 2>&1
if [ $? -eq 0 ]
then
git pull
fi
}
__svn-pull()
{
ls .svn > /dev/null 2>&1
if [ $? -eq 0 ]
then
svn upgrade
svn cleanup
svn update
fi
}
__hg-pull()
{
ls .hg > /dev/null 2>&1
if [ $? -eq 0 ]
then
hg pull
hg update
fi
}
__cvs-pull()
{
ls CVS > /dev/null 2>&1
if [ $? -eq 0 ]
then
cvs update
fi
}
__vcs-pull()
{
__git-pull
__hg-pull
__svn-pull
__cvs-pull
}
__git-clone()
{
cd $BASE_DIR
git clone $@
}
__hg-clone()
{
cd $BASE_DIR
hg clone $@
}
__svn-clone()
{
cd $BASE_DIR
svn co $@
}
__cvs-clone()
{
cd $BASE_DIR
cvs $@
}
__autogen()
{
ls autogen.sh > /dev/null 2>&1
if [ $? -eq 0 ]
then
./autogen.sh
fi
}
__self-autogen()
{
ls m4 > /dev/null 2>&1
if [ $? -eq 0 ]
then
aclocal --force -I m4
else
aclocal --install
fi
libtoolize
autoheader
automake -acf
autoconf
}
__cfg()
{
__autogen
./configure $@
if [ $? -ne 0 ]
then
__err "./configure error!! ["$@"]"
fi
}
__bld-common-simple()
{
$DIST_CLEAN
__cfg --prefix=/usr $@
$MAKE_CLEAN
__mk
__mkinst
}
__bld-common()
{
__bld-common-simple --sysconfdir=/etc $@
}
__common()
{
__cd $1
__bld-common
}
__pl-common()
{
__dcd $1
perl Makefile.PL
__mk
__mkinst
}
### Create a "build-group" cgroup and register the current process (the one running this script) in it.
### The build-group is used to cap memory usage during builds at 7 GB.
__init-build-group()
{
sudo cgcreate -g memory,cpu:/build-group
sudo sh -c "echo 7G > /sys/fs/cgroup/memory/build-group/memory.limit_in_bytes"
sudo sh -c "echo $$ >> /sys/fs/cgroup/memory/build-group/tasks"
}
|
takeutch-kemeco/private-build-script
|
common-func/__common-func-2.sh
|
Shell
|
bsd-3-clause
| 3,936 |
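# Tests for bin/detect: detection should succeed on the copied fixture app and fail once its src/ contents are removed.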
before() {
cp -r test-app detect-test
}
after() {
rm -rf detect-test
}
it_is_go_if_dart_files_under_src() {
sh -x bin/detect detect-test/
}
it_is_not_go_without_all_sh_or_dart_files() {
rm -rf detect-test/src/*
! sh -x bin/detect detect-test/
}
|
bindstone/dartisan
|
detect-test.sh
|
Shell
|
bsd-3-clause
| 258 |
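# Start (or no-op restart) every container whose name matches bbsim-sn.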
for CID in $(docker ps -a -q --filter "name=bbsim-sn")
do
echo "Starting storage node: $(docker start $CID)"
done
|
blockbridge/blockbridge-demo
|
simulator/storage_node/start.sh
|
Shell
|
bsd-3-clause
| 118 |
#!/bin/bash
# Register EBS backed AMI.
# Stop execution if any command fails.
#set -e
txtdef="\e[0m" # revert to default color
txtbld="\e[1m" # bold
txtgreen="\e[0;32m"
errcolor="\e[1;31m" # bold red
logcolor="\e[0;34m" # blue
logfile="out.log"
errfile="out.err"
: > $logfile
: > $errfile
# Requires: $img_file
# Output: $logfile, $errfile, $work_dir, $volume_size, $EC2_ACCESS_KEY, $EC2_SECRET_KEY
function init_variables {
work_dir="work-dir"
rm -rf $work_dir
mkdir $work_dir
local size=`du -B M $img_file | awk -F M '{print $1}'`
volume_size=$((size / 1024))
if [ $((size % 1024)) -ne 0 ]; then
(( volume_size += 1 ))
fi
: ${EC2_ACCESS_KEY:?"You need to set env variable: 'EC2_ACCESS_KEY'."}
: ${EC2_SECRET_KEY:?"You need to set env variable: 'EC2_SECRET_KEY'."}
}
function install_pkgs {
# euca2ools dependencies.
host_pkgs=('python-boto')
host_pkgs+=('python-m2crypto')
host_pkgs+=('build-essential')
# Other tools.
host_pkgs+=('mount') # losetup, mount, umount
host_pkgs+=('kpartx') # kpartx
host_pkgs+=('parted') # parted
host_pkgs+=('e2fsprogs') # tune2fs
host_pkgs+=('grub2') # grub-install
# Other tools
host_pkgs+=('grub-pc')
apt-get update
apt-get install -y ${host_pkgs[*]} | spin
[ $PIPESTATUS == 0 ] || die "Installing host packages failed!"
modprobe dm-mod
}
function usage {
echo -e "Usage:\n$0
\t[-a | --arch <amd64 | i386>]
\t[-d | --description <description>]
\t[-h | --help]
\t[-n | --name <ami-name>]
\t<image-file>"
}
# Takes as parameter "$@".
function parse_script_arguments {
arch=
availability_zone=
description="no-description"
ami_name=
while :
do
case $1 in
-a | --arch)
arch=$2
shift 2
;;
-d | --description)
description=$2
shift 2
;;
-h | --help)
usage
exit 0
;;
-n | --name)
ami_name=$2
shift 2
;;
--) # End of all options.
shift
break
;;
-*)
echo "${errcolor}WARN: Unknown option (ignored): $1${txtdef}" >&2
shift
;;
*)
break
;;
esac
done
# The image file.
if [ $# -ne 1 ] || [[ $1 != *.img ]]; then
echo -e "${errcolor}ERROR: Script must be called with just one parameter \
(excluding options), which is the image file: 'file-name.img'.\nAll options must \
preced the image file name.\n${txtdef}" >&2
usage >&2
exit 1
fi
img_file="$1"
# Some checks.
if ! silent_check arch; then
arch=$(parse_cfg_file xen_arch)
fi
[ $arch == "i386" ] || [ $arch == "amd64" ] || {
echo -e "${errcolor}ERROR: 'arch' option is not properly set. It can be \
specified on the command line or in a file called '${cfg_file}'.\n${txtdef}" >&2
usage >&2
exit 1
}
if ! silent_check description; then
echo -e "${errcolor}ERROR: 'description' option is not properly set.\n${txtdef}" >&2
usage >&2
exit 1
fi
if ! silent_check ami_name; then
ami_name="${img_file%.img}-`date +%s`"
fi
}
cfg_file=create-img-script.cfg
# Requires: $1 = name of configuration option to search for.
# Output: The option if it is found in the configuration file.
function parse_cfg_file {
if ! [ -f $cfg_file ]; then
echo -n ""
return
fi
local re="^[[:space:]]*${1}[[:space:]]*="
while read line; do
if [[ "$line" =~ $re ]]; then
echo -n ${line#*=}
return
fi
done < $cfg_file
echo -n ""
}
# Requires: $work_dir
# Output: --
function check_euca2ools {
check work_dir
# Install the euca2ools if they are not installed or the version is wrong
if ! command -v euca-version > /dev/null 2>&1; then
install_euca2ools
elif [[ ! "`euca-version`" =~ euca2ools\ 2.0.* ]]; then
install_euca2ools
fi
}
# Requires: $work_dir
# Output: --
function install_euca2ools {
# We want to fail if make fails, so don't start a subshell with ()
# Remember the old dir
local orig_pwd=$(pwd)
out "${txtgreen}Installing euca2ools...\n${txtdef}"
# Install euca2ools.
# Download the sources (euca2ools 1.3.2 release tarball)
wget -qO euca2ools-1.3.2.tar.gz https://github.com/eucalyptus/euca2ools/archive/1.3.2.tar.gz
tar zxf euca2ools-1.3.2.tar.gz
cd euca2ools-1.3.2
make | spin
[ $PIPESTATUS == 0 ] || die "Installation of euca2ools failed!"
cd $orig_pwd
apply_boto_patch
}
function apply_boto_patch {
local boto_ec2_dir='/usr/share/pyshared/boto/ec2'
local patch_url='https://bugzilla.redhat.com/attachment.cgi?id=455857'
if [ -r "$boto_ec2_dir/blockdevicemapping.py" ]; then
local result=$(
grep -q "pre = '%sBlockDeviceMapping.%d' % (pre, i)" $boto_ec2_dir/blockdevicemapping.py
echo $?)
if [ $result -eq 0 ]; then
wget -qO - $patch_url | patch -sfr - -d $boto_ec2_dir
[ $PIPESTATUS == 0 ] || die "Unable to patch boto."
fi
fi
}
# Requires: --
# Output: $instance_information, $instance_id, $region, $availability_zone, $EC2_URL
function get_host_info {
instance_information=`wget -qO - http://169.254.169.254/latest/dynamic/instance-identity/document`
# We need the region, for the apt sources and the availability zone for the EBS volume
instance_id=`printf -- "$instance_information" | grep instanceId | awk -F\" '{print $4}'`
region=`printf -- "$instance_information" | grep region | awk -F\" '{print $4}'`
availability_zone=`printf -- "$instance_information" | grep availabilityZone | awk -F\" '{print $4}'`
if [ -z "$instance_id" ]; then
die \
"Unable to fetch the instance id of this machine." \
"This script must be running on ec2 in order to mount EBS volumes."
fi
[ -z "$region" ] && die "Unable to fetch the region of this machine."
[ -z "$availability_zone" ] && die "Unable to fetch the availability zone of this machine."
export EC2_URL="https://ec2.$region.amazonaws.com"
log "instance_information: $instance_information"
log "EC2_URL=$EC2_URL"
# # Check if we can handle this region, there are hardcoded AKIs later on.
# if ! $(contains $region known_regions[@]); then
# die "The region $region is unkown."
# fi
}
# Requires: $volume_size, $availability_zone
# Output: $volume_id
function create_ebs_volume {
check volume_size availability_zone
volume_id=`euca-create-volume --size $volume_size --zone "$availability_zone" | awk '{print $2}'`
[ -z "$volume_id" ] && die "Unable to create volume."
dotdot "euca-describe-volumes $volume_id | grep available > /dev/null && echo available"
log "The EBS volume id is $volume_id"
}
# Requires: $instance_id, $volume_id
# Output: $device_path
function attach_ebs_volume {
check instance_id volume_id
# Get a random device letter, we will hang forever if we try to attach a volume to an already mapped device.
for device_letter in {f..z}; do
device_path="/dev/xvd$device_letter"
[ ! -b $device_path ] && break
done
[ -b $device_path ] && die "No free device letters found (tried sdf to sdz)!"
euca-attach-volume --instance "$instance_id" --device "/dev/sd$device_letter" "$volume_id"
# Wait until the volume is attached
dotdot "test -b $device_path && echo attached"
log "The EBS device is $device_path"
}
# Requires: $img_file, $device_path
# Output: --
function cp3_img_to_ebs_volume {
check img_file device_path
mkfs.ext3 $device_path
tune2fs -i 0 $device_path
# Mounting dst.
local dst_dir=$(mktemp -d)
mount $device_path $dst_dir
log "The volume is mounted at $dst_dir"
# Mounting source image.
local src_loop=$(losetup -f)
losetup $src_loop $img_file
partition=$(kpartx -l $src_loop | awk '{ print $1 }')
partition=/dev/mapper/$partition
kpartx -a $src_loop
local src_dir=$(mktemp -d)
mount -o loop $partition $src_dir
# Copy files.
( cd $src_dir && tar -cf - . ) | ( cd $dst_dir && tar -xpf - )
# Mount all the different special devices, other installers depend on their existence
mount --bind /dev $dst_dir/dev
chroot $dst_dir mount -t proc none /proc
chroot $dst_dir mount -t sysfs none /sys
chroot $dst_dir mount -t devpts none /dev/pts
# Grub
chmod -x $dst_dir/etc/grub.d/*
cp 40_custom $dst_dir/etc/grub.d/40_custom
chmod 755 $dst_dir/etc/grub.d/40_custom
sed -i "s/^GRUB_TIMEOUT=[0-9]\+/GRUB_TIMEOUT=0\nGRUB_HIDDEN_TIMEOUT=true/" $dst_dir/etc/default/grub
# Update grub.cfg using the script
chroot $dst_dir update-grub
# Alias grub.cfg as menu.lst
chroot $dst_dir rm -rf /boot/grub/menu.lst
chroot $dst_dir ln -s /boot/grub/grub.cfg /boot/grub/menu.lst
# We unmount from inside the image, otherwise the system won't boot
chroot $dst_dir umount /dev/pts
chroot $dst_dir umount /sys
chroot $dst_dir umount /proc
umount $dst_dir/dev
umount $dst_dir
rmdir $dst_dir
umount $src_dir
kpartx -d $src_loop
losetup -d $src_loop
rmdir $src_dir
}
function write_fstab {
check device_path
# Mounting dst.
local dst_dir=$(mktemp -d)
mount $device_path $dst_dir
local mountoptions=',barrier=0'
cat > $dst_dir/etc/fstab <<EOF
/dev/xvda1 / ext3 defaults$mountoptions 1 1
EOF
umount $dst_dir
rmdir $dst_dir
}
# Requires: $volume_id
# Output: --
function detach_ebs_volume {
check volume_id
euca-detach-volume $volume_id
dotdot "euca-describe-volumes $volume_id | grep 'available' > /dev/null && echo Detached."
}
# Requires: $volume_id
# Output: $snapshot_id
function create_ebs_snapshot {
check volume_id
logn "Creating snapshot of the EBS volume"
snapshot=`euca-create-snapshot $volume_id`
[ -z "$snapshot" ] && die "\nUnable to create snapshot from the volume '$volume_id'"
snapshot_id=`printf -- "$snapshot" | awk '{print $2}'`
# Wait for the snapshot to be completed, can take quite some time
dotdot "euca-describe-snapshots $snapshot_id | grep 'completed'"
}
# Requires: $volume_id
# Output: --
function delete_ebs_volume {
check volume_id
log "Deleting the volume"
euca-delete-volume $volume_id
}
# Requires: $region, $arch
# Output: $aki
function set_aki {
check region arch
log "Set aki"
# Figure out which pvGrub kernel ID we need.
case $region in
us-east-1)
[ $arch = 'amd64' ] && aki="aki-88aa75e1"
[ $arch = 'i386' ] && aki="aki-b6aa75df"
;;
us-west-1)
[ $arch = 'amd64' ] && aki="aki-f77e26b2"
[ $arch = 'i386' ] && aki="aki-f57e26b0"
;;
us-west-2)
[ $arch = 'amd64' ] && aki="aki-fc37bacc"
[ $arch = 'i386' ] && aki="aki-fa37baca"
;;
eu-west-1)
[ $arch = 'amd64' ] && aki="aki-71665e05"
[ $arch = 'i386' ] && aki="aki-75665e01"
;;
ap-southeast-1)
[ $arch = 'amd64' ] && aki="aki-fe1354ac"
[ $arch = 'i386' ] && aki="aki-f81354aa"
;;
ap-southeast-2)
[ $arch = 'amd64' ] && aki="aki-31990e0b"
[ $arch = 'i386' ] && aki="aki-33990e09"
;;
ap-northeast-1)
[ $arch = 'amd64' ] && aki="aki-44992845"
[ $arch = 'i386' ] && aki="aki-42992843"
;;
sa-east-1)
[ $arch = 'amd64' ] && aki="aki-c48f51d9"
[ $arch = 'i386' ] && aki="aki-ca8f51d7"
;;
us-gov-west-1)
[ $arch = 'amd64' ] && aki="aki-79a4c05a"
[ $arch = 'i386' ] && aki="aki-7ba4c058"
;;
*) die "Unrecognized region:" "$region"
esac
}
# Requires: $arch, $ami_name, $description, $aki, $snapshot_id, $volume_size
# Output: $ami_id
function register_ebs_ami {
check arch ami_name description aki snapshot_id volume_size
[ $arch = 'i386' ] && ami_arch='i386'
[ $arch = 'amd64' ] && ami_arch='x86_64'
# The AMI has to start with "debian", otherwise we won't get a nice icon
# The ":N:true:standard" is necessary so that the root volume
# will be deleted on termination of the instance (specifically the "true" part)
log "Registering an AMI with the snapshot '$snapshot_id'"
register=`euca-register \
--name "$ami_name" --description "$description" \
--architecture "$ami_arch" --kernel "$aki" \
--snapshot "$snapshot_id:$volume_size:true:standard"`
ami_id=`echo $register | awk '{print $2}'`
# If the user has already created an unnamed AMI today,
# this will fail, so give the AMI registration command to the user
if [[ ! "$ami_id" =~ ^ami-[0-9a-z]{8}$ ]]; then
die \
"Unable to register an AMI: $register" \
"You can do it manually with:" \
"export EC2_URL=\"$EC2_URL\"" \
"`which euca-register` \\\\" \
"--name '$ami_name' --description '$description' \\\\" \
"--architecture '$ami_arch' --kernel '$aki' \\\\" \
"--snapshot '$snapshot_id:$volume_size:true:standard'"
fi
log "Your AMI has been created with the ID '$ami_id'"
}
function clean {
rm -rf $work_dir
}
# # # # # # # # # # # # # # # # # # # # # # # #
# Each arg of log is a line.
function log {
for line in "$@"; do
out "$logcolor$line$txtdef\n" "$line\n" $logfile
done
}
# Log without the newline.
function logn {
out "$logcolor$1$txtdef" "$1" $logfile
}
# Each arg of die is a line.
function die {
for line in "$@"; do
out "$errcolor$line$txtdef\n" "$line\n" $errfile >&2
done
exit 1
}
function spin {
local cursor='|'
local cols=$(( `tput cols` - 2 ))
while read line; do
printf -- "\r$logcolor$cursor$txtdef %-${cols}s" "${line:0:$cols}"
case $cursor in
'|') cursor='/' ;;
'/') cursor='-' ;;
'-') cursor='\\' ;;
'\\') cursor='|' ;;
esac
done
printf "\n"
}
# Wait for the execution of $cmd not to return an empty string.
function dotdot {
local cmd=$1
local status=`eval $cmd`
local sleep=5
[ ! -z "$2" ] && sleep=$2
while [ -z "$status" ]; do
logn '.'
sleep $sleep
# Don't error out if the command fails.
status=`eval $cmd || true`
done
logn "\n"
}
# Output function, takes $msg, $filemsg and $file args in that order.
function out {
printf -- "$1"
if [ -n "$3" ]; then
printf -- "$2" >>$3
fi
}
function silent_check {
for x in "$@" ; do
[ -z "${!x}" ] && return 1
done
return 0
}
function check {
for x in "$@" ; do
[ -z "${!x}" ] && { echo -e "${errcolor}ERROR: $x is not set.${txtdef}" >&2 ; exit 1 ; }
done
:
}
# # # # # # # # # # # # # # # # # # # # # # # #
parse_script_arguments "$@"
init_variables "$@"
get_host_info
install_pkgs
check_euca2ools
create_ebs_volume
attach_ebs_volume
cp3_img_to_ebs_volume
write_fstab
detach_ebs_volume
create_ebs_snapshot
delete_ebs_volume
set_aki
register_ebs_ami
clean
|
ema/conpaas
|
conpaas-services/scripts/create_vm/register-image-ec2-ebs.sh
|
Shell
|
bsd-3-clause
| 15,580 |
#!/bin/sh
#############################################################################
# Copyright (c) 2015-2018, Intel Corporation #
# All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: #
# 1. Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# 2. Redistributions in binary form must reproduce the above copyright #
# notice, this list of conditions and the following disclaimer in the #
# documentation and/or other materials provided with the distribution. #
# 3. Neither the name of the copyright holder nor the names of its #
# contributors may be used to endorse or promote products derived #
# from this software without specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR #
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT #
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, #
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED #
# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR #
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF #
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING #
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS #
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #
#############################################################################
# Hans Pabst (Intel Corp.)
#############################################################################
HERE=$(cd $(dirname $0); pwd -P)
VARIANT=Cached
LIMIT=31
if [ "" != "$1" ]; then
VARIANT=$1
shift
fi
if [ -f /cygdrive/c/Program\ Files/gnuplot/bin/wgnuplot ]; then
WGNUPLOT=/cygdrive/c/Program\ Files/gnuplot/bin/wgnuplot
GNUPLOT=/cygdrive/c/Program\ Files/gnuplot/bin/gnuplot
elif [ -f /cygdrive/c/Program\ Files\ \(x86\)/gnuplot/bin/wgnuplot ]; then
WGNUPLOT=/cygdrive/c/Program\ Files\ \(x86\)/gnuplot/bin/wgnuplot
GNUPLOT=/cygdrive/c/Program\ Files\ \(x86\)/gnuplot/bin/gnuplot
else
GNUPLOT=$(which gnuplot 2>/dev/null)
WGNUPLOT=${GNUPLOT}
fi
GNUPLOT_MAJOR=0
GNUPLOT_MINOR=0
if [ -f "${GNUPLOT}" ]; then
GNUPLOT_MAJOR=$("${GNUPLOT}" --version | sed "s/.\+ \([0-9]\).\([0-9]\) .*/\1/")
GNUPLOT_MINOR=$("${GNUPLOT}" --version | sed "s/.\+ \([0-9]\).\([0-9]\) .*/\2/")
fi
GNUPLOT_VERSION=$((GNUPLOT_MAJOR * 10000 + GNUPLOT_MINOR * 100))
SED=$(which sed)
function capturedTxtToDataFile {
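# Strip the benchmark log decoration from $1.txt, keeping only the numeric columns, and write $1.dat for gnuplot.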
${SED} \
-e "/^m=/,/${VARIANT}/{//!d}" \
-e "/${VARIANT}/d" \
-e "/\.\.\./,/Finished/{//!d}" \
-e "/Finished/d" \
-e "/diff:/d" \
-e "/\.\.\./d" \
-e "/^$/d" \
${HERE}/$1.txt \
| ${SED} \
-e "s/m=//" -e "s/n=//" -e "s/k=//" -e "s/ (..*) / /" \
-e "s/size=//" \
-e "/duration:/d" \
| ${SED} \
-e "N;s/ memory=..*\n..*//" \
-e "N;s/\n\tperformance:\(..*\) GFLOPS\/s/\1/" \
-e "N;s/\n\tbandwidth:\(..*\) GB\/s/\1/" \
> ${HERE}/$1.dat
}
if [ "40600" -le "${GNUPLOT_VERSION}" ]; then
RM=$(which rm)
if [ "" = "$1" ]; then
FILENAME=smm-$(echo ${VARIANT} | tr ' ,' '-' | tr -d '()' | tr '[:upper:]' '[:lower:]').pdf
else
FILENAME=$1
shift
fi
if [ "" = "$1" ]; then
MULTI=1
else
MULTI=$1
shift
fi
${RM} -f *.dat
capturedTxtToDataFile smm-blas
capturedTxtToDataFile smm-specialized
#capturedTxtToDataFile smm-dispatched
#capturedTxtToDataFile smm-inlined
env \
GDFONTPATH=/cygdrive/c/Windows/Fonts \
FILENAME=${FILENAME} \
MULTI=${MULTI} \
LIMIT=${LIMIT} \
"${WGNUPLOT}" smm-perf.plt
fi
|
egeor/libxsmm
|
samples/smm/smm-plot.sh
|
Shell
|
bsd-3-clause
| 4,326 |
#!/bin/bash
if ! type docker 2>>/dev/null
then
# https://github.com/gliderlabs/docker-consul/issues/60#issuecomment-104561023
sudo wget -qO- https://get.docker.com/ | sh
fi
# http://askubuntu.com/questions/477551/how-can-i-use-docker-without-sudo
sudo groupadd docker
sudo gpasswd -a ${USER} docker
# https://help.ubuntu.com/community/CheckingYourUbuntuVersion
sudo service docker restart
# install docker compose
# https://docs.docker.com/compose/install/
sudo apt-get -y install curl
if ! type docker-compose
then
sudo bash -c "curl -L https://github.com/docker/compose/releases/download/1.5.2/docker-compose-`uname -s`-`uname -m` > /usr/local/bin/docker-compose"
sudo chmod +x /usr/local/bin/docker-compose
else
echo "\"docker-compose\" already installed."
fi
# check docker and docker-compose
EXIT=false
if ! type docker
then
EXIT=true
echo "Command \"docker\" not found!"
echo "Use \"sudo delgroup docker\" if you accidentially installed docker but need to install docker.io."
fi
if ! type docker-compose
then
EXIT=true
echo "Command \"docker-compose\" not found!"
fi
if [ "$EXIT" == "true" ]
then
echo "ERROR!"
exit 1
fi
# both work
if docker info 1>>/dev/null 2>>/dev/null
then
echo
echo "You do NOT need to log out and in - the \"docker\" command can be used without sudo."
else
echo
echo "You must log out and in to use the \"docker\" command without super user privileges."
fi
|
niccokunzmann/cp-automated-development
|
setup-docker/01_install_docker.sh
|
Shell
|
mit
| 1,431 |
#!/bin/bash
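# Open the built model binary under gdb from the output directory.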
cd ../output
gdb model
|
andyLaurito92/haikunet
|
debug/bin/debug.sh
|
Shell
|
mit
| 35 |
#!/bin/bash
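# Move the downloaded OFX statements into files/, convert each statement to CSV, and copy the results to the shared folder.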
mv /media/sf_D_DRIVE/Downloads/*.ofx files/.
echo $1
export FILE=files/SMILES_INTERN._MASTER-Próxima_Fatura_$1.ofx
node convert-ofx.js $FILE
cp $FILE.csv /media/sf_D_DRIVE/databox/Documents/hdc/csv/.
export FILE=files/OUROCARD_ELO_MAIS-Próxima_Fatura_$1.ofx
node convert-ofx.js $FILE
cp $FILE.csv /media/sf_D_DRIVE/databox/Documents/hdc/csv/.
|
hora-das-contas/backend
|
convertOFX.sh
|
Shell
|
mit
| 363 |
#!/bin/sh
make VIMRUNTIMEDIR=/usr/share/vim/vim80
|
metalmac/dotfiles
|
vim/unix-like/vim80-compile-sh.sh
|
Shell
|
mit
| 53 |
#! /bin/sh -e
# tup - A file-based build system
#
# Copyright (C) 2015-2017 Mike Shal <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# Make sure a ghost top-level Tuprules.tup doesn't fail
. ./tup.sh
tmkdir sub
cat > sub/Tupfile << HERE
include_rules
HERE
update
# Touch a file in the top-level directory to trigger parsing there.
tup touch foo
update
eotup
|
anatol/tup
|
test/t4174-toplevel-gitignore.sh
|
Shell
|
gpl-2.0
| 978 |
#!/bin/bash
tprname=step7_10.tpr
#trajname=${tprname%7_10.tpr}8_1.dcd
trajname=traj_openMM.dcd
trajgroname=${trajname%.dcd}_run.gro
gromacs_folder_prefix="../gromacs"
sn1outname=OrderParamSN1_sim2_openMM.dat
sn2outname=OrderParamSN2_sim2_openMM.dat
# workaround with tpr file from gromacs simulation (which should match exactly)
#python convert_dcd_to_gro.py
#echo System | gmx trjconv -f $trajname -o $trajgroname #-s $gromacs_folder_prefix/$tprname # -pbc res #-b 100000
# sn-1
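# For each sn-1 chain carbon, look up the mapped C/H atom names and compute the order parameter for each hydrogen.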
for(( j = 3 ; j <= 16; j=j+1 ))
do
Cname=$(grep M_G1C"$j"_M ../../MAPPING/mappingPOPCcharmm.txt | awk '{printf "%5s\n",$2}')
H1name=$(grep M_G1C"$j"H1_M ../../MAPPING/mappingPOPCcharmm.txt | awk '{printf "%5s\n",$2}')
H2name=$(grep M_G1C"$j"H2_M ../../MAPPING/mappingPOPCcharmm.txt | awk '{printf "%5s\n",$2}')
H1op=$(awk -v Cname="$Cname" -v Hname="$H1name" -f ../../scripts/gro_OP.awk $trajgroname)
H2op=$(awk -v Cname="$Cname" -v Hname="$H2name" -f ../../scripts/gro_OP.awk $trajgroname)
echo $j $H1op $H2op >> $sn1outname
done
# sn-2
for(( j = 3 ; j <= 18; j=j+1 ))
do
Cname=$(grep M_G2C"$j"_M ../../MAPPING/mappingPOPCcharmm.txt | awk '{printf "%5s\n",$2}')
H1name=$(grep M_G2C"$j"H1_M ../../MAPPING/mappingPOPCcharmm.txt | awk '{printf "%5s\n",$2}')
H2name=$(grep M_G2C"$j"H2_M ../../MAPPING/mappingPOPCcharmm.txt | awk '{printf "%5s\n",$2}')
H1op=$(awk -v Cname="$Cname" -v Hname="$H1name" -f ../../scripts/gro_OP.awk $trajgroname)
H2op=$(awk -v Cname="$Cname" -v Hname="$H2name" -f ../../scripts/gro_OP.awk $trajgroname)
echo $j $H1op $H2op >> $sn2outname
done
|
jmelcr/NmrLipidsCholXray
|
DATA/charmm36-charmm-gui/chol30_openMM_Gmx-charmm-gui-inp-files/calcOrderParameters_oMM.sh
|
Shell
|
gpl-2.0
| 1,566 |
#!/bin/sh -e
test -n "$srcdir" || srcdir=`dirname "$0"`
test -n "$srcdir" || srcdir=.
mkdir -p m4
intltoolize --copy --force --automake && autoreconf --force --install --verbose --warnings=all "$srcdir"
test -n "$NOCONFIGURE" || "$srcdir/configure" "$@"
|
awinkelmann/easytag
|
autogen.sh
|
Shell
|
gpl-2.0
| 256 |
#!/bin/sh
# @author Greg Rundlett <[email protected]>
# This is a quick shell script to create a sql dump of your database.
# You may need to adjust the path of mysqldump,
# or sudo apt-get install mysqldump if it doesn't exist
# To configure this script,
# you could hardcode which database to backup
# DB=wiki
# We'll make it so you can pass the database name as the first parameter
# to the script. If no parameter is passed, we'll prompt you for the name
DB=$1
if [ $# -ne 1 ]; then
echo "Here are the current databases on the server"
mysql -u root --batch --skip-column-names -e 'show databases;'
echo "Enter the name of the database you want to backup"
read DB
fi
# We'll use a location that is exported to the host, so that our backups are
# accessible even if the virtual machine is no longer accessible.
backupdir="/vagrant/mediawiki/backups";
if [ ! -d "$backupdir" ]; then
mkdir -p "$backupdir";
fi
# we'll start with a default backup file named '01' in the sequence
backup="${backupdir}/dump-$(date +%F).$(hostname)-${DB}.01.sql";
# and we'll increment the counter in the filename if it already exists
i=1
filename=$(basename "$backup") # foo.txt (basename is everything after the last slash)
# shell parameter expansion see http://www.gnu.org/software/bash/manual/html_node/Shell-Parameter-Expansion.html
extension=${filename##*.} # .txt (filename with the longest matching pattern of *. being deleted)
file=${filename%.*} # foo (filename with the shortest matching pattern of .* deleted)
file=${file%.*} # repeat the strip to get rid of the counter
# file=${filename%.{00..99}.$extension} # foo (filename with the shortest matching pattern of .[01-99].* deleted)
while [ -f $backup ]; do
backup="$backupdir/${file}.$(printf '%.2d' $(( i+1 ))).${extension}"
i=$(( i+1 )) # increments $i
# note that i is naked because $(( expression )) is arithmetic expansion in bash
done
if /usr/bin/mysqldump "$DB" > "$backup"; then
echo "backup created successfully"
ls -al "$backup";
echo "A command such as"
echo "mysql -u root $DB < $backup"
echo "will restore the database from the chosen sql dump file"
else
echo "Something went wrong with the backup"
exit 1
fi
|
freephile/Html2Wiki
|
backup.db.sh
|
Shell
|
gpl-2.0
| 2,230 |
# Copyright: 2016 Masatake YAMATO
# License: GPL-2
CTAGS=$1
echo '# FILE NAME ONLY'
# extension `m' matches both matlab and objc.
# matlab wins by the alphabetical order of parser names
${CTAGS} -G --print-language input.m
# extension `m' matches only objc because matlab is disabled.
${CTAGS} -G --languages=-MatLab --print-language input.m
# extension `m' matches only matlab because objc is disabled.
${CTAGS} -G --languages=-ObjectiveC --print-language input.m
# extension `m' matches no parser because both the objc and matlab
# parsers are disabled.
${CTAGS} -G --languages=-ObjectiveC,-MatLab --print-language input.m
${CTAGS} -G --languages=-MatLab,-ObjectiveC --print-language input.m
echo '# EMACS MODE: MATLAB'
# extension `m' matches both matlab and objc.
# matlab wins because of the emacs modeline written in the input file.
${CTAGS} -G --print-language input-matlab.m
# extension `m' matches only objc. That's all.
${CTAGS} -G --languages=-MatLab --print-language input-matlab.m
# extension `m' matches only matlab. That's all.
${CTAGS} -G --languages=-ObjectiveC --print-language input-matlab.m
# extension `m' matches no parser because both the objc and matlab
# parsers are disabled. That's all. ctags has no chance to read the file contents.
${CTAGS} -G --languages=-ObjectiveC,-MatLab --print-language input-matlab.m
${CTAGS} -G --languages=-MatLab,-ObjectiveC --print-language input-matlab.m
echo '# EMACS MODE: OBJC'
# extension `m' matches both matlab and objc.
# objc wins because of the emacs modeline written in the input file.
${CTAGS} -G --print-language input-objc.m
# extension `m' matches only objc. That's all.
${CTAGS} -G --languages=-MatLab --print-language input-objc.m
# extension `m' matches only matlab. That's all.
${CTAGS} -G --languages=-ObjectiveC --print-language input-objc.m
# extension `m' matches no parser because both the objc and matlab
# parsers are disabled. That's all. ctags has no chance to read the file contents.
${CTAGS} -G --languages=-ObjectiveC,-MatLab --print-language input-objc.m
${CTAGS} -G --languages=-MatLab,-ObjectiveC --print-language input-objc.m
|
blackb1rd/ctags
|
Tmain/disable-languages.d/run.sh
|
Shell
|
gpl-2.0
| 2,082 |
#!/bin/bash
####
## Create the list of BAM files from the directory the user passes as input. NOTE: EACH POPULATION TO BE STUDIED MUST BE IN ITS OWN SEPARATE DIRECTORY.
###
list_name=$(echo $1 | cut -d"/" -f3)
fecha=$(date -u | tr " " "_" | tr ":" ".")
o_file=$(echo temp/$list_name"_"$fecha".filelist")
echo "[]Creando filelist para el directorio $list_name"
find $1 -name "*.bam" > $o_file
echo "[>]Guardado en $o_file"
####
## Next, run the SAF (Site Allele Frequency) estimation
###
echo "[>>] Calculando SAF para el directorio $list_name"
./Tools/ANGSD/angsd/angsd -b $o_file -anc Input_files/Ancestral_sequence/chimpHg19.fa.gz -out Output_files/$list_name -dosaf 1 -gl 1 -P 10
|
rgarcia-herrera/100g
|
scripts/positive_selection/Scripts/BKUPS/01.sh
|
Shell
|
gpl-3.0
| 740 |
#!/bin/bash
set -e
pegasus_lite_version_major="4"
pegasus_lite_version_minor="7"
pegasus_lite_version_patch="0"
pegasus_lite_enforce_strict_wp_check="true"
pegasus_lite_version_allow_wp_auto_download="true"
. pegasus-lite-common.sh
pegasus_lite_init
# cleanup in case of failures
trap pegasus_lite_signal_int INT
trap pegasus_lite_signal_term TERM
trap pegasus_lite_exit EXIT
echo -e "\n################################ Setting up workdir ################################" 1>&2
# work dir
export pegasus_lite_work_dir=$PWD
pegasus_lite_setup_work_dir
echo -e "\n###################### figuring out the worker package to use ######################" 1>&2
# figure out the worker package to use
pegasus_lite_worker_package
echo -e "\n##################### setting the xbit for executables staged #####################" 1>&2
# set the xbit for any executables staged
/bin/chmod +x example_workflow-init_0-1.0
echo -e "\n############################# executing the user tasks #############################" 1>&2
# execute the tasks
set +e
pegasus-kickstart -n example_workflow::init_0:1.0 -N ID0000001 -R condorpool -L example_workflow -T 2016-11-08T15:27:25+00:00 ./example_workflow-init_0-1.0
job_ec=$?
set -e
|
elainenaomi/sciwonc-dataflow-examples
|
dissertation2017/Experiment 1A/logs/w-11_0/20161108T152726+0000/00/00/init_0_ID0000001.sh
|
Shell
|
gpl-3.0
| 1,222 |
#!/bin/bash
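# Build one package: enter its directory, source its .info file for $DOWNLOAD, fetch the source, and run its SlackBuild.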
set -x
set -e
cd $1
source ./$1.info
wget --no-check-certificate -nc $DOWNLOAD
export OUTPUT="/tmp/pkgs"
mkdir -p $OUTPUT
sh $1.SlackBuild
|
cmotc/Byzantium
|
packages/build.sh
|
Shell
|
gpl-3.0
| 154 |
#!/bin/bash
cd /usr/local/infra-daemon
echo "Collecting Information from vCenter : VCENTER01 (DC1)"
./openinfra-vcenter-daemon.py --host VCENTER01 --user 'vcenter-ro' --pass 'PASS' &
echo "Collecting Information from vCenter : VCENTER02 (DC2)"
./openinfra-vcenter-daemon.py --host VCENTER02 --user 'vcenter-ro' --pass 'PASS' &
echo "Collecting Information from vCenter : VCENTER03 (DC3)"
./openinfra-vcenter-daemon.py --host VCENTER03 --user 'vcenter-ro' --pass 'PASS' &
echo "Collecting Information from vCenter : VCENTER04 (DC4)"
./openinfra-vcenter-daemon.py --host VCENTER04 --user 'vcenter-ro' --pass 'PASS' &
#end
|
arunbagul/openinfra
|
infra-daemon/openinfra-get-vmlist.sh
|
Shell
|
gpl-3.0
| 625 |
#!/bin/bash
#
# Script will be run after parameterization has completed, e.g.,
# use this to compile source code that has been parameterized.
#
sudo systemctl enable plc
sudo systemctl start plc
sudo systemctl enable httpserver.service
sudo systemctl start httpserver.service
|
cliffe/SecGen
|
modules/utilities/unix/labtainers/files/Labtainers-master/labs/iptables-ics/plc/_bin/fixlocal.sh
|
Shell
|
gpl-3.0
| 280 |
#!/usr/bin/env bash
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
set -o errexit
set -o nounset
set -o pipefail
# https://wiki.mozilla.org/CA/Included_Certificates
# 1. Mozilla's official CA database CSV file is downloaded with curl
# and processed with awk.
# 2. Rows end with `"\n`.
# 3. Each row is split by ^" and "," into columns.
# 4. Single and double quotes are removed from column 30.
# 5. If column 13 (12 in the csv file) contains `Websites`
# (some are Email-only), column 30 is printed, the raw certificate.
# 6. All CA certs trusted for Websites are stored into the `certs` file.
url="https://ccadb-public.secure.force.com/mozilla/IncludedCACertificateReportPEMCSV"
curl "${url}" -sSf | gawk -v RS="\"\n" -F'","|^"' \
'{gsub("\047","",$(30));gsub("\"","",$(30));if($(13)~/Websites/)print $(30)}' \
> certs
|
peterjoel/servo
|
etc/cert_generator.sh
|
Shell
|
mpl-2.0
| 984 |
#! /bin/bash -e
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Jenkins build script for running tests and packaging build
echo "this is build.sh"
./scripts/clean.sh
./scripts/bootstrap.sh
./scripts/test.sh
./scripts/integration-test.sh --destroy
./scripts/install.sh
./scripts/package.sh
|
luser/socorro
|
scripts/build.sh
|
Shell
|
mpl-2.0
| 452 |
#!/bin/bash
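# On-boot housekeeping: clean logs and sync time in the background, run any fireonboot.d drop-in scripts, then wait for the background jobs.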
: ${FIREWALLA_HOME:=/home/pi/firewalla}
: ${LOGDIR:=/home/pi/logs}
source $FIREWALLA_HOME/scripts/utils.sh
setup_folders
logger Onboot start clean_log
${FIREWALLA_HOME}/scripts/clean_log.sh &> $LOGDIR/clean_log.log &
logger Onboot start sync_time
${FIREWALLA_HOME}/scripts/sync_time.sh &> $LOGDIR/sync_time.log &
DIR_D="/home/pi/.firewalla/config/fireonboot.d"
if [[ -d $DIR_D ]]; then
for script in $(ls $DIR_D/*.sh)
do
bash $script
done
fi
wait
exit 0
|
MelvinTo/firewalla
|
scripts/fireonboot.sh
|
Shell
|
agpl-3.0
| 487 |
#!/bin/bash
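# Estimate capture completeness from bro's conn.log: the fraction of non-local TCP connections whose history records a full ShA handshake.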
self_ip=$(ip addr show dev eth0 | awk '/inet /' | awk '$NF=="eth0" {print $2}' | cut -f1 -d/ | grep -v '^169\.254\.' | head -n 1)
if [[ -z $self_ip ]]; then
echo "Can not find Firewalla's IP address."
exit -1
fi
if [[ ! -f /blog/current/conn.log ]]; then
echo "bro conn.log is not found."
exit -1
fi
cp /blog/current/conn.log /home/pi/conn.log
total=$(cat /home/pi/conn.log | grep -v $self_ip | grep tcp | grep -v "OTH" | grep -v "0.0.0.0" | wc -l)
echo "Total captured tcp connections: $total"
complete=$(cat /home/pi/conn.log | grep -v $self_ip | grep tcp | grep -v "OTH" | grep -v "0.0.0.0" | grep "history\":\"ShA" | wc -l)
echo "Completely captured tcp connections: $complete"
complete_ratio=$(echo "scale=4;$complete/$total" | bc)
echo "Compatibility probability: $complete_ratio"
rm /home/pi/conn.log
exit 0
|
MelvinTo/firewalla
|
scripts/estimate_compatibility.sh
|
Shell
|
agpl-3.0
| 840 |
overlay coqhammer https://github.com/ppedrot/coqhammer rm-eauto-bfs-implem 15324
|
SkySkimmer/coq
|
dev/ci/user-overlays/15324-ppedrot-rm-eauto-bfs-implem.sh
|
Shell
|
lgpl-2.1
| 81 |
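# Compile PokeUtility and bundle the classes, pokedex data, and font into a runnable jar.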
cd "$(dirname "$0")"
javac PokeUtility.java
jar -cvfm PokeUtility.jar manifest Pokemon.class PokeUtility.class Search.class PokeUtilityGUI.class SearchInput.class resources/pokedex/* resources/PressStart2P.ttf
echo "Made excecutable: PokeUtility.jar"
|
hbod8/PokeUtility
|
CreateExecutable.sh
|
Shell
|
apache-2.0
| 250 |
#!/bin/bash
set -e
function finish {
echo "disk space at end of build:"
df -h
}
trap finish EXIT
echo "disk space at beginning of build:"
df -h
# shellcheck source=ci/setup_cache.sh
. "$(dirname "$0")"/setup_cache.sh
read -ra BAZEL_BUILD_EXTRA_OPTIONS <<< "${BAZEL_BUILD_EXTRA_OPTIONS:-}"
read -ra BAZEL_EXTRA_TEST_OPTIONS <<< "${BAZEL_EXTRA_TEST_OPTIONS:-}"
BUILD_CONFIG="$(dirname "$(realpath "$0")")"/osx-build-config
# TODO(zuercher): remove --flaky_test_attempts when https://github.com/envoyproxy/envoy/issues/2428
# is resolved.
BAZEL_BUILD_OPTIONS=(
"--curses=no"
--show_task_finish
--verbose_failures
"--test_output=all"
"--flaky_test_attempts=integration@2"
"--override_repository=envoy_build_config=${BUILD_CONFIG}"
"${BAZEL_BUILD_EXTRA_OPTIONS[@]}"
"${BAZEL_EXTRA_TEST_OPTIONS[@]}")
NCPU=$(sysctl -n hw.ncpu)
if [[ $NCPU -gt 0 ]]; then
echo "limiting build to $NCPU jobs, based on CPU count"
BAZEL_BUILD_OPTIONS+=("--jobs=$NCPU")
fi
# Build envoy and run tests as separate steps so that failure output
# is somewhat more deterministic (rather than interleaving the build
# and test steps).
DEFAULT_TEST_TARGETS=(
"//test/integration:integration_test"
"//test/integration:protocol_integration_test"
"//test/integration:tcp_proxy_integration_test"
"//test/integration:extension_discovery_integration_test"
"//test/integration:listener_lds_integration_test")
if [[ $# -gt 0 ]]; then
TEST_TARGETS=("$@")
else
TEST_TARGETS=("${DEFAULT_TEST_TARGETS[@]}")
fi
if [[ "${TEST_TARGETS[*]}" == "${DEFAULT_TEST_TARGETS[*]}" ]]; then
bazel build "${BAZEL_BUILD_OPTIONS[@]}" //source/exe:envoy-static
fi
bazel test "${BAZEL_BUILD_OPTIONS[@]}" "${TEST_TARGETS[@]}"
# Additionally run macOS specific test suites
bazel test "${BAZEL_BUILD_OPTIONS[@]}" //test/extensions/network/dns_resolver/apple:apple_dns_impl_test
|
envoyproxy/envoy
|
ci/mac_ci_steps.sh
|
Shell
|
apache-2.0
| 1,884 |
#!/usr/bin/env bash
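# Search every cached IntelliJ jar for entries matching the pattern given as $1.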
for i in `find . -name "*.jar" -path "*cache/intellij*"`; do
grep -e $1 <(unzip -l "$i") && echo "Found in $i";
done
|
wisechengyi/intellij-pants-plugin
|
scripts/find_intellij_class_file.sh
|
Shell
|
apache-2.0
| 141 |
#!/bin/bash
# Copyright 2012 Johns Hopkins University (Author: Daniel Povey). Apache 2.0.
echo "*** Warning, this script is deprecated and will eventually be deleted. ***"
echo "*** Please replace with steps/nnet2/train_tanh.sh ***"
# Begin configuration section.
cmd=run.pl
num_epochs=15 # Number of epochs during which we reduce
# the learning rate; number of iteration is worked out from this.
num_epochs_extra=5 # Number of epochs after we stop reducing
# the learning rate.
num_iters_final=10 # Number of final iterations to give to the
# optimization over the validation set.
initial_learning_rate=0.02 # for RM; or 0.01 is suitable for Swbd.
final_learning_rate=0.004 # for RM; or 0.001 is suitable for Swbd.
num_utts_subset=300 # number of utterances in validation and training
# subsets used for shrinkage and diagnostics
num_valid_frames_shrink=0 # number of validation frames in the subset
# used for shrinking
num_train_frames_shrink=2000 # number of training frames in the subset used
# for shrinking (by default we use all training
# frames for this.)
shrink_interval=3 # shrink every $shrink_interval iters,
# except at the start of training when we do it every iter.
within_class_factor=1.0 # affects LDA via scaling of the output (e.g. try setting to 0.01).
num_valid_frames_combine=0 # #valid frames for combination weights at the very end.
num_train_frames_combine=10000 # # train frames for the above.
num_frames_diagnostic=4000 # number of frames for "compute_prob" jobs
minibatch_size=128 # by default use a smallish minibatch size for neural net training; this controls instability
# which would otherwise be a problem with multi-threaded update. Note:
# it also interacts with the "preconditioned" update, so it's not completely cost free.
samples_per_iter=400000 # each iteration of training, see this many samples
# per job. This is just a guideline; it will pick a number
# that divides the number of samples in the entire data.
shuffle_buffer_size=5000 # This "buffer_size" variable controls randomization of the samples
# on each iter. You could set it to 0 or to a large value for complete
# randomization, but this would both consume memory and cause spikes in
# disk I/O. Smaller is easier on disk and memory but less random. It's
# not a huge deal though, as samples are anyway randomized right at the start.
num_jobs_nnet=16 # Number of neural net jobs to run in parallel
feat_type=
initial_dropout_scale=
final_dropout_scale=
add_layers_period=2 # by default, add new layers every 2 iterations.
num_hidden_layers=2
initial_num_hidden_layers=1 # we'll add the rest one by one.
num_parameters=2000000 # 2 million parameters by default.
stage=-9
realign_iters=""
beam=10 # for realignment.
retry_beam=40
scale_opts="--transition-scale=1.0 --acoustic-scale=0.1 --self-loop-scale=0.1"
parallel_opts="-pe smp 16" # by default we use 16 threads; this lets the queue know.
io_opts="-tc 5" # for jobs with a lot of I/O, limits the number running at one time.
nnet_config_opts=
splice_width=4 # meaning +- 4 frames on each side for second LDA
lda_dim=250
randprune=4.0 # speeds up LDA.
# If alpha is not set to the empty string, will do the preconditioned update.
alpha=4.0
shrink=true
mix_up=0 # Number of components to mix up to (should be > #tree leaves, if
# specified.)
num_threads=16 # Number of threads to run in parallel; you need to
# keep this in sync with parallel_opts.
random_copy=false
cleanup=true
# End configuration section.
echo "$0 $@" # Print the command line for logging
if [ -f path.sh ]; then . ./path.sh; fi
. parse_options.sh || exit 1;
if [ $# != 4 ]; then
echo "Usage: steps/train_nnet_cpu.sh [opts] <data> <lang> <ali-dir> <exp-dir>"
echo " e.g.: steps/train_nnet_cpu.sh data/train data/lang exp/tri3_ali exp/ tri4_nnet"
echo ""
echo "Main options (for others, see top of script file)"
echo " --config <config-file> # config file containing options"
echo " --cmd (utils/run.pl|utils/queue.pl <queue opts>) # how to run jobs."
echo " --num-epochs <#epochs|15> # Number of epochs of main training"
echo " # while reducing learning rate (determines #iterations, together"
echo " # with --samples-per-iter and --num-jobs-nnet)"
echo " --num-epochs-extra <#epochs-extra|5> # Number of extra epochs of training"
echo " # after learning rate fully reduced"
echo " --initial-learning-rate <initial-learning-rate|0.02> # Learning rate at start of training, e.g. 0.02 for small"
echo " # data, 0.01 for large data"
echo " --final-learning-rate <final-learning-rate|0.004> # Learning rate at end of training, e.g. 0.004 for small"
echo " # data, 0.001 for large data"
echo " --num-parameters <num-parameters|2000000> # #parameters. E.g. for 3 hours of data, try 750K parameters;"
echo " # for 100 hours of data, try 10M"
echo " --num-hidden-layers <#hidden-layers|2> # Number of hidden layers, e.g. 2 for 3 hours of data, 4 for 100hrs"
echo " --initial-num-hidden-layers <#hidden-layers|1> # Number of hidden layers to start with."
echo " --add-layers-period <#iters|2> # Number of iterations between adding hidden layers"
echo " --mix-up <#pseudo-gaussians|0> # Can be used to have multiple targets in final output layer,"
echo " # per context-dependent state. Try a number several times #states."
echo " --num-jobs-nnet <num-jobs|8> # Number of parallel jobs to use for main neural net"
echo " # training (will affect results as well as speed; try 8, 16)"
echo " # Note: if you increase this, you may want to also increase"
echo " # the learning rate."
echo " --num-threads <num-threads|16> # Number of parallel threads per job (will affect results"
echo " # as well as speed; may interact with batch size; if you increase"
echo " # this, you may want to decrease the batch size."
echo " --parallel-opts <opts|\"-pe smp 16\"> # extra options to pass to e.g. queue.pl for processes that"
echo " # use multiple threads."
echo " --io-opts <opts|\"-tc 10\"> # Options given to e.g. queue.pl for jobs that do a lot of I/O."
echo " --minibatch-size <minibatch-size|128> # Size of minibatch to process (note: product with --num-threads"
echo " # should not get too large, e.g. >2k)."
echo " --samples-per-iter <#samples|400000> # Number of samples of data to process per iteration, per"
echo " # process."
echo " --splice-width <width|4> # Number of frames on each side to append for feature input"
echo " # (note: we splice processed, typically 40-dimensional frames"
echo " --lda-dim <dim|250> # Dimension to reduce spliced features to with LDA"
echo " --num-iters-final <#iters|10> # Number of final iterations to give to nnet-combine-fast to "
echo " # interpolate parameters (the weights are learned with a validation set)"
echo " --num-utts-subset <#utts|300> # Number of utterances in subsets used for validation and diagnostics"
echo " # (the validation subset is held out from training)"
echo " --num-valid-frames-shrink <#frames|2000> # Number of frames from the validation set used for shrinking"
echo " --num-train-frames-shrink <#frames|0> # Number of frames from the training set used for shrinking"
echo " # (by default it's included in training, which for some reason helps)."
echo " --num-frames-diagnostic <#frames|4000> # Number of frames used in computing (train,valid) diagnostics"
echo " --num-valid-frames-combine <#frames|10000> # Number of frames used in getting combination weights at the"
echo " # very end."
echo " --stage <stage|-9> # Used to run a partially-completed training process from somewhere in"
echo " # the middle."
exit 1;
fi
data=$1
lang=$2
alidir=$3
dir=$4
# Check some files.
for f in $data/feats.scp $lang/L.fst $alidir/ali.1.gz $alidir/final.mdl $alidir/tree; do
[ ! -f $f ] && echo "$0: no such file $f" && exit 1;
done
# Set some variables.
oov=`cat $lang/oov.int`
num_leaves=`gmm-info $alidir/final.mdl 2>/dev/null | awk '/number of pdfs/{print $NF}'` || exit 1;
silphonelist=`cat $lang/phones/silence.csl` || exit 1;
nj=`cat $alidir/num_jobs` || exit 1; # number of jobs in alignment dir...
# in this dir we'll have just one job.
sdata=$data/split$nj
utils/split_data.sh $data $nj
mkdir -p $dir/log
echo $nj > $dir/num_jobs
splice_opts=`cat $alidir/splice_opts 2>/dev/null`
cp $alidir/splice_opts $dir 2>/dev/null
cp $alidir/final.mat $dir 2>/dev/null # any LDA matrix...
cp $alidir/tree $dir
# Get list of validation utterances.
awk '{print $1}' $data/utt2spk | utils/shuffle_list.pl | head -$num_utts_subset \
> $dir/valid_uttlist || exit 1;
awk '{print $1}' $data/utt2spk | utils/filter_scp.pl --exclude $dir/valid_uttlist | \
head -$num_utts_subset > $dir/train_subset_uttlist || exit 1;
## Set up features. Note: these are different from the normal features
## because we have one rspecifier that has the features for the entire
## training set, not separate ones for each batch.
if [ -z $feat_type ]; then
if [ -f $alidir/final.mat ]; then feat_type=lda; else feat_type=delta; fi
fi
echo "$0: feature type is $feat_type"
case $feat_type in
delta) feats="ark,s,cs:utils/filter_scp.pl --exclude $dir/valid_uttlist $sdata/JOB/feats.scp | apply-cmvn --norm-vars=false --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:- ark:- | add-deltas ark:- ark:- |"
valid_feats="ark,s,cs:utils/filter_scp.pl $dir/valid_uttlist $data/feats.scp | apply-cmvn --norm-vars=false --utt2spk=ark:$data/utt2spk scp:$data/cmvn.scp scp:- ark:- | add-deltas ark:- ark:- |"
train_subset_feats="ark,s,cs:utils/filter_scp.pl $dir/train_subset_uttlist $data/feats.scp | apply-cmvn --norm-vars=false --utt2spk=ark:$data/utt2spk scp:$data/cmvn.scp scp:- ark:- | add-deltas ark:- ark:- |"
;;
raw) feats="ark,s,cs:utils/filter_scp.pl --exclude $dir/valid_uttlist $sdata/JOB/feats.scp | apply-cmvn --norm-vars=false --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:- ark:- |"
valid_feats="ark,s,cs:utils/filter_scp.pl $dir/valid_uttlist $data/feats.scp | apply-cmvn --norm-vars=false --utt2spk=ark:$data/utt2spk scp:$data/cmvn.scp scp:- ark:- |"
train_subset_feats="ark,s,cs:utils/filter_scp.pl $dir/train_subset_uttlist $data/feats.scp | apply-cmvn --norm-vars=false --utt2spk=ark:$data/utt2spk scp:$data/cmvn.scp scp:- ark:- |"
;;
lda) feats="ark,s,cs:utils/filter_scp.pl --exclude $dir/valid_uttlist $sdata/JOB/feats.scp | apply-cmvn --norm-vars=false --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:- ark:- | splice-feats $splice_opts ark:- ark:- | transform-feats $dir/final.mat ark:- ark:- |"
valid_feats="ark,s,cs:utils/filter_scp.pl $dir/valid_uttlist $data/feats.scp | apply-cmvn --norm-vars=false --utt2spk=ark:$data/utt2spk scp:$data/cmvn.scp scp:- ark:- | splice-feats $splice_opts ark:- ark:- | transform-feats $dir/final.mat ark:- ark:- |"
train_subset_feats="ark,s,cs:utils/filter_scp.pl $dir/train_subset_uttlist $data/feats.scp | apply-cmvn --norm-vars=false --utt2spk=ark:$data/utt2spk scp:$data/cmvn.scp scp:- ark:- | splice-feats $splice_opts ark:- ark:- | transform-feats $dir/final.mat ark:- ark:- |"
cp $alidir/final.mat $dir
;;
*) echo "$0: invalid feature type $feat_type" && exit 1;
esac
if [ -f $alidir/trans.1 ] && [ $feat_type != "raw" ]; then
echo "$0: using transforms from $alidir"
feats="$feats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk ark:$alidir/trans.JOB ark:- ark:- |"
valid_feats="$valid_feats transform-feats --utt2spk=ark:$data/utt2spk 'ark:cat $alidir/trans.*|' ark:- ark:- |"
train_subset_feats="$train_subset_feats transform-feats --utt2spk=ark:$data/utt2spk 'ark:cat $alidir/trans.*|' ark:- ark:- |"
fi
if [ -f $alidir/raw_trans.1 ] && [ $feat_type == "raw" ]; then
echo "$0: using raw-fMLLR transforms from $alidir"
feats="$feats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk ark:$alidir/raw_trans.JOB ark:- ark:- |"
valid_feats="$valid_feats transform-feats --utt2spk=ark:$data/utt2spk 'ark:cat $alidir/raw_trans.*|' ark:- ark:- |"
train_subset_feats="$train_subset_feats transform-feats --utt2spk=ark:$data/utt2spk 'ark:cat $alidir/raw_trans.*|' ark:- ark:- |"
fi
if [ $stage -le -9 ]; then
echo "$0: working out number of frames of training data"
num_frames=`feat-to-len scp:$data/feats.scp ark,t:- | awk '{x += $2;} END{print x;}'` || exit 1;
echo $num_frames > $dir/num_frames
else
num_frames=`cat $dir/num_frames` || exit 1;
fi
# Working out number of iterations per epoch.
iters_per_epoch=`perl -e "print int($num_frames/($samples_per_iter * $num_jobs_nnet) + 0.5);"` || exit 1;
[ $iters_per_epoch -eq 0 ] && iters_per_epoch=1
samples_per_iter_real=$[$num_frames/($num_jobs_nnet*$iters_per_epoch)]
echo "Every epoch, splitting the data up into $iters_per_epoch iterations,"
echo "giving samples-per-iteration of $samples_per_iter_real (you requested $samples_per_iter)."
## Do LDA on top of whatever features we already have; store the matrix which
## we'll put into the neural network as a constant.
if [ $stage -le -8 ]; then
echo "$0: Accumulating LDA statistics."
$cmd JOB=1:$nj $dir/log/lda_acc.JOB.log \
ali-to-post "ark:gunzip -c $alidir/ali.JOB.gz|" ark:- \| \
weight-silence-post 0.0 $silphonelist $alidir/final.mdl ark:- ark:- \| \
acc-lda --rand-prune=$randprune $alidir/final.mdl "$feats splice-feats --left-context=$splice_width --right-context=$splice_width ark:- ark:- |" ark,s,cs:- \
$dir/lda.JOB.acc || exit 1;
est-lda --within-class-factor=$within_class_factor --dim=$lda_dim $dir/lda.mat $dir/lda.*.acc \
2>$dir/log/lda_est.log || exit 1;
rm $dir/lda.*.acc
fi
##
if [ $initial_num_hidden_layers -gt $num_hidden_layers ]; then
echo "Initial num-hidden-layers $initial_num_hidden_layers is greater than final number $num_hidden_layers";
exit 1;
fi
feat_dim=`feat-to-dim "$train_subset_feats" -` || exit 1;
if [ $stage -le -7 ]; then
echo "$0: initializing neural net";
# The config generator also writes the part of the config corresponding to a
# single hidden layer to $dir/hidden_layer.config; we need this to add new layers.
if [ ! -z "$alpha" ]; then
dropout_opt=
[ ! -z $initial_dropout_scale ] && dropout_opt="--dropout-scale $initial_dropout_scale"
utils/nnet-cpu/make_nnet_config_preconditioned.pl --alpha $alpha $nnet_config_opts \
$dropout_opt \
--learning-rate $initial_learning_rate \
--lda-mat $splice_width $lda_dim $dir/lda.mat \
--initial-num-hidden-layers $initial_num_hidden_layers $dir/hidden_layer.config \
$feat_dim $num_leaves $num_hidden_layers $num_parameters \
> $dir/nnet.config || exit 1;
else
[ ! -z $initial_dropout_scale ] && echo "Dropout without preconditioning unsupported" && exit 1;
utils/nnet-cpu/make_nnet_config.pl $nnet_config_opts \
--learning-rate $initial_learning_rate \
--lda-mat $splice_width $lda_dim $dir/lda.mat \
--initial-num-hidden-layers $initial_num_hidden_layers $dir/hidden_layer.config \
$feat_dim $num_leaves $num_hidden_layers $num_parameters \
> $dir/nnet.config || exit 1;
fi
$cmd $dir/log/nnet_init.log \
nnet-am-init $alidir/tree $lang/topo "nnet-init $dir/nnet.config -|" \
$dir/0.mdl || exit 1;
fi
if [ $stage -le -6 ]; then
echo "Training transition probabilities and setting priors"
$cmd $dir/log/train_trans.log \
nnet-train-transitions $dir/0.mdl "ark:gunzip -c $alidir/ali.*.gz|" $dir/0.mdl \
|| exit 1;
fi
if [ $stage -le -5 ]; then
echo "Compiling graphs of transcripts"
$cmd JOB=1:$nj $dir/log/compile_graphs.JOB.log \
compile-train-graphs $dir/tree $dir/0.mdl $lang/L.fst \
"ark:utils/sym2int.pl --map-oov $oov -f 2- $lang/words.txt < $data/split$nj/JOB/text |" \
"ark:|gzip -c >$dir/fsts.JOB.gz" || exit 1;
fi
cp $alidir/ali.*.gz $dir
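# Query the model for the left/right temporal context it requires, so that
# example extraction below includes enough neighboring frames.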
nnet_context_opts="--left-context=`nnet-am-info $dir/0.mdl 2>/dev/null | grep -w left-context | awk '{print $2}'` --right-context=`nnet-am-info $dir/0.mdl 2>/dev/null | grep -w right-context | awk '{print $2}'`" || exit 1;
if [ $stage -le -4 ]; then
echo "Getting validation and training subset examples."
rm $dir/.error 2>/dev/null
$cmd $dir/log/create_valid_subset.log \
nnet-get-egs $nnet_context_opts "$valid_feats" \
"ark,cs:gunzip -c $dir/ali.*.gz | ali-to-pdf $dir/0.mdl ark:- ark:- | ali-to-post ark:- ark:- |" \
"ark:$dir/valid_all.egs" || touch $dir/.error &
$cmd $dir/log/create_train_subset.log \
nnet-get-egs $nnet_context_opts "$train_subset_feats" \
"ark,cs:gunzip -c $dir/ali.*.gz | ali-to-pdf $dir/0.mdl ark:- ark:- | ali-to-post ark:- ark:- |" \
"ark:$dir/train_subset_all.egs" || touch $dir/.error &
wait;
[ -f $dir/.error ] && exit 1;
echo "Getting subsets of validation examples for shrinking, diagnostics and combination."
$cmd $dir/log/create_valid_subset_shrink.log \
nnet-subset-egs --n=$num_valid_frames_shrink ark:$dir/valid_all.egs \
ark:$dir/valid_shrink.egs || touch $dir/.error &
$cmd $dir/log/create_valid_subset_combine.log \
nnet-subset-egs --n=$num_valid_frames_combine ark:$dir/valid_all.egs \
ark:$dir/valid_combine.egs || touch $dir/.error &
$cmd $dir/log/create_valid_subset_diagnostic.log \
nnet-subset-egs --n=$num_frames_diagnostic ark:$dir/valid_all.egs \
ark:$dir/valid_diagnostic.egs || touch $dir/.error &
$cmd $dir/log/create_train_subset_shrink.log \
nnet-subset-egs --n=$num_train_frames_shrink ark:$dir/train_subset_all.egs \
ark:$dir/train_shrink.egs || touch $dir/.error &
$cmd $dir/log/create_train_subset_combine.log \
nnet-subset-egs --n=$num_train_frames_combine ark:$dir/train_subset_all.egs \
ark:$dir/train_combine.egs || touch $dir/.error &
$cmd $dir/log/create_train_subset_diagnostic.log \
nnet-subset-egs --n=$num_frames_diagnostic ark:$dir/train_subset_all.egs \
ark:$dir/train_diagnostic.egs || touch $dir/.error &
wait
cat $dir/valid_shrink.egs $dir/train_shrink.egs > $dir/shrink.egs
cat $dir/valid_combine.egs $dir/train_combine.egs > $dir/combine.egs
for f in $dir/{shrink,combine,train_diagnostic,valid_diagnostic}.egs; do
[ ! -s $f ] && echo "No examples in file $f" && exit 1;
done
rm $dir/valid_all.egs $dir/train_subset_all.egs $dir/{train,valid}_{shrink,combine}.egs
fi
if [ $stage -le -3 ]; then
mkdir -p $dir/egs
mkdir -p $dir/temp
echo "Creating training examples";
# in $dir/egs, create $num_jobs_nnet separate files with training examples.
# The order is not randomized at this point.
egs_list=
for n in `seq 1 $num_jobs_nnet`; do
egs_list="$egs_list ark:$dir/egs/egs_orig.$n.JOB.ark"
done
echo "Generating training examples on disk"
# The examples will go round-robin to egs_list.
$cmd $io_opts JOB=1:$nj $dir/log/get_egs.JOB.log \
nnet-get-egs $nnet_context_opts "$feats" \
"ark,cs:gunzip -c $dir/ali.JOB.gz | ali-to-pdf $alidir/final.mdl ark:- ark:- | ali-to-post ark:- ark:- |" ark:- \| \
nnet-copy-egs ark:- $egs_list || exit 1;
fi
if [ $stage -le -2 ]; then
# combine all the "egs_orig.JOB.*.scp" (over the $nj splits of the data) and
# then split into multiple parts egs.JOB.*.scp for different parts of the
# data, 0 .. $iters_per_epoch-1.
if [ $iters_per_epoch -eq 1 ]; then
echo "Since iters-per-epoch == 1, just concatenating the data."
for n in `seq 1 $num_jobs_nnet`; do
cat $dir/egs/egs_orig.$n.*.ark > $dir/egs/egs_tmp.$n.0.ark || exit 1;
rm $dir/egs/egs_orig.$n.*.ark || exit 1;
done
else # We'll have to split it up using nnet-copy-egs.
egs_list=
for n in `seq 0 $[$iters_per_epoch-1]`; do
egs_list="$egs_list ark:$dir/egs/egs_tmp.JOB.$n.ark"
done
$cmd $io_opts JOB=1:$num_jobs_nnet $dir/log/split_egs.JOB.log \
nnet-copy-egs --random=$random_copy --srand=JOB \
"ark:cat $dir/egs/egs_orig.JOB.*.ark|" $egs_list '&&' \
rm $dir/egs/egs_orig.JOB.*.ark || exit 1;
fi
fi
if [ $stage -le -1 ]; then
# Next, shuffle the order of the examples in each of those files.
# Each one should not be too large, so we can do this in memory.
echo "Shuffling the order of training examples"
echo "(in order to avoid stressing the disk, these won't all run at once)."
for n in `seq 0 $[$iters_per_epoch-1]`; do
$cmd $io_opts JOB=1:$num_jobs_nnet $dir/log/shuffle.$n.JOB.log \
nnet-shuffle-egs "--srand=\$[JOB+($num_jobs_nnet*$n)]" \
ark:$dir/egs/egs_tmp.JOB.$n.ark ark:$dir/egs/egs.JOB.$n.ark '&&' \
rm $dir/egs/egs_tmp.JOB.$n.ark || exit 1;
done
fi
num_iters_reduce=$[$num_epochs * $iters_per_epoch];
num_iters_extra=$[$num_epochs_extra * $iters_per_epoch];
num_iters=$[$num_iters_reduce+$num_iters_extra]
echo "Will train for $num_epochs + $num_epochs_extra epochs, equalling "
echo " $num_iters_reduce + $num_iters_extra = $num_iters iterations, "
echo " (while reducing learning rate) + (with constant learning rate)."
# up till $last_normal_shrink_iter we will shrink the parameters
# in the normal way using the dev set, but after that we will
# only re-compute the shrinkage parameters periodically.
last_normal_shrink_iter=$[($num_hidden_layers-$initial_num_hidden_layers+1)*$add_layers_period + 2]
mix_up_iter=$last_normal_shrink_iter # this is pretty arbitrary.
x=0
while [ $x -lt $num_iters ]; do
if [ $x -ge 0 ] && [ $stage -le $x ]; then
mdl=$dir/$x.mdl
[ ! -z $initial_dropout_scale ] && mdl="nnet-am-copy --remove-dropout=true $mdl -|"
# Set off jobs doing some diagnostics, in the background.
$cmd $dir/log/compute_prob_valid.$x.log \
nnet-compute-prob "$mdl" ark:$dir/valid_diagnostic.egs &
$cmd $dir/log/compute_prob_train.$x.log \
nnet-compute-prob "$mdl" ark:$dir/train_diagnostic.egs &
if echo $realign_iters | grep -w $x >/dev/null; then
echo "Realigning data (pass $x)"
$cmd JOB=1:$nj $dir/log/align.$x.JOB.log \
nnet-align-compiled $scale_opts --beam=$beam --retry-beam=$retry_beam "$mdl" \
"ark:gunzip -c $dir/fsts.JOB.gz|" "$feats" \
"ark:|gzip -c >$dir/ali.JOB.gz" || exit 1;
fi
echo "Training neural net (pass $x)"
if [ $x -gt 0 ] && \
[ $x -le $[($num_hidden_layers-$initial_num_hidden_layers)*$add_layers_period] ] && \
[ $[($x-1) % $add_layers_period] -eq 0 ]; then
mdl="nnet-init --srand=$x $dir/hidden_layer.config - | nnet-insert $dir/$x.mdl - - |"
else
mdl=$dir/$x.mdl
fi
$cmd $parallel_opts JOB=1:$num_jobs_nnet $dir/log/train.$x.JOB.log \
nnet-shuffle-egs --buffer-size=$shuffle_buffer_size --srand=$x \
ark:$dir/egs/egs.JOB.$[$x%$iters_per_epoch].ark ark:- \| \
nnet-train-parallel --num-threads=$num_threads --minibatch-size=$minibatch_size \
--srand=$x "$mdl" ark:- $dir/$[$x+1].JOB.mdl \
|| exit 1;
nnets_list=
for n in `seq 1 $num_jobs_nnet`; do
nnets_list="$nnets_list $dir/$[$x+1].$n.mdl"
done
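# Geometric learning-rate schedule: lr(x) = initial * (final/initial)^(x/num_iters_reduce),
# then held constant at final_learning_rate once x >= num_iters_reduce.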
learning_rate=`perl -e '($x,$n,$i,$f)=@ARGV; print ($x >= $n ? $f : $i*exp($x*log($f/$i)/$n));' $[$x+1] $num_iters_reduce $initial_learning_rate $final_learning_rate`;
if [ ! -z "$final_dropout_scale" ]; then
dropout_scale=`perl -e "print ($initial_dropout_scale + ($final_dropout_scale-$initial_dropout_scale)*(1+$x)/$num_iters);"`
dropout_opt="--dropout-scale=$dropout_scale"
else
dropout_opt=
fi
$cmd $dir/log/average.$x.log \
nnet-am-average $nnets_list - \| \
nnet-am-copy $dropout_opt --learning-rate=$learning_rate - $dir/$[$x+1].mdl || exit 1;
if $shrink; then
if [ $x -le $last_normal_shrink_iter ] || [ $[$x % $shrink_interval] -eq 0 ]; then
# For earlier iterations (while we've recently been adding layers), or every
# $shrink_interval iterations (default: 3), just do shrinking normally.
mb=$[($num_valid_frames_shrink+$num_train_frames_shrink+$num_threads-1)/$num_threads]
$cmd $parallel_opts $dir/log/shrink.$x.log \
nnet-combine-fast --num-threads=$num_threads --verbose=3 --minibatch-size=$mb \
$dir/$[$x+1].mdl ark:$dir/shrink.egs $dir/$[$x+1].mdl || exit 1;
fi
fi
if [ "$mix_up" -gt 0 ] && [ $x -eq $mix_up_iter ]; then
# mix up.
echo Mixing up from $num_leaves to $mix_up components
$cmd $dir/log/mix_up.$x.log \
nnet-am-mixup --min-count=10 --num-mixtures=$mix_up \
$dir/$[$x+1].mdl $dir/$[$x+1].mdl || exit 1;
fi
rm $nnets_list
fi
x=$[$x+1]
done
rm $dir/final.mdl 2>/dev/null
# At the end, final.mdl will be a combination of the last e.g. 10 models.
nnets_list=()
start=$[$num_iters-$num_iters_final+1]
for x in `seq $start $num_iters`; do
idx=$[$x-$start]
if [ $x -gt $mix_up_iter ]; then
if [ ! -z $initial_dropout_scale ]; then
nnets_list[$idx]="nnet-am-copy --remove-dropout=true $dir/$x.mdl - |"
else
nnets_list[$idx]=$dir/$x.mdl
fi
fi
done
if [ $stage -le $num_iters ]; then
mb=$[($num_valid_frames_combine+$num_train_frames_combine+$num_threads-1)/$num_threads]
$cmd $parallel_opts $dir/log/combine.log \
nnet-combine-fast --num-threads=$num_threads --verbose=3 --minibatch-size=$mb \
"${nnets_list[@]}" ark:$dir/combine.egs $dir/final.mdl || exit 1;
fi
# Compute the probability of the final, combined model with
# the same subset we used for the previous compute_probs, as the
# different subsets will lead to different probs.
$cmd $dir/log/compute_prob_valid.final.log \
nnet-compute-prob $dir/final.mdl ark:$dir/valid_diagnostic.egs &
$cmd $dir/log/compute_prob_train.final.log \
nnet-compute-prob $dir/final.mdl ark:$dir/train_diagnostic.egs &
echo Done
if $cleanup; then
echo Cleaning up data
echo Removing training examples
rm -r $dir/egs
echo Removing most of the models
for x in `seq 0 $num_iters`; do
if [ $[$x%10] -ne 0 ] && [ $x -lt $[$num_iters-$num_iters_final+1] ]; then
# delete all but every 10th model; don't delete the ones which combine to form the final model.
rm $dir/$x.mdl
fi
done
fi
|
chagge/Kaldi-timit
|
s5/steps/train_nnet_cpu.sh
|
Shell
|
apache-2.0
| 27,786 |
#!/bin/bash
# Copyright (c) 2016 Pani Networks
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Suppress output
exec > /dev/null
# Set environment variables
export OS_TOKEN="{{ openstack_service_token }}"
export OS_URL="http://{{ romana_master_ip }}:35357/v3"
export OS_IDENTITY_API_VERSION=3
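# Each step below is idempotent: resources are created only if they do not already exist.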
# Neutron user
if ! openstack user show neutron 2>/dev/null; then
openstack user create --domain default --password "{{ stack_password }}" neutron
fi
if ! [[ $(openstack role assignment list --project service --user neutron --role admin -f value -c Role) ]]; then
openstack role add --project service --user neutron admin
fi
# Neutron service
if ! openstack service show neutron 2>/dev/null; then
openstack service create --name neutron --description "OpenStack Networking" network
fi
service_id=$(openstack service show -f value -c id neutron)
# Neutron endpoints
if ! [[ $(openstack endpoint list --service "$service_id" --interface public -f value -c ID 2>/dev/null) ]]; then
openstack endpoint create --region RegionOne network public "http://{{ romana_master_ip }}:9696"
fi
if ! [[ $(openstack endpoint list --service "$service_id" --interface internal -f value -c ID 2>/dev/null) ]]; then
openstack endpoint create --region RegionOne network internal "http://{{ romana_master_ip }}:9696"
fi
if ! [[ $(openstack endpoint list --service "$service_id" --interface admin -f value -c ID 2>/dev/null) ]]; then
openstack endpoint create --region RegionOne network admin "http://{{ romana_master_ip }}:9696"
fi
|
mortensteenrasmussen/romana
|
romana-install/roles/stack/openstack/install-controller/templates/neutron-post-install.sh
|
Shell
|
apache-2.0
| 2,031 |
#!/bin/sh
bin/test_main conf/test_stack.cf
|
hankwing/Squirrel
|
library/acl/unit_test/stack.sh
|
Shell
|
apache-2.0
| 43 |
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
set -e
set -u
set -o pipefail
python3 -m pip install -f https://release.oneflow.info oneflow==0.6.0+cpu
|
dmlc/tvm
|
docker/install/ubuntu_install_oneflow.sh
|
Shell
|
apache-2.0
| 903 |
#!/bin/bash
set -u
set -e
srcdir="${SRCDIR:-/tmp}"
jobs_=`nproc`
cxxflags="\
-stdlib=libc++ -std=c++14 \
-I/usr/local/include/c++/v1 \
-Wno-sign-compare \
-Wno-reserved-user-defined-literal"
export LD_LIBRARY_PATH=/usr/local/lib
cd $srcdir
if [ ! -d "wangle" ]; then
git clone https://github.com/rcgoodfellow/wangle.git
fi
cd wangle/wangle
mkdir -p build
cd build
cmake \
-DCMAKE_CXX_COMPILER=clang++ \
-DCMAKE_C_COMPILER=clang \
-DCMAKE_CXX_FLAGS="$cxxflags" \
.. \
-G Ninja
ninja
sudo ninja install
|
marinatb/marinatb
|
deps/wangle.sh
|
Shell
|
bsd-2-clause
| 529 |
#!/bin/bash
cd lib
ln -s libembree.so.* libembree.so
cd ..
cp -rv * "${PREFIX}"
|
astrohckr/pyembree
|
recipes/embree/build.sh
|
Shell
|
bsd-2-clause
| 80 |
VPN=$(nmcli conn show --active | grep vpn | cut -d' ' -f1)
if [ -n "$VPN" ]; then
echo " $VPN"
fi
|
Jhebes/Configs
|
i3/scripts/vpn.sh
|
Shell
|
bsd-3-clause
| 107 |
#
# Get the absolute location of this script so we can correctly symlink the files
# and other dependencies can also force bigpipe in to submission by manually calling
# this supplied symlink file.
#
ROOT="$( cd "$( dirname "$0" )" && pwd )"
#
# We assume that all the sub projects are in the previous folder.
#
rm -rf $ROOT/node_modules/pagelet
ln -s ../../pagelet $ROOT/node_modules
rm -rf $ROOT/node_modules/bootstrap-pagelet
ln -s ../../bootstrap-pagelet $ROOT/node_modules
rm -rf $ROOT/node_modules/404-pagelet
ln -s ../../404-pagelet $ROOT/node_modules
rm -rf $ROOT/node_modules/500-pagelet
ln -s ../../500-pagelet $ROOT/node_modules
rm -rf $ROOT/node_modules/bigpipe.js
ln -s ../../bigpipe.js $ROOT/node_modules
rm -rf $ROOT/node_modules/temper
ln -s ../../temper $ROOT/node_modules
|
PatidarWeb/bigpipe
|
locallink.sh
|
Shell
|
mit
| 796 |
#!/bin/sh
set -e
echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
SWIFT_STDLIB_PATH="${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}"
install_framework()
{
if [ -r "${BUILT_PRODUCTS_DIR}/$1" ]; then
local source="${BUILT_PRODUCTS_DIR}/$1"
elif [ -r "${BUILT_PRODUCTS_DIR}/$(basename "$1")" ]; then
local source="${BUILT_PRODUCTS_DIR}/$(basename "$1")"
elif [ -r "$1" ]; then
local source="$1"
fi
local destination="${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
if [ -L "${source}" ]; then
echo "Symlinked..."
source="$(readlink "${source}")"
fi
# Use filter instead of exclude so missing patterns don't throw errors
echo "rsync -av --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${destination}\""
rsync -av --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${destination}"
local basename
basename="$(basename -s .framework "$1")"
binary="${destination}/${basename}.framework/${basename}"
if ! [ -r "$binary" ]; then
binary="${destination}/${basename}"
fi
# Strip invalid architectures so "fat" simulator / device frameworks work on device
if [[ "$(file "$binary")" == *"dynamically linked shared library"* ]]; then
strip_invalid_archs "$binary"
fi
# Resign the code if required by the build settings to avoid unstable apps
code_sign_if_enabled "${destination}/$(basename "$1")"
# Embed linked Swift runtime libraries. No longer necessary as of Xcode 7.
if [ "${XCODE_VERSION_MAJOR}" -lt 7 ]; then
local swift_runtime_libs
swift_runtime_libs=$(xcrun otool -LX "$binary" | grep --color=never @rpath/libswift | sed -E s/@rpath\\/\(.+dylib\).*/\\1/g | uniq -u && exit ${PIPESTATUS[0]})
for lib in $swift_runtime_libs; do
echo "rsync -auv \"${SWIFT_STDLIB_PATH}/${lib}\" \"${destination}\""
rsync -auv "${SWIFT_STDLIB_PATH}/${lib}" "${destination}"
code_sign_if_enabled "${destination}/${lib}"
done
fi
}
# Signs a framework with the provided identity
code_sign_if_enabled() {
if [ -n "${EXPANDED_CODE_SIGN_IDENTITY}" -a "${CODE_SIGNING_REQUIRED}" != "NO" -a "${CODE_SIGNING_ALLOWED}" != "NO" ]; then
# Use the current code_sign_identity
echo "Code Signing $1 with Identity ${EXPANDED_CODE_SIGN_IDENTITY_NAME}"
echo "/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} --preserve-metadata=identifier,entitlements \"$1\""
/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} --preserve-metadata=identifier,entitlements "$1"
fi
}
# Strip invalid architectures
strip_invalid_archs() {
binary="$1"
# Get architectures for current file
archs="$(lipo -info "$binary" | rev | cut -d ':' -f1 | rev)"
stripped=""
for arch in $archs; do
if ! [[ "${VALID_ARCHS}" == *"$arch"* ]]; then
# Strip non-valid architectures in-place
lipo -remove "$arch" -output "$binary" "$binary" || exit 1
stripped="$stripped $arch"
fi
done
if [[ "$stripped" ]]; then
echo "Stripped $binary of architectures:$stripped"
fi
}
if [[ "$CONFIGURATION" == "Debug" ]]; then
install_framework "Pods/Masonry.framework"
fi
if [[ "$CONFIGURATION" == "Release" ]]; then
install_framework "Pods/Masonry.framework"
fi
|
pekkapulli/2020lifecount-ios
|
Pods/Target Support Files/Pods/Pods-frameworks.sh
|
Shell
|
mit
| 3,516 |
#!/bin/sh
NODE=/opt/node/bin/node
JSDOC=/opt/node/node_modules/jsdoc/jsdoc.js
SRC_ROOT=/depot/dsjs
# Generate JSDoc documentation
$NODE $JSDOC $SRC_ROOT/lib -c $SRC_ROOT/build/jsdocs-conf.json $SRC_ROOT/README.md
cd $SRC_ROOT
tar --exclude=.idea --exclude=dsjs.iml --exclude=.git -cvzf dsjslib.tar.gz .
|
monmohan/dsjslib
|
build/build.sh
|
Shell
|
mit
| 284 |
#!/bin/bash
DIR="languages"
DOMAIN="wpclef"
POT="$DIR/$DOMAIN.pot"
SOURCES="*.php"
# Create template
echo "Creating POT"
rm -f $POT
xgettext --copyright-holder="Clef, Inc." \
--package-name="WPClef" \
--package-version="2.3.4" \
--msgid-bugs-address="[email protected]" \
--language=PHP \
--sort-output \
--keyword=__ \
--keyword=_e \
--from-code=UTF-8 \
--output=$POT \
--default-domain=$DOMAIN \
`find . -type f -name "*.php" | grep -v ./node_modules | grep -v ./build`
# Update language .po files
for FILE in languages/*.po
do
LANG=${FILE#languages\/clef\-}
LANG=${LANG%\.po}
echo "Updating language file for $LANG from $POT"
msgmerge --sort-output --update --backup=off $FILE $POT
done
# Sync with Transifex
tx push -s -t
tx pull -af
# Compile language .po files to .mo
for FILE in languages/*.po
do
LANG=${FILE#languages\/clef\-}
LANG=${LANG%\.po}
echo "Compiling $LANG.po to $LANG.mo"
msgfmt --check --verbose --output-file=languages/clef-$LANG.mo $FILE
done
|
nickbass/newcecs300.dev
|
wp-content/plugins/wpclef/update_translations.sh
|
Shell
|
gpl-2.0
| 1,054 |
#!/bin/sh
export PATH="/bin:/sbin:/usr/sbin:/usr/bin"
gateway=`route -n get default | grep gateway | awk '{print $2}'`
{{range $i, $ip := .Ips}}route delete {{$ip.Ip}}/{{$ip.Cidr}} "${gateway}"
{{end}}
|
sabersalv/freedom-routes
|
routes/templates/mac/routes-down.sh
|
Shell
|
gpl-3.0
| 204 |
#!/bin/sh
echo "******************************************************************"
echo " HELIOS base"
echo " Created by Hidehisa Akiyama and Hiroki Shimora"
echo " Copyright 2000-2007. Hidehisa Akiyama"
echo " Copyright 2007-2012. Hidehisa Akiyama and Hiroki Shimora"
echo " All rights reserved."
echo "******************************************************************"
LIBPATH=/usr/local/lib
if [ x"$LIBPATH" != x ]; then
if [ x"$LD_LIBRARY_PATH" = x ]; then
LD_LIBRARY_PATH=$LIBPATH
else
LD_LIBRARY_PATH=$LIBPATH:$LD_LIBRARY_PATH
fi
export LD_LIBRARY_PATH
fi
DIR=`dirname $0`
player="${DIR}/sample_player"
coach="${DIR}/sample_coach"
teamname="HELIOS_base"
host="localhost"
port=6000
coach_port=""
debug_server_host=""
debug_server_port=""
player_conf="${DIR}/player.conf"
config_dir="${DIR}/formations-dt"
coach_conf="${DIR}/coach.conf"
team_graphic="--use_team_graphic off"
number=3
usecoach="true"
unum=0
sleepprog=sleep
goaliesleep=1
sleeptime=0
debugopt=""
coachdebug=""
offline_logging=""
offline_mode=""
fullstateopt=""
usage()
{
(echo "Usage: $0 [options]"
echo "Available options:"
echo " --help prints this"
echo " -h, --host HOST specifies server host (default: localhost)"
echo " -p, --port PORT specifies server port (default: 6000)"
echo " -P --coach-port PORT specifies server port for online coach (default: 6002)"
echo " -t, --teamname TEAMNAME specifies team name"
echo " -n, --number NUMBER specifies the number of players"
echo " -u, --unum UNUM specifies the uniform number of players"
echo " -C, --without-coach specifies not to run the coach"
echo " -f, --formation DIR specifies the formation directory"
echo " --team-graphic FILE specifies the team graphic xpm file"
echo " --offline-logging writes offline client log (default: off)"
echo " --offline-client-mode starts as an offline client (default: off)"
echo " --debug writes debug log (default: off)"
echo " --debug-server-connect connects to the debug server (default: off)"
echo " --debug-server-host HOST specifies debug server host (default: localhost)"
echo " --debug-server-port PORT specifies debug server port (default: 6032)"
echo " --debug-server-logging writes debug server log (default: off)"
echo " --log-dir DIRECTORY specifies debug log directory (default: /tmp)"
echo " --debug-log-ext EXTENSION specifies debug log file extension (default: .log)"
echo " --fullstate FULLSTATE_TYPE specifies fullstate model handling"
echo " FULLSTATE_TYPE is one of [ignore|reference|override].") 1>&2
}
while [ $# -gt 0 ]
do
case $1 in
--help)
usage
exit 0
;;
-h|--host)
if [ $# -lt 2 ]; then
usage
exit 1
fi
host="${2}"
shift 1
;;
-p|--port)
if [ $# -lt 2 ]; then
usage
exit 1
fi
port="${2}"
shift 1
;;
-P|--coach-port)
if [ $# -lt 2 ]; then
usage
exit 1
fi
coach_port="${2}"
shift 1
;;
-t|--teamname)
if [ $# -lt 2 ]; then
usage
exit 1
fi
teamname="${2}"
shift 1
;;
-n|--number)
if [ $# -lt 2 ]; then
usage
exit 1
fi
number="${2}"
shift 1
;;
-u|--unum)
if [ $# -lt 2 ]; then
usage
exit 1
fi
unum="${2}"
shift 1
;;
-C|--without-coach)
usecoach="false"
;;
-f|--formation)
if [ $# -lt 2 ]; then
usage
exit 1
fi
config_dir="${2}"
shift 1
;;
--team-graphic)
if [ $# -lt 2 ]; then
usage
exit 1
fi
team_graphic="--use_team_graphic on --team_graphic_file ${2}"
shift 1
;;
--offline-logging)
offline_logging="--offline_logging"
;;
--offline-client-mode)
offline_mode="on"
;;
--debug)
debugopt="${debugopt} --debug"
coachdebug="${coachdebug} --debug"
;;
--debug-server-connect)
debugopt="${debugopt} --debug_server_connect"
;;
--debug-server-host)
if [ $# -lt 2 ]; then
usage
exit 1
fi
debug_server_host="${2}"
shift 1
;;
--debug-server-port)
if [ $# -lt 2 ]; then
usage
exit 1
fi
debug_server_port="${2}"
shift 1
;;
--debug-server-logging)
debugopt="${debugopt} --debug_server_logging"
;;
--log-dir)
if [ $# -lt 2 ]; then
usage
exit 1
fi
debugopt="${debugopt} --log_dir ${2}"
shift 1
;;
--debug-log-ext)
if [ $# -lt 2 ]; then
usage
exit 1
fi
debugopt="${debugopt} --debug_log_ext ${2}"
shift 1
;;
--fullstate)
if [ $# -lt 2 ]; then
usage
exit 1
fi
fullstate_type="${2}"
shift 1
case "${fullstate_type}" in
ignore)
fullstateopt="--use_fullstate false --debug_fullstate false"
;;
reference)
fullstateopt="--use_fullstate false --debug_fullstate true"
;;
override)
fullstateopt="--use_fullstate true --debug_fullstate true"
;;
*)
usage
exit 1
;;
esac
;;
*)
echo 1>&2
echo "invalid option \"${1}\"." 1>&2
echo 1>&2
usage
exit 1
;;
esac
shift 1
done
if [ X"${offline_logging}" != X'' ]; then
if [ X"${offline_mode}" != X'' ]; then
echo "'--offline-logging' and '--offline-mode' cannot be used simultaneously."
exit 1
fi
fi
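# Derive default ports from the server port: coach port = port + 2,
# debug server port = port + 32.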
if [ X"${coach_port}" = X'' ]; then
coach_port=`expr ${port} + 2`
fi
if [ X"${debug_server_host}" = X'' ]; then
debug_server_host="${host}"
fi
if [ X"${debug_server_port}" = X'' ]; then
debug_server_port=`expr ${port} + 32`
fi
opt="--player-config ${player_conf} --config_dir ${config_dir}"
opt="${opt} -h ${host} -p ${port} -t ${teamname}"
opt="${opt} ${fullstateopt}"
opt="${opt} --debug_server_host ${debug_server_host}"
opt="${opt} --debug_server_port ${debug_server_port}"
opt="${opt} ${offline_logging}"
opt="${opt} ${debugopt}"
ping -c 1 $host
if [ $number -gt 0 ]; then
offline_number=""
if [ X"${offline_mode}" != X'' ]; then
offline_number="--offline_client_number 1"
if [ $unum -eq 0 ]; then
$player ${opt} -g ${offline_number} &
$sleepprog $goaliesleep
elif [ $unum -eq 1 ]; then
$player ${opt} -g ${offline_number} &
$sleepprog $goaliesleep
fi
else
$player ${opt} -g &
$sleepprog $goaliesleep
fi
fi
i=2
while [ $i -le ${number} ] ; do
offline_number=""
if [ X"${offline_mode}" != X'' ]; then
offline_number="--offline_client_number ${i}"
if [ $unum -eq 0 ]; then
$player ${opt} ${offline_number} &
$sleepprog $sleeptime
elif [ $unum -eq $i ]; then
$player ${opt} ${offline_number} &
$sleepprog $sleeptime
fi
else
$player ${opt} &
$sleepprog $sleeptime
fi
i=`expr $i + 1`
done
if [ "${usecoach}" = "true" ]; then
coachopt="--coach-config ${coach_conf}"
coachopt="${coachopt} -h ${host} -p ${coach_port} -t ${teamname}"
coachopt="${coachopt} ${team_graphic}"
coachopt="${coachopt} --debug_server_host ${debug_server_host}"
coachopt="${coachopt} --debug_server_port ${debug_server_port}"
coachopt="${coachopt} ${offline_logging}"
coachopt="${coachopt} ${debugopt}"
if [ X"${offline_mode}" != X'' ]; then
offline_mode="--offline_client_mode"
if [ $unum -eq 0 ]; then
$coach ${coachopt} ${offline_mode} &
elif [ $unum -eq 12 ]; then
$coach ${coachopt} ${offline_mode} &
fi
else
$coach ${coachopt} &
fi
fi
|
devanshujain919/robocup-attack-watchdogs
|
src/oppstart.sh
|
Shell
|
gpl-3.0
| 7,923 |
#!/bin/bash
BASE_DIRECTORY="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
pushd $BASE_DIRECTORY > /dev/null
PACKAGEDIR=`rospack find rgbdslam`/
if [[ "$1" == "" ]]; then
echo "This script will run rgbdslam on all bagfiles in this directory."
echo "Usage: $0 <Directory for results>"
TESTNAME=`date +%Y-%m-%d_%H:%M`
echo "No directory given, using $TESTNAME."
sleep 2
else
TESTNAME=$1
fi
export ROS_MASTER_URI=http://localhost:11386
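# 11386 is a non-default ROS master port, presumably chosen so these tests do
# not clash with an already-running roscore (see the commented-out launch below).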
#roscore -p 11386&
#ROSCOREPID=$!
#echo Waiting for roscore
#sleep 3
for CANDIDATES in 4; do
for OBS_EVAL in 0.00; do
for RANSAC_ITER in 100; do
for OPT_SKIP in 10; do #online/offline
for FEAT_TYPE in ORB; do
echo Evaluating $FEAT_TYPE
echo "Will evaluate RGBD-SLAM on the following bagfiles:"
SELECTION=`ls *plant.bag`
echo $SELECTION
for MAXFEATURES in 600; do
#PARAM_DIRECTORY="$BASE_DIRECTORY/$1/emm__$OBS_EVAL/CANDIDATES_$CANDIDATES/RANSAC_$RANSAC_ITER/SOLVER_$DISTANCEMSR/NN_$NN_RATIO/OPT_SKIP_$OPT_SKIP/${FEAT_TYPE}/${MAXFEATURES}_Features/"
PARAM_DIRECTORY="$BASE_DIRECTORY/$TESTNAME/emm__$OBS_EVAL/CANDIDATES_$CANDIDATES/RANSAC_$RANSAC_ITER/OPT_SKIP_$OPT_SKIP/${FEAT_TYPE}/${MAXFEATURES}_Features/"
for bagfile in $SELECTION; do
BASE_NAME=`basename $bagfile .bag`
DIRECTORY="$PARAM_DIRECTORY/$BASE_NAME"
LAUNCHFILE=`rospack find rgbdslam`/test/test_settings.launch
mkdir -p $DIRECTORY
if zgrep -q Coordinate $DIRECTORY/*estimate.txt.gz 2> /dev/null; then
echo There are already results for $BASE_NAME in $DIRECTORY. Will skip this bagfile >&2
continue #don't overwrite existing results
fi
if grep -q Coordinate $DIRECTORY/*estimate.txt* 2> /dev/null; then
echo There are already results for $BASE_NAME in $DIRECTORY. Will skip this bagfile >&2
continue #don't overwrite existing results
fi
#Remove old summary results if a new individual one is computed (will be recomputed further below)
rm $PARAM_DIRECTORY/ate_evaluation_*.csv 2> /dev/null
echo `date +%H:%M:%S` Results for $BASE_NAME are stored in `readlink -f $DIRECTORY`
roslaunch rgbdslam `basename $LAUNCHFILE` bagfile_name:=`readlink -f $bagfile` match_candidates:=$CANDIDATES \
sampled_candidates:=$CANDIDATES feature_type:=$FEAT_TYPE \
max_keypoints:=$MAXFEATURES ransac_iterations:=$RANSAC_ITER \
optimizer_skip_step:=$OPT_SKIP observability_threshold:=$OBS_EVAL \
gui:=false > $DIRECTORY/logfile 2>&1
#rosparam get /rgbdslam/config >> $DIRECTORY/logfile 2>&1
echo `date +%H:%M:%S` Finished processing $BASE_NAME
#Move Result files, run evaluation routine
mv ${bagfile}?* $DIRECTORY/
cp ${BASE_NAME}-groundtruth.txt $DIRECTORY/
cp $LAUNCHFILE $DIRECTORY/settings.xml #renamed to avoid name conflict with original file in roslaunch command
pushd $DIRECTORY > /dev/null
test -e logfile.gz && mv logfile.gz logfile-failed.gz # retain one previous run
gzip logfile
popd > /dev/null
done
$PACKAGEDIR/rgbd_benchmark/summarize_evaluation.sh $PARAM_DIRECTORY
done
done
done
done
done
done
popd > /dev/null
|
mylxiaoyi/rgbdslam_v2
|
test/run_tests.sh
|
Shell
|
gpl-3.0
| 3,693 |
#!/bin/bash
set -e
# Wrapper script to run bats tests for various drivers.
# Usage: DRIVER=[driver] ./run-bats.sh [subtest]
function quiet_run () {
if [[ "$VERBOSE" == "1" ]]; then
"$@"
else
"$@" &>/dev/null
fi
}
function cleanup_machines() {
if [[ $(machine ls -q | wc -l) -ne 0 ]]; then
quiet_run machine rm -f $(machine ls -q)
fi
}
function cleanup_store() {
if [[ -d "$MACHINE_STORAGE_PATH" ]]; then
rm -r "$MACHINE_STORAGE_PATH"
fi
}
function machine() {
export PATH="$MACHINE_ROOT"/bin:$PATH
"$MACHINE_ROOT"/bin/"$MACHINE_BIN_NAME" "$@"
}
function run_bats() {
for bats_file in $(find "$1" -name \*.bats); do
export NAME="bats-$DRIVER-test-$(date +%s)"
# BATS returns non-zero to indicate the tests have failed; we shouldn't
# necessarily bail in this case, so that's the reason for the e toggle.
echo "=> $bats_file"
set +e
bats "$bats_file"
if [[ $? -ne 0 ]]; then
EXIT_STATUS=1
fi
set -e
echo
cleanup_machines
done
}
# Set this ourselves in case bats call fails
EXIT_STATUS=0
export BATS_FILE="$1"
if [[ -z "$DRIVER" ]]; then
echo "You must specify the DRIVER environment variable."
exit 1
fi
if [[ -z "$BATS_FILE" ]]; then
echo "You must specify a bats test to run."
exit 1
fi
if [[ ! -e "$BATS_FILE" ]]; then
echo "Requested bats file or directory not found: $BATS_FILE"
exit 1
fi
# TODO: Should the script bail out if these are set already?
export BASE_TEST_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
export MACHINE_ROOT="$BASE_TEST_DIR/../.."
export MACHINE_STORAGE_PATH="/tmp/machine-bats-test-$DRIVER"
export MACHINE_BIN_NAME=docker-machine
export BATS_LOG="$MACHINE_ROOT/bats.log"
export B2D_LOCATION=~/.docker/machine/cache/boot2docker.iso
# This function gets used in the integration tests, so export it.
export -f machine
> "$BATS_LOG"
cleanup_machines
cleanup_store
if [[ "$B2D_CACHE" == "1" ]] && [[ -f $B2D_LOCATION ]]; then
mkdir -p "${MACHINE_STORAGE_PATH}/cache"
cp $B2D_LOCATION "${MACHINE_STORAGE_PATH}/cache/boot2docker.iso"
fi
run_bats "$BATS_FILE"
cleanup_store
exit ${EXIT_STATUS}
|
NeilW/machine
|
test/integration/run-bats.sh
|
Shell
|
apache-2.0
| 2,253 |
#!/bin/bash
#
# Ensure that the host OS packages are current
#
# On an Atomic host, upgrade the host tree
# On traditional host, update RPMs
#
# ENVVARS:
# WC_NOTIFY: a curl CLI fragment to notify OpenStack Heat of the completion
# status of the script.
# Provided by an OpenStack WaitCondition object
# Exit on fail, bad VAR expansion
set -eux
# return the last (right most) non-zero status from pipes (or 0 on success)
set -o pipefail
source /usr/local/share/openshift-on-openstack/common_functions.sh
# ============================================================================
# MAIN
# ============================================================================
[ "$SYSTEM_UPDATE" = "True" ] || exit 0
# Check for Atomic Host
if [ -e /run/ostree-booted ]
then
# Update the OS tree
atomic host upgrade || notify_failure "failed to run 'atomic host upgrade'"
else
# Update using traditional RPMs
retry yum install -y deltarpm || notify_failure "could not install deltarpm"
retry yum -y update || notify_failure "could not update RPMs"
fi
|
redhat-openstack/openshift-on-openstack
|
fragments/host-update.sh
|
Shell
|
apache-2.0
| 1,101 |
#!/bin/bash
# SIGTERM-handler
term_handler() {
echo "Get SIGTERM"
iptables -t nat -D POSTROUTING -o eth0 -j MASQUERADE
/etc/init.d/dnsmasq stop
/etc/init.d/hostapd stop
/etc/init.d/dbus stop
kill -TERM "$child" 2> /dev/null
}
ifconfig wlan0 10.0.0.1/24
if [ -z "$SSID" -a -z "$PASSWORD" ]; then
ssid="Pi3-AP"
password="raspberry"
else
ssid=$SSID
password=$PASSWORD
fi
sed -i "s/ssid=.*/ssid=$ssid/g" /etc/hostapd/hostapd.conf
sed -i "s/wpa_passphrase=.*/wpa_passphrase=$password/g" /etc/hostapd/hostapd.conf
/etc/init.d/dbus start
/etc/init.d/hostapd start
/etc/init.d/dnsmasq start
echo 1 > /proc/sys/net/ipv4/ip_forward
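# Add the NAT masquerade rule only if it is not already present (-C just checks for existence)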
if ! iptables -t nat -C POSTROUTING -o eth0 -j MASQUERADE 2> /dev/null
then
iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE
fi
# setup handlers
trap term_handler SIGTERM
trap term_handler SIGINT  # SIGKILL cannot be trapped; handle Ctrl-C instead
sleep infinity &
child=$!
wait "$child"
|
openthings/znet
|
c.zhostapd_rpi/entrypoint.sh
|
Shell
|
apache-2.0
| 894 |
#!/bin/bash -e
function finish
{
JOBS=$(jobs -rp)
if [[ ! -z ${JOBS} ]] ; then
echo ${JOBS} | xargs kill ;
fi
#rm -f /tmp/sc-test-sreply-01.bin /tmp/sc-test-sreply-01.chsum /tmp/sc-test-sreply-01o.bin
}
trap finish EXIT
./sreply &
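# Give the server a moment to start listening before connecting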
sleep 0.2
dd if=/dev/urandom count=1024 bs=1024 > /tmp/sc-test-sreply-01.bin
cat /tmp/sc-test-sreply-01.bin | pv -rb | socat stdio openssl:127.0.0.1:12345,verify=0,shut-down,linger=1 | pv -rb > /tmp/sc-test-sreply-01o.bin
cat /tmp/sc-test-sreply-01o.bin | shasum > /tmp/sc-test-sreply-01.chsum
cat /tmp/sc-test-sreply-01.bin | shasum --check /tmp/sc-test-sreply-01.chsum
echo "TEST $0 OK"
|
TeskaLabs/Frame_Transporter
|
examples/sreply/test_sreply_01.sh
|
Shell
|
bsd-3-clause
| 633 |
#!/bin/sh
set -e
set -u
set -o pipefail
function on_error {
echo "$(realpath -mq "${0}"):$1: error: Unexpected failure"
}
trap 'on_error $LINENO' ERR
if [ -z ${FRAMEWORKS_FOLDER_PATH+x} ]; then
# If FRAMEWORKS_FOLDER_PATH is not set, then there's nowhere for us to copy
# frameworks to, so exit 0 (signalling the script phase was successful).
exit 0
fi
echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
COCOAPODS_PARALLEL_CODE_SIGN="${COCOAPODS_PARALLEL_CODE_SIGN:-false}"
SWIFT_STDLIB_PATH="${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}"
# Used as a return value for each invocation of the `strip_invalid_archs` function.
STRIP_BINARY_RETVAL=0
# This protects against multiple targets copying the same framework dependency at the same time. The solution
# was originally proposed here: https://lists.samba.org/archive/rsync/2008-February/020158.html
RSYNC_PROTECT_TMP_FILES=(--filter "P .*.??????")
# Copies and strips a vendored framework
install_framework()
{
if [ -r "${BUILT_PRODUCTS_DIR}/$1" ]; then
local source="${BUILT_PRODUCTS_DIR}/$1"
elif [ -r "${BUILT_PRODUCTS_DIR}/$(basename "$1")" ]; then
local source="${BUILT_PRODUCTS_DIR}/$(basename "$1")"
elif [ -r "$1" ]; then
local source="$1"
fi
local destination="${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
if [ -L "${source}" ]; then
echo "Symlinked..."
source="$(readlink "${source}")"
fi
# Use filter instead of exclude so missing patterns don't throw errors.
echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${destination}\""
rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${destination}"
local basename
basename="$(basename -s .framework "$1")"
binary="${destination}/${basename}.framework/${basename}"
if ! [ -r "$binary" ]; then
binary="${destination}/${basename}"
elif [ -L "${binary}" ]; then
echo "Destination binary is symlinked..."
dirname="$(dirname "${binary}")"
binary="${dirname}/$(readlink "${binary}")"
fi
# Strip invalid architectures so "fat" simulator / device frameworks work on device
if [[ "$(file "$binary")" == *"dynamically linked shared library"* ]]; then
strip_invalid_archs "$binary"
fi
# Resign the code if required by the build settings to avoid unstable apps
code_sign_if_enabled "${destination}/$(basename "$1")"
# Embed linked Swift runtime libraries. No longer necessary as of Xcode 7.
if [ "${XCODE_VERSION_MAJOR}" -lt 7 ]; then
local swift_runtime_libs
swift_runtime_libs=$(xcrun otool -LX "$binary" | grep --color=never @rpath/libswift | sed -E s/@rpath\\/\(.+dylib\).*/\\1/g | uniq -u)
for lib in $swift_runtime_libs; do
echo "rsync -auv \"${SWIFT_STDLIB_PATH}/${lib}\" \"${destination}\""
rsync -auv "${SWIFT_STDLIB_PATH}/${lib}" "${destination}"
code_sign_if_enabled "${destination}/${lib}"
done
fi
}
# Copies and strips a vendored dSYM
install_dsym() {
local source="$1"
if [ -r "$source" ]; then
# Copy the dSYM into the target's temp dir.
echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${DERIVED_FILES_DIR}\""
rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${DERIVED_FILES_DIR}"
local basename
basename="$(basename -s .framework.dSYM "$source")"
binary="${DERIVED_FILES_DIR}/${basename}.framework.dSYM/Contents/Resources/DWARF/${basename}"
# Strip invalid architectures so "fat" simulator / device frameworks work on device
if [[ "$(file "$binary")" == *"Mach-O "*"dSYM companion"* ]]; then
strip_invalid_archs "$binary"
fi
if [[ $STRIP_BINARY_RETVAL == 1 ]]; then
# Move the stripped file into its final destination.
echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${DERIVED_FILES_DIR}/${basename}.framework.dSYM\" \"${DWARF_DSYM_FOLDER_PATH}\""
rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${DERIVED_FILES_DIR}/${basename}.framework.dSYM" "${DWARF_DSYM_FOLDER_PATH}"
else
# The dSYM was not stripped at all; in this case, touch a fake folder so the input/output paths from Xcode do not re-execute this script because the file is missing.
touch "${DWARF_DSYM_FOLDER_PATH}/${basename}.framework.dSYM"
fi
fi
}
# Copies the bcsymbolmap files of a vendored framework
install_bcsymbolmap() {
local bcsymbolmap_path="$1"
local destination="${BUILT_PRODUCTS_DIR}"
echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${bcsymbolmap_path}" "${destination}""
rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${bcsymbolmap_path}" "${destination}"
}
# Signs a framework with the provided identity
code_sign_if_enabled() {
if [ -n "${EXPANDED_CODE_SIGN_IDENTITY:-}" -a "${CODE_SIGNING_REQUIRED:-}" != "NO" -a "${CODE_SIGNING_ALLOWED}" != "NO" ]; then
# Use the current code_sign_identity
echo "Code Signing $1 with Identity ${EXPANDED_CODE_SIGN_IDENTITY_NAME}"
local code_sign_cmd="/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} ${OTHER_CODE_SIGN_FLAGS:-} --preserve-metadata=identifier,entitlements '$1'"
if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
code_sign_cmd="$code_sign_cmd &"
fi
echo "$code_sign_cmd"
eval "$code_sign_cmd"
fi
}
# Strip invalid architectures
strip_invalid_archs() {
binary="$1"
# Get architectures for current target binary
binary_archs="$(lipo -info "$binary" | rev | cut -d ':' -f1 | awk '{$1=$1;print}' | rev)"
# Intersect them with the architectures we are building for
intersected_archs="$(echo ${ARCHS[@]} ${binary_archs[@]} | tr ' ' '\n' | sort | uniq -d)"
# If there are no archs supported by this binary then warn the user
if [[ -z "$intersected_archs" ]]; then
echo "warning: [CP] Vendored binary '$binary' contains architectures ($binary_archs) none of which match the current build architectures ($ARCHS)."
STRIP_BINARY_RETVAL=0
return
fi
stripped=""
for arch in $binary_archs; do
if ! [[ "${ARCHS}" == *"$arch"* ]]; then
# Strip non-valid architectures in-place
lipo -remove "$arch" -output "$binary" "$binary"
stripped="$stripped $arch"
fi
done
if [[ "$stripped" ]]; then
echo "Stripped $binary of architectures:$stripped"
fi
STRIP_BINARY_RETVAL=1
}
if [[ "$CONFIGURATION" == "Debug" ]]; then
install_framework "${BUILT_PRODUCTS_DIR}/AutoKeyboardScrollView/AutoKeyboardScrollView.framework"
fi
if [[ "$CONFIGURATION" == "Release" ]]; then
install_framework "${BUILT_PRODUCTS_DIR}/AutoKeyboardScrollView/AutoKeyboardScrollView.framework"
fi
if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
wait
fi
|
honghaoz/AutoKeyboardScrollView
|
Example/Pods/Target Support Files/Pods-ProgrammaticExample/Pods-ProgrammaticExample-frameworks.sh
|
Shell
|
mit
| 7,929 |
#!/bin/bash
if [ -z "$1" ]
then
echo "Called without any arguments. Must be called with host name of server, like test.bewelcome.org"
exit 1
fi
file=`date +%Y-%m-%d`
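# Hit the stats-update endpoint via localhost, overriding the Host header so
# the correct virtual host handles the request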
if /usr/bin/wget --header="Host: $1" http://localhost/about/updatestats -O /tmp/statsupdate$file > /dev/null 2>&1
then
if /bin/grep -q "^success$" /tmp/statsupdate$file && /bin/rm /tmp/statsupdate$file
then
exit 0
else
error="Stats update failed on $1"
fi
else
error="Stats update failed on $1"
fi
echo $error | mail -s "Stats update failed" [email protected]
|
thisismeonmounteverest/rox
|
setup/cron_stats_update.sh
|
Shell
|
gpl-2.0
| 589 |
#!/bin/sh
test_description='working-tree-encoding conversion via gitattributes'
. ./test-lib.sh
GIT_TRACE_WORKING_TREE_ENCODING=1 && export GIT_TRACE_WORKING_TREE_ENCODING
test_expect_success 'setup test files' '
git config core.eol lf &&
text="hallo there!\ncan you read me?" &&
echo "*.utf16 text working-tree-encoding=utf-16" >.gitattributes &&
printf "$text" >test.utf8.raw &&
printf "$text" | iconv -f UTF-8 -t UTF-16 >test.utf16.raw &&
printf "$text" | iconv -f UTF-8 -t UTF-32 >test.utf32.raw &&
# Line ending tests
printf "one\ntwo\nthree\n" >lf.utf8.raw &&
printf "one\r\ntwo\r\nthree\r\n" >crlf.utf8.raw &&
# BOM tests
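# \376\377 (FE FF) is the UTF-16 big-endian BOM; \377\376 (FF FE) is little-endian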
printf "\0a\0b\0c" >nobom.utf16be.raw &&
printf "a\0b\0c\0" >nobom.utf16le.raw &&
printf "\376\777\0a\0b\0c" >bebom.utf16be.raw &&
printf "\777\376a\0b\0c\0" >lebom.utf16le.raw &&
printf "\0\0\0a\0\0\0b\0\0\0c" >nobom.utf32be.raw &&
printf "a\0\0\0b\0\0\0c\0\0\0" >nobom.utf32le.raw &&
printf "\0\0\376\777\0\0\0a\0\0\0b\0\0\0c" >bebom.utf32be.raw &&
printf "\777\376\0\0a\0\0\0b\0\0\0c\0\0\0" >lebom.utf32le.raw &&
# Add only UTF-16 file, we will add the UTF-32 file later
cp test.utf16.raw test.utf16 &&
cp test.utf32.raw test.utf32 &&
git add .gitattributes test.utf16 &&
git commit -m initial
'
test_expect_success 'ensure UTF-8 is stored in Git' '
test_when_finished "rm -f test.utf16.git" &&
git cat-file -p :test.utf16 >test.utf16.git &&
test_cmp_bin test.utf8.raw test.utf16.git
'
test_expect_success 're-encode to UTF-16 on checkout' '
test_when_finished "rm -f test.utf16.raw" &&
rm test.utf16 &&
git checkout test.utf16 &&
test_cmp_bin test.utf16.raw test.utf16
'
test_expect_success 'check $GIT_DIR/info/attributes support' '
test_when_finished "rm -f test.utf32.git" &&
test_when_finished "git reset --hard HEAD" &&
echo "*.utf32 text working-tree-encoding=utf-32" >.git/info/attributes &&
git add test.utf32 &&
git cat-file -p :test.utf32 >test.utf32.git &&
test_cmp_bin test.utf8.raw test.utf32.git
'
for i in 16 32
do
test_expect_success "check prohibited UTF-${i} BOM" '
test_when_finished "git reset --hard HEAD" &&
echo "*.utf${i}be text working-tree-encoding=utf-${i}be" >>.gitattributes &&
echo "*.utf${i}le text working-tree-encoding=utf-${i}LE" >>.gitattributes &&
# Here we add UTF-16 (resp. UTF-32) files with a BOM (big/little-endian)
# but we tell Git to treat them as UTF-16BE/UTF-16LE (resp. UTF-32).
# In these cases the BOM is prohibited.
cp bebom.utf${i}be.raw bebom.utf${i}be &&
test_must_fail git add bebom.utf${i}be 2>err.out &&
test_i18ngrep "fatal: BOM is prohibited .* utf-${i}be" err.out &&
test_i18ngrep "use UTF-${i} as working-tree-encoding" err.out &&
cp lebom.utf${i}le.raw lebom.utf${i}be &&
test_must_fail git add lebom.utf${i}be 2>err.out &&
test_i18ngrep "fatal: BOM is prohibited .* utf-${i}be" err.out &&
test_i18ngrep "use UTF-${i} as working-tree-encoding" err.out &&
cp bebom.utf${i}be.raw bebom.utf${i}le &&
test_must_fail git add bebom.utf${i}le 2>err.out &&
test_i18ngrep "fatal: BOM is prohibited .* utf-${i}LE" err.out &&
test_i18ngrep "use UTF-${i} as working-tree-encoding" err.out &&
cp lebom.utf${i}le.raw lebom.utf${i}le &&
test_must_fail git add lebom.utf${i}le 2>err.out &&
test_i18ngrep "fatal: BOM is prohibited .* utf-${i}LE" err.out &&
test_i18ngrep "use UTF-${i} as working-tree-encoding" err.out
'
test_expect_success "check required UTF-${i} BOM" '
test_when_finished "git reset --hard HEAD" &&
echo "*.utf${i} text working-tree-encoding=utf-${i}" >>.gitattributes &&
cp nobom.utf${i}be.raw nobom.utf${i} &&
test_must_fail git add nobom.utf${i} 2>err.out &&
test_i18ngrep "fatal: BOM is required .* utf-${i}" err.out &&
test_i18ngrep "use UTF-${i}BE or UTF-${i}LE" err.out &&
cp nobom.utf${i}le.raw nobom.utf${i} &&
test_must_fail git add nobom.utf${i} 2>err.out &&
test_i18ngrep "fatal: BOM is required .* utf-${i}" err.out &&
test_i18ngrep "use UTF-${i}BE or UTF-${i}LE" err.out
'
test_expect_success "eol conversion for UTF-${i} encoded files on checkout" '
test_when_finished "rm -f crlf.utf${i}.raw lf.utf${i}.raw" &&
test_when_finished "git reset --hard HEAD^" &&
cat lf.utf8.raw | iconv -f UTF-8 -t UTF-${i} >lf.utf${i}.raw &&
cat crlf.utf8.raw | iconv -f UTF-8 -t UTF-${i} >crlf.utf${i}.raw &&
cp crlf.utf${i}.raw eol.utf${i} &&
cat >expectIndexLF <<-EOF &&
i/lf w/-text attr/text eol.utf${i}
EOF
git add eol.utf${i} &&
git commit -m eol &&
# UTF-${i} with CRLF (Windows line endings)
rm eol.utf${i} &&
git -c core.eol=crlf checkout eol.utf${i} &&
test_cmp_bin crlf.utf${i}.raw eol.utf${i} &&
# Although the file has CRLF in the working tree,
# ensure LF in the index
git ls-files --eol eol.utf${i} >actual &&
test_cmp expectIndexLF actual &&
# UTF-${i} with LF (Unix line endings)
rm eol.utf${i} &&
git -c core.eol=lf checkout eol.utf${i} &&
test_cmp_bin lf.utf${i}.raw eol.utf${i} &&
# The file has LF in the working tree; ensure LF in the index
git ls-files --eol eol.utf${i} >actual &&
test_cmp expectIndexLF actual
'
done
test_expect_success 'check unsupported encodings' '
test_when_finished "git reset --hard HEAD" &&
echo "*.set text working-tree-encoding" >.gitattributes &&
printf "set" >t.set &&
test_must_fail git add t.set 2>err.out &&
test_i18ngrep "true/false are no valid working-tree-encodings" err.out &&
echo "*.unset text -working-tree-encoding" >.gitattributes &&
printf "unset" >t.unset &&
git add t.unset &&
echo "*.empty text working-tree-encoding=" >.gitattributes &&
printf "empty" >t.empty &&
git add t.empty &&
echo "*.garbage text working-tree-encoding=garbage" >.gitattributes &&
printf "garbage" >t.garbage &&
test_must_fail git add t.garbage 2>err.out &&
test_i18ngrep "failed to encode" err.out
'
test_expect_success 'error if encoding round trip is not the same during refresh' '
BEFORE_STATE=$(git rev-parse HEAD) &&
test_when_finished "git reset --hard $BEFORE_STATE" &&
# Add and commit a UTF-16 file but skip the "working-tree-encoding"
# filter. Consequently, the in-repo representation is UTF-16 and not
# UTF-8. This simulates a Git version that has no working tree encoding
# support.
echo "*.utf16le text working-tree-encoding=utf-16le" >.gitattributes &&
echo "hallo" >nonsense.utf16le &&
TEST_HASH=$(git hash-object --no-filters -w nonsense.utf16le) &&
git update-index --add --cacheinfo 100644 $TEST_HASH nonsense.utf16le &&
COMMIT=$(git commit-tree -p $(git rev-parse HEAD) -m "plain commit" $(git write-tree)) &&
git update-ref refs/heads/master $COMMIT &&
test_must_fail git checkout HEAD^ 2>err.out &&
test_i18ngrep "error: .* overwritten by checkout:" err.out
'
test_expect_success 'error if encoding garbage is already in Git' '
BEFORE_STATE=$(git rev-parse HEAD) &&
test_when_finished "git reset --hard $BEFORE_STATE" &&
# Skip the UTF-16 filter for the added file
# This simulates a Git version that has no checkoutEncoding support
cp nobom.utf16be.raw nonsense.utf16 &&
TEST_HASH=$(git hash-object --no-filters -w nonsense.utf16) &&
git update-index --add --cacheinfo 100644 $TEST_HASH nonsense.utf16 &&
COMMIT=$(git commit-tree -p $(git rev-parse HEAD) -m "plain commit" $(git write-tree)) &&
git update-ref refs/heads/master $COMMIT &&
git diff 2>err.out &&
test_i18ngrep "error: BOM is required" err.out
'
test_expect_success 'check roundtrip encoding' '
test_when_finished "rm -f roundtrip.shift roundtrip.utf16" &&
test_when_finished "git reset --hard HEAD" &&
text="hallo there!\nroundtrip test here!" &&
printf "$text" | iconv -f UTF-8 -t SHIFT-JIS >roundtrip.shift &&
printf "$text" | iconv -f UTF-8 -t UTF-16 >roundtrip.utf16 &&
echo "*.shift text working-tree-encoding=SHIFT-JIS" >>.gitattributes &&
# SHIFT-JIS encoded files are round-trip checked by default...
GIT_TRACE=1 git add .gitattributes roundtrip.shift 2>&1 |
grep "Checking roundtrip encoding for SHIFT-JIS" &&
git reset &&
# ... unless we overwrite the Git config!
! GIT_TRACE=1 git -c core.checkRoundtripEncoding=garbage \
add .gitattributes roundtrip.shift 2>&1 |
grep "Checking roundtrip encoding for SHIFT-JIS" &&
git reset &&
# UTF-16 encoded files should not be round-trip checked by default...
! GIT_TRACE=1 git add roundtrip.utf16 2>&1 |
grep "Checking roundtrip encoding for UTF-16" &&
git reset &&
# ... unless we tell Git to check it!
GIT_TRACE=1 git -c core.checkRoundtripEncoding="UTF-16, UTF-32" \
add roundtrip.utf16 2>&1 |
grep "Checking roundtrip encoding for utf-16" &&
git reset &&
# ... unless we tell Git to check it!
# (here we also check that the casing of the encoding is irrelevant)
GIT_TRACE=1 git -c core.checkRoundtripEncoding="UTF-32, utf-16" \
add roundtrip.utf16 2>&1 |
grep "Checking roundtrip encoding for utf-16" &&
git reset
'
test_done
|
Ikke/git
|
t/t0028-working-tree-encoding.sh
|
Shell
|
gpl-2.0
| 9,010 |
#
# $Id: net.west.sh,v 1.6 2004/04/16 19:46:17 mcr Exp $
#
if [ -n "$UML_west_CTL" ]
then
net_eth0="eth0=daemon,10:00:00:ab:cd:ff,unix,$UML_west_CTL,$UML_west_DATA";
elif [ -n "$UML_private_CTL" ]
then
net_eth0="eth0=daemon,10:00:00:ab:cd:ff,unix,$UML_private_CTL,$UML_private_DATA";
else
net_eth0="eth0=mcast,10:00:00:ab:cd:ff,239.192.0.2,40800";
fi
if [ -n "$UML_public_CTL" ]
then
net_eth1="eth1=daemon,10:00:00:64:64:45,unix,$UML_public_CTL,$UML_public_DATA";
else
net_eth1="eth1=mcast,10:00:00:64:64:45,239.192.1.2,31200";
fi
if [ -n "$UML_admin_CTL" ]
then
net_eth2="eth2=daemon,10:00:00:32:64:45,unix,$UML_admin_CTL,$UML_admin_DATA";
else
net_eth2="eth2=mcast,10:00:00:32:64:45,239.192.3.2,31210";
fi
net="$net_eth0 $net_eth1 $net_eth2"
|
qianguozheng/Openswan
|
testing/baseconfigs/net.west.sh
|
Shell
|
gpl-2.0
| 779 |
#!/bin/bash
#
# checks if Wireshark's ABI has changed since the last release (tag)
#
# Copyright 2011 Balint Reczey <[email protected]>
#
# $Id$
#
# Wireshark - Network traffic analyzer
# By Gerald Combs <[email protected]>
# Copyright 1998 Gerald Combs
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
set -e
if test -z "$1"; then
echo "Usage:"
echo "$0 \"<build commands>\""
echo "e.g. $0 \"./autogen.sh && ./configure && make -j3 && make dumpabi\""
exit 1
fi
# build current version
bash -c "$1"
cd `git rev-parse --show-toplevel`
# we are at top level
# Stable branches with releases
#LAST_TAG=`git describe --tags --abbrev=0`
#LAST_TAG_DIR=$LAST_TAG
# Use latest commit
LAST_TAG=HEAD
LAST_TAG_DIR=master
rm -rf $LAST_TAG_DIR
mkdir $LAST_TAG_DIR
git archive $LAST_TAG | tar -x -C $LAST_TAG_DIR
# build latest tag
(cd $LAST_TAG_DIR && bash -c "$1")
exec tools/compare-abis.sh `pwd`/$LAST_TAG_DIR `pwd`
|
boundary/wireshark
|
tools/git-compare-abis.sh
|
Shell
|
gpl-2.0
| 1,585 |
#BSUB-e Beagle5.err.%J
#BSUB-o Beagle5.out.%J
#BSUB-J Beagle5
#BSUB-n 16
cd /scratch/inmegen/100g/phasing
export PATH=$PATH:/scratch/inmegen/100g/references/JAVA/jdk1.8.0_74/bin/
java8 -Xmx12g -jar /home/inmegen/r.garcia/src/beagle.03May16.862.jar gtgl=/scratch/inmegen/100g/wg_GATK/test3/allsamples_final_recaled_snp-indel.vcf chrom=5 ref=/scratch/inmegen/100g/references/1000g-phase_3-allele-frequency/ALL.chr5.phase3_shapeit2_mvncall_integrated_v5_extra_anno.20130502.genotypes.vcf.gz out=chr5 impute=false ibd=true
java8 -Xmx12g -jar /home/inmegen/r.garcia/src/beagle.03May16.862.jar gt=chr5.vcf.gz ref=/scratch/inmegen/100g/references/1000g-phase_3-allele-frequency/ALL.chr5.phase3_shapeit2_mvncall_integrated_v5_extra_anno.20130502.genotypes.vcf.gz out=chr5-phased impute=false ibd=true
|
rgarcia-herrera/100g
|
scripts/phasing-beagle/beagle-chr5.sh
|
Shell
|
gpl-3.0
| 796 |
#!/bin/sh
# Copyright (C) 2018-2020 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
CLANG_FORMAT_VER=9.0
if hash clang-format-${CLANG_FORMAT_VER} 2>/dev/null; then
CLANGFORMAT="$(which clang-format-${CLANG_FORMAT_VER})"
elif hash clang-format-${CLANG_FORMAT_VER%.*} 2>/dev/null; then
CLANGFORMAT="$(which clang-format-${CLANG_FORMAT_VER%.*})"
elif hash clang-format 2>/dev/null; then
CLANGFORMAT="$(which clang-format)"
else
echo "No clang-format found."
exit 2
fi
if ! "${CLANGFORMAT}" --version | grep -qEo "version ${CLANG_FORMAT_VER}\.[0-9]+"; then
echo "Could not find clang-format ${CLANG_FORMAT_VER}. ${CLANGFORMAT} is $(${CLANGFORMAT} --version | grep -Eo '[0-9\.]{5}' | head -n 1)."
exit 2
fi
${CLANGFORMAT} "$@"
|
pkreissl/espresso
|
maintainer/format/clang-format.sh
|
Shell
|
gpl-3.0
| 1,389 |
#! /bin/bash
. acsstartupAcsPorts
CL_SLEEP=0
CL_ERROR=
CL_HELP=
LONGOPTS=help
SHORTOPTS=hs:e:
function printUsage {
echo "Bad"
}
export POSIXLY_CORRECT=1
getopt -n `basename $0` -Q -u -a -l $LONGOPTS $SHORTOPTS "$@" || {
printUsage
exit 43;
}
set -- `getopt -u -a -l $LONGOPTS $SHORTOPTS "$@"`
while :
do
case "$1" in
-s) CL_SLEEP=$2 ; shift ;;
-e) CL_ERROR=$2 ; shift ;;
--help) CL_HELP=true ;;
-h) CL_HELP=true ;;
--) break ;;
esac
shift
done
shift
# restore
export POSIXLY_CORRECT=
unset POSIXLY_CORRECT
if [ "$CL_HELP" ] ; then
printUsage
exit 0
fi
sleep $CL_SLEEP
if [ "$CL_ERROR" = "HANG" ]
then
echo "$@"
while [ "1" ]
do
sleep 1
done
elif [ "$CL_ERROR" != "" ]
then
echo "$CL_ERROR"
exit 1
else
echo "$@"
fi
|
jbarriosc/ACSUFRO
|
LGPL/CommonSoftware/acsstartup/test/acsstartupGenericProcess.sh
|
Shell
|
lgpl-2.1
| 847 |
#!/usr/bin/env bash
mkdir -p $HOME/.binaries
cd $HOME/.binaries
if [ '!' -d apache-maven-3.3.9 ]
then
if [ '!' -f apache-maven-3.3.9-bin.zip ]
then
wget https://archive.apache.org/dist/maven/maven-3/3.3.9/binaries/apache-maven-3.3.9-bin.zip || exit 1
fi
echo "Installing maven 3.3.9"
unzip -qq apache-maven-3.3.9-bin.zip || exit 1
rm -f apache-maven-3.3.9-bin.zip
fi
exit 0
|
DevFactory/afc
|
build-tools/src/main/resources/bash/travis-download-maven.sh
|
Shell
|
apache-2.0
| 394 |
#!/bin/sh
# Copyright 2001,2004-2006 The Apache Software Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
# SimpleAxis2Server Script
#
# Environment Variable Prerequisites
#
# AXIS2_HOME Home of Axis2 installation. If not set I will try
# to figure it out.
#
# JAVA_HOME Must point at your Java Development Kit installation.
#
# NOTE: Borrowed generously from Apache Tomcat startup scripts.
# -----------------------------------------------------------------------------
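# Example invocation (hypothetical values; the flags are parsed further below):
#   ./axis2server.sh -name SimpleServer -http 9000 -https 9002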
# if JAVA_HOME is not set we're not happy
if [ -z "$JAVA_HOME" ]; then
echo "You must set the JAVA_HOME variable before running Axis2 Script."
exit 1
fi
# OS specific support. $var _must_ be set to either true or false.
cygwin=false
os400=false
case "`uname`" in
CYGWIN*) cygwin=true;;
OS400*) os400=true;;
esac
# resolve links - $0 may be a softlink
PRG="$0"
while [ -h "$PRG" ]; do
ls=`ls -ld "$PRG"`
link=`expr "$ls" : '.*-> \(.*\)$'`
if expr "$link" : '.*/.*' > /dev/null; then
PRG="$link"
else
PRG=`dirname "$PRG"`/"$link"
fi
done
# Get standard environment variables
PRGDIR=`dirname "$PRG"`
# Only set AXIS2_HOME if not already set
[ -z "$AXIS2_HOME" ] && AXIS2_HOME=`cd "$PRGDIR" ; pwd`
# For Cygwin, ensure paths are in UNIX format before anything is touched
if $cygwin; then
[ -n "$JAVA_HOME" ] && JAVA_HOME=`cygpath --unix "$JAVA_HOME"`
[ -n "$AXIS2_HOME" ] && AXIS2_HOME=`cygpath --unix "$AXIS2_HOME"`
[ -n "$CLASSPATH" ] && CLASSPATH=`cygpath --path --unix "$CLASSPATH"`
fi
# For OS400
if $os400; then
# Set job priority to standard for interactive (interactive - 6) by using
# the interactive priority - 6, the helper threads that respond to requests
# will be running at the same priority as interactive jobs.
COMMAND='chgjob job('$JOBNAME') runpty(6)'
system $COMMAND
# Enable multi threading
export QIBM_MULTI_THREADED=Y
fi
# update classpath
AXIS2_CLASSPATH="$AXIS2_HOME/../../lib"
for f in "$AXIS2_HOME"/../../repository/components/plugins/*.jar
do
syn_mod=`ls $f | grep org.apache.synapse.module`
if [ ! -e "$syn_mod" ]; then
AXIS2_CLASSPATH="$AXIS2_CLASSPATH":$f
fi
done
for f in "$AXIS2_HOME"/../../lib/*.jar
do
AXIS2_CLASSPATH="$AXIS2_CLASSPATH":$f
done
for f in "$AXIS2_HOME"/../../lib/core/WEB-INF/lib/*.jar
do
AXIS2_CLASSPATH="$AXIS2_CLASSPATH":$f
done
for f in "$AXIS2_HOME"/../../repository/components/extensions/*.jar
do
AXIS2_CLASSPATH="$AXIS2_CLASSPATH":$f
done
for f in "$AXIS2_HOME"/../../repository/components/lib/*.jar
do
AXIS2_CLASSPATH="$AXIS2_CLASSPATH":$f
done
for f in "$AXIS2_HOME"/../../lib/api/*.jar
do
AXIS2_CLASSPATH="$AXIS2_CLASSPATH":$f
done
AXIS2_CLASSPATH="$JAVA_HOME/lib/tools.jar":"$AXIS2_CLASSPATH":"$CLASSPATH"
for f in $AXIS2_HOME/../../repository/lib/*.jar
do
AXIS2_CLASSPATH=$f:$AXIS2_CLASSPATH
done
# For Cygwin, switch paths to Windows format before running java
if $cygwin; then
JAVA_HOME=`cygpath --absolute --windows "$JAVA_HOME"`
AXIS2_HOME=`cygpath --absolute --windows "$AXIS2_HOME"`
CLASSPATH=`cygpath --path --windows "$CLASSPATH"`
JAVA_ENDORSED_DIRS=`cygpath --path --windows "$JAVA_ENDORSED_DIRS"`
fi
# endorsed dir
AXIS2_ENDORSED=$AXIS2_HOME/../../lib/endorsed
echo " Using JAVA_HOME: $JAVA_HOME"
echo " Using AXIS2 Repository : $AXIS2_HOME/repository"
echo " Using AXIS2 Configuration : $AXIS2_HOME/repository/conf/axis2.xml"
HTTP_PORT_SET="FALSE"
HTTPS_PORT_SET="FALSE"
JAVACMD="java"
PROGRAM_PARAMS=""
while [ "$1" != "" ]; do
if [ "$1" = "-xdebug" ]; then
PROGRAM_PARAMS="$PROGRAM_PARAMS""-Xdebug -Xnoagent -Xrunjdwp:transport=dt_socket,server=y,address=8000 "
shift
elif [ "$1" = "-name" ]; then
PROGRAM_PARAMS="$PROGRAM_PARAMS""-Dserver_name=$2 "
shift
shift
elif [ "$1" = "-http" ]; then
PROGRAM_PARAMS="$PROGRAM_PARAMS""-Dhttp_port=$2 "
HTTP_PORT_SET="TRUE"
shift
shift
elif [ "$1" = "-https" ]; then
PROGRAM_PARAMS="$PROGRAM_PARAMS""-Dhttps_port=$2 "
HTTPS_PORT_SET="TRUE"
shift
shift
elif [ "$1" = "test" ]; then
JAVACMD="exec ""$JAVACMD"
shift
fi
done
if [ "$HTTP_PORT_SET" = "FALSE" ]; then
PROGRAM_PARAMS="$PROGRAM_PARAMS""-Dhttp_port=9000 "
fi
if [ "$HTTPS_PORT_SET" = "FALSE" ]; then
PROGRAM_PARAMS="$PROGRAM_PARAMS""-Dhttps_port=9002 "
fi
$JAVACMD -Dcarbon.home="$AXIS2_HOME/../../" $PROGRAM_PARAMS -Djava.io.tmpdir="$AXIS2_HOME/../../tmp/sampleServer" \
-Djava.endorsed.dirs="$AXIS2_ENDORSED" -classpath "$AXIS2_CLASSPATH" org.wso2.bps.samples.util.SampleAxis2Server \
-repo "$AXIS2_HOME/repository" -conf "$AXIS2_HOME/repository/conf/axis2.xml"
|
madhawa-gunasekara/product-ei
|
samples/business-process-samples/product/sample-utils/src/main/scripts/axis2server.sh
|
Shell
|
apache-2.0
| 5,282 |
#!/bin/bash
mvn -Pjar-with-dependencies verify
cp target/fitnesse.jar .
|
alexkogon/OASIS-Maven
|
buildoasis.sh
|
Shell
|
apache-2.0
| 72 |
#!/bin/sh
MD5TOOL=../file_md5
TEMP_DIR=/tmp/wavewritetest_temp$$
OUTPUT=${TEMP_DIR}/test_1.wav
mkdir -p ${TEMP_DIR}
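# Usage sketch (hypothetical values): $1 is forwarded to create_test_essence -d
# (the clip duration) and $2 names the expected md5 file in ${srcdir}, e.g.
#   ./check_write.sh 3 test_1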
BASE_COMMAND="../../apps/raw2bmx/raw2bmx --regtest -t wave -o $OUTPUT -f 25 -y 10:11:12:13 --orig regtest "
# create essence data
../create_test_essence -t 1 -d $1 ${TEMP_DIR}/pcm.raw
# write
$BASE_COMMAND -q 16 --pcm ${TEMP_DIR}/pcm.raw -q 16 --pcm ${TEMP_DIR}/pcm.raw >/dev/null
# calculate md5sum and compare with expected value
$MD5TOOL < $OUTPUT > ${TEMP_DIR}/test.md5
if diff ${TEMP_DIR}/test.md5 ${srcdir}/$2.md5
then
RESULT=0
else
echo "*** ERROR: $2 regression"
RESULT=1
fi
# clean-up
rm -Rf ${TEMP_DIR}
exit $RESULT
|
stuarthicks/bmx
|
test/wave/check_write.sh
|
Shell
|
bsd-3-clause
| 656 |
#!/bin/sh
. ./systemTesting.sh
nproc=2
rank=1
if test "x${MPI_DIR}" = "x"; then
MPI_DIR="/usr/local/mpich"
fi
if test "x${MPI_BINDIR}" = "x"; then
MPI_BINDIR="${MPI_DIR}/bin"
fi
if test "x${MPI_RUN}" = "x"; then
MPI_RUN="${MPI_BINDIR}/mpirun"
fi
if test "x${MPI_NPROC}" = "x"; then
MPI_NPROC="-np"
fi
EXEC="${MPI_RUN} ${MPI_MACHINES} ${MPI_NPROC} ${nproc}"
testname="`basename $0 .sh`"
if test "${UPDATE_MODE}x" = "x"; then
printf "$testname: ";
fi
if ! which Snac 1> /dev/null 2>&1; then
export PATH="$PATH:../../build/bin"
if ! which Snac 1> /dev/null 2>&1; then
echo "Snac could not be found"
exit 1;
else
progname=`which Snac`
fi
else
progname=`which Snac`
fi
if runTest $testname "${EXEC} ${progname} basic-velocity-completeForceCalc.xml"; then
passed="true"
if ! handleTestResult vel.${rank} ${testname}.vel.${rank}.expected; then
passed="false";
else
if ! handleTestResult forceCheckSum.${rank} ${testname}.forceCheckSum.${rank}.expected; then
passed="false";
fi
fi
if $passed = "true"; then
rm -f $testname.out;
fi
else
passed="false"
fi
rm -f *.0 *.1
if test $passed = "false"; then
exit 1
fi
|
bmi-forum/bmi-pyre
|
Snac/tests/test-basic-velocity-completeForceCalc.1of2.sh
|
Shell
|
gpl-2.0
| 1,151 |
#! /bin/sh
# Copyright (C) 1998-2013 Free Software Foundation, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Yet another '+=' test.
. test-init.sh
cat >> configure.ac << 'END'
AC_PROG_CC
END
cat > Makefile.am << 'END'
bin_PROGRAMS = foo
CC += -Dwhatever
END
$ACLOCAL
$AUTOMAKE
$FGREP '@CC@ -Dwhatever' Makefile.in
:
|
DDTChen/CookieVLC
|
vlc/extras/tools/automake/t/pluseq4.sh
|
Shell
|
gpl-2.0
| 904 |
#!/bin/sh
set -e
#CXX=~/sources/llvm/_build/install/bin/clang++
CXX=g++
CXXFLAGS=-O3
export PATH=../../../../scripts:$PATH
export PYTHONPATH=../../../..
# first clean all
find -name '*.pyc' -o -name '*.so' -o -name '*.c' -o -name '*.cpp' | xargs rm -f || true
# then build all cython stuff
for target in cython/*.pyx
do
cython --cplus $target -o ${target%.pyx}.cpp
$CXX $CXXFLAGS `python-config --cflags --libs` -shared ${target%.pyx}.cpp -fPIC -o ${target%.pyx}.so
done
# then build all pythran stuff
for target in pythran/*.py
do
pythran $CXXFLAGS $target -I../../../../build/lib.linux-x86_64-2.7/pythran -o ${target%.py}.so
done
# then run tests
echo "==== arc distance ===="
SETUP='import numpy as np; N=1000000 ; a,b,c,d = np.random.random(N), np.random.random(N), np.random.random(N), np.random.random(N) ; from arc_distance import arc_distance'
RUN='arc_distance(a,b,c,d)'
printf "python " ; PYTHONPATH=./python python -m timeit -s "$SETUP" "$RUN"
printf "cython " ; PYTHONPATH=./cython python -m timeit -s "$SETUP" "$RUN"
printf "pythran " ; PYTHONPATH=./pythran python -m timeit -s "$SETUP" "$RUN"
printf "pypy " ; PYTHONPATH=./pypy pypy -m timeit -s "from random import random ; import numpypy as np; N=1000000 ; a,b,c,d = np.array([random() for _ in range(N)]), np.array([random() for _ in range(N)]), np.array([random() for _ in range(N)]), np.array([random() for _ in range(N)]) ; from arc_distance import arc_distance" "$RUN"
echo "==== blacksholes ===="
SETUP='N=1000 ; from random import random ; a,b,c,d,e,f = [random() for _ in range(1,N)], [random() for _ in range(1,N)], [random() for _ in range(1,N)], 0.5, 0.76, 200 ; from blacksholes import BlackScholes'
RUN='BlackScholes(a,b,c,d,e,f)'
printf "python " ; PYTHONPATH=./python python -m timeit -s "$SETUP" "$RUN"
printf "cython " ; PYTHONPATH=./cython python -m timeit -s "$SETUP" "$RUN"
printf "pythran " ; PYTHONPATH=./pythran python -m timeit -s "$SETUP" "$RUN"
printf "pypy " ; PYTHONPATH=./pypy pypy -m timeit -s "$SETUP" "$RUN"
echo "==== growcut ===="
SETUP='N=20 ; import numpy as np ; image = np.zeros((N, N, 3)) ; state = np.zeros((N, N, 2)) ; state_next = np.empty((N, N, 2)) ; state[0, 0, 0] = 1 ; state[0, 0, 1] = 1 ; from growcut import growcut'
RUN='growcut(image, state, state_next, 10)'
printf "python " ; PYTHONPATH=./python python -m timeit -s "$SETUP" "$RUN"
printf "cython " ; PYTHONPATH=./cython python -m timeit -s "$SETUP" "$RUN"
printf "pythran " ; PYTHONPATH=./pythran python -m timeit -s "$SETUP" "$RUN"
printf "pypy " ; PYTHONPATH=./pypy pypy -m timeit -s "import numpypy ; $SETUP" "$RUN"
echo "==== n queens ===="
SETUP='N=8 ; from nqueens import n_queens'
RUN='n_queens(N)'
printf "python " ; PYTHONPATH=./python python -m timeit -s "$SETUP" "$RUN"
printf "cython " ; PYTHONPATH=./cython python -m timeit -s "$SETUP" "$RUN"
printf "pythran " ; PYTHONPATH=./pythran python -m timeit -s "$SETUP" "$RUN"
printf "pypy " ; PYTHONPATH=./pypy pypy -m timeit -s "$SETUP" "$RUN"
echo "==== rosen_der ===="
SETUP='N=1000000 ; import numpy as np ; r =np.random.random(N) ; from rosen_der import rosen_der'
RUN='rosen_der(r)'
printf "python " ; PYTHONPATH=./python python -m timeit -s "$SETUP" "$RUN"
printf "cython " ; PYTHONPATH=./cython python -m timeit -s "$SETUP" "$RUN"
printf "pythran " ; PYTHONPATH=./pythran python -m timeit -s "$SETUP" "$RUN"
printf "pypy " ; PYTHONPATH=./pypy pypy -m timeit -s "from random import random ; import numpypy as np; N=1000000 ; r = np.array([random() for _ in range(N)]); from rosen_der import rosen_der" "$RUN"
|
pombredanne/pythran
|
docs/papers/iop2014/xp/run_benchmarks.sh
|
Shell
|
bsd-3-clause
| 3,601 |
#m4nual
echo "This provider does not permit downloading ovpn files in an automatic way."
|
Hackplayers/4nonimizer
|
vpn/vpnme/vpn-get-ovpn.sh
|
Shell
|
gpl-3.0
| 89 |
#!/bin/bash
FN="scRNAseq_2.8.0.tar.gz"
URLS=(
"https://bioconductor.org/packages/3.14/data/experiment/src/contrib/scRNAseq_2.8.0.tar.gz"
"https://bioarchive.galaxyproject.org/scRNAseq_2.8.0.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-scrnaseq/bioconductor-scrnaseq_2.8.0_src_all.tar.gz"
)
MD5="251b3fa8a616d99e873e45ea581b7cef"
# Use a staging area in the conda dir rather than temp dirs, both to avoid
# permission issues as well as to have things downloaded in a predictable
# manner.
STAGING=$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM
mkdir -p $STAGING
TARBALL=$STAGING/$FN
SUCCESS=0
for URL in ${URLS[@]}; do
curl $URL > $TARBALL
[[ $? == 0 ]] || continue
# Platform-specific md5sum checks.
if [[ $(uname -s) == "Linux" ]]; then
if md5sum -c <<<"$MD5 $TARBALL"; then
SUCCESS=1
break
fi
  elif [[ $(uname -s) == "Darwin" ]]; then
    if [[ $(md5 $TARBALL | cut -f4 -d " ") == "$MD5" ]]; then
      SUCCESS=1
      break
    fi
  fi
done
if [[ $SUCCESS != 1 ]]; then
echo "ERROR: post-link.sh was unable to download any of the following URLs with the md5sum $MD5:"
printf '%s\n' "${URLS[@]}"
exit 1
fi
# Install and clean up
R CMD INSTALL --library=$PREFIX/lib/R/library $TARBALL
rm $TARBALL
rmdir $STAGING
|
cokelaer/bioconda-recipes
|
recipes/bioconductor-scrnaseq/post-link.sh
|
Shell
|
mit
| 1,288 |
#!/bin/bash
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# The code below is designed to support two specific scenarios for
# using Elasticsearch and Kibana with Kubernetes. In both cases the
# environment variables PROXY_HOST and PROXY_PORT identify the instance
# of Elasticsearch to be used by Kibana. The default value for ES_HOST
# identifies the location that served the Javascript for Kibana and
# the default value of ES_PORT 5601 is the port to be used for connecting
# to Kibana. Both of these may be overridden if required. The two scenarios are:
# 1. Elasticsearch and Kibana containers running in a single pod. In this
# case PROXY_HOST is set to the local host i.e. 127.0.0.1 and the
# PROXY_PORT is set to 9200 because Elasticsearch is running on the
# same name as Kibana. If KIBANA_IP is the external IP address of
# the Kubernetes Kibna service then all requests to:
# KIBANA_SERVICE:$ES_PORT/elasticsearch/XXX
# are proxied to:
# http://127.0.0.1:9200/XXX
# 2. Elasticsearch and Kibana are run in separate pods and Elasticsearch
# has an IP and port exposed via a Kubernetes service. In this case
# the Elasticsearch service *must* be called 'elasticsearch' and then
# all requests sent to:
# KIBANA_SERVICE:$ES_PORT/elasticsearch/XXX
# are proxied to:
# http://$ELASTICSEARCH_SERVICE_HOST:$ELASTICSEARCH_SERVICE_PORT/XXX
# The proxy configuration occurs in a location block of the nginx configuration
# file /etc/nginx/sites-available/default.
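# For example (hypothetical endpoint), a request such as
#   curl http://KIBANA_SERVICE:$ES_PORT/elasticsearch/_cluster/health
# is rewritten by the nginx location block below into
#   http://$PROXY_HOST:$PROXY_PORT/_cluster/health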
set -o errexit
set -o nounset
set -o pipefail
# Report all environment variables containing 'elasticsearch'
set | grep -i elasticsearch
# Set the default value for the Elasticsearch host as seen by the client
# Javascript code for Kibana.
: ${ES_HOST:='"+window.location.hostname+"'}
echo ES_HOST=$ES_HOST
# Set the default port for Elasticsearch host as seen by the client
# Javascript for Kibana.
: ${ES_PORT:=5601}
echo ES_PORT=$ES_PORT
# Set the default host IP and port for Elasticsearch as seen by the proxy
# code in the configuration for nginx. If a Kubernetes Elasticsearch
# service called 'elasticsearch' is defined, use that. Otherwise, use
# a local instance of Elasticsearch on port 9200.
PROXY_HOST=${ELASTICSEARCH_SERVICE_HOST:-127.0.0.1}
echo PROXY_HOST=${PROXY_HOST}
PROXY_PORT=${ELASTICSEARCH_SERVICE_PORT:-9200}
echo PROXY_PORT=${PROXY_PORT}
# Test the connection to Elasticsearch
echo "Running curl http://${PROXY_HOST}:${PROXY_PORT}"
curl http://${PROXY_HOST}:${PROXY_PORT}
# Create a config.js that defines the Elasticsearch server to be
# at http://${ES_HOST}:${ES_PORT}/elasticsearch from the perspective of
# the client Javascript code.
cat << EOF > /usr/share/nginx/html/config.js
/** @scratch /configuration/config.js/1
*
* == Configuration
* config.js is where you will find the core Kibana configuration. This file contains parameter that
* must be set before kibana is run for the first time.
*/
define(['settings'],
function (Settings) {
/** @scratch /configuration/config.js/2
*
* === Parameters
*/
return new Settings({
/** @scratch /configuration/config.js/5
*
* ==== elasticsearch
*
* The URL to your elasticsearch server. You almost certainly don't
* want +http://localhost:9200+ here. Even if Kibana and Elasticsearch are on
* the same host. By default this will attempt to reach ES at the same host you have
* kibana installed on. You probably want to set it to the FQDN of your
* elasticsearch host
*
* Note: this can also be an object if you want to pass options to the http client. For example:
*
* +elasticsearch: {server: "http://localhost:9200", withCredentials: true}+
*
*/
elasticsearch: "http://${ES_HOST}:${ES_PORT}/elasticsearch",
/** @scratch /configuration/config.js/5
*
* ==== default_route
*
* This is the default landing page when you don't specify a dashboard to load. You can specify
* files, scripts or saved dashboards here. For example, if you had saved a dashboard called
* WebLogs to elasticsearch you might use:
*
* default_route: '/dashboard/elasticsearch/WebLogs',
*/
default_route : '/dashboard/file/logstash.json',
/** @scratch /configuration/config.js/5
*
* ==== kibana-int
*
* The default ES index to use for storing Kibana specific object
* such as stored dashboards
*/
kibana_index: "kibana-int",
/** @scratch /configuration/config.js/5
*
* ==== panel_name
*
* An array of panel modules available. Panels will only be loaded when they are defined in the
* dashboard, but this list is used in the "add panel" interface.
*/
panel_names: [
'histogram',
'map',
'goal',
'table',
'filtering',
'timepicker',
'text',
'hits',
'column',
'trends',
'bettermap',
'query',
'terms',
'stats',
'sparklines'
]
});
});
EOF
# Proxy all calls to ...:80/elasticsearch to the location
# defined by http://${PROXY_HOST}:${PROXY_PORT}
cat <<EOF > /etc/nginx/sites-available/default
server {
listen 80 default_server;
listen [::]:80 default_server ipv6only=on;
root /usr/share/nginx/html;
index index.html index.htm;
# Make site accessible from http://localhost/
server_name localhost;
location ~ /elasticsearch/?(.*)$ {
proxy_http_version 1.1;
proxy_set_header Upgrade \$http_upgrade;
proxy_read_timeout 1d;
proxy_set_header Connection "upgrade";
proxy_pass http://${PROXY_HOST}:${PROXY_PORT}/\$1;
}
location / {
# First attempt to serve request as file, then
# as directory, then fall back to displaying a 404.
try_files \$uri \$uri/ =404;
}
}
EOF
exec nginx -c /etc/nginx/nginx.conf "$@"
|
CrunchyData/dnsbridge
|
src/github.com/GoogleCloudPlatform/kubernetes/contrib/logging/kibana-image/run_kibana_nginx.sh
|
Shell
|
apache-2.0
| 6,553 |
#!/bin/bash
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ]; do
DIR="$(cd -P "$(dirname "$SOURCE")" && pwd)"
SOURCE="$(readlink "$SOURCE")"
[[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE"
done
DIR="$(cd -P "$(dirname "$SOURCE")" && pwd)"
classpath=$(${DIR}/classpath.sh dev/import-local)
if [ $? -ne 0 ]; then
echo "${classpath}"
exit
fi
dir=$1
if [ ! -d "$dir" ]; then
echo "you need to specify a data directory."
exit 1
fi
java \
-Xmx512m \
-Djava.awt.headless=true \
-Dfile.encoding=UTF-8 \
-Djava.library.path=$LD_LIBRARY_PATH \
-classpath ${classpath} \
io.lumify.tools.Import \
--datadir=${dir} \
--queuedups
|
j-bernardo/lumify
|
bin/importData.sh
|
Shell
|
apache-2.0
| 634 |
#!/bin/sh
# Debian, unlike Ubuntu, doesn't sudo it users by default
if [ -x /usr/bin/aptitude ]; then
# aptitude is nice since it doesn't fail if a non-existant package is hit
# See: http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=503215
su -c "aptitude -y install $*"
else
su -c "apt-get -y --ignore-missing install $*"
fi
|
BryanQuigley/phoronix-test-suite
|
pts-core/external-test-dependencies/scripts/install-debian-packages.sh
|
Shell
|
gpl-3.0
| 329 |
#!/usr/bin/expect
set p "P@\$\$w0rd"
set f [lindex $argv 0]
spawn rpm --resign $f
expect "Enter pass phrase:"
send -- "$p\r"
expect eof
|
quark-pat/CLIP
|
packages/aqueduct/aqueduct/compliance/Puppet/STIG/rhel5/Unix-Checklist/base/dev/rhel5/scripts/trunk/scripts/rpm_addsign.sh
|
Shell
|
apache-2.0
| 136 |
#!/bin/bash
#
# Copyright IBM Corp. All Rights Reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
set -e
ARCH=`uname -m`
#check job type, do patch set specific unit test when job is verify
if [ "$JOB_TYPE" = "VERIFY" ]; then
cd $GOPATH/src/github.com/hyperledger/fabric/
#figure out what packages should be tested for uncommitted changes
# first check for uncommitted changes
TEST_PKGS=$(git diff --name-only HEAD * | grep .go$ | grep -v ^vendor/ \
| grep -v ^build/ | sed 's%/[^/]*$%/%'| sort -u \
| awk '{print "github.com/hyperledger/fabric/"$1"..."}')
if [ -z "$TEST_PKGS" ]; then
# next check for changes in the latest commit - typically this will
# be for CI only, but could also handle a committed change before
# pushing to Gerrit
TEST_PKGS=$(git diff-tree --no-commit-id --name-only -r $(git log -2 \
--pretty=format:"%h") | grep .go$ | grep -v ^vendor/ | grep -v ^build/ \
| sed 's%/[^/]*$%/%'| sort -u | \
awk '{print "github.com/hyperledger/fabric/"$1"..."}')
fi
#only run the test when test pkgs is not empty
if [[ ! -z "$TEST_PKGS" ]]; then
echo "Testing packages:"
echo $TEST_PKGS
# use go test -cover as this is much more efficient than gocov
time go test -cover -ldflags "$GO_LDFLAGS" $TEST_PKGS -p 1 -timeout=20m
else
echo "Nothing changed in unit test!!!"
fi
else
#check to see if TEST_PKGS is set else use default (all packages)
TEST_PKGS=${TEST_PKGS:-github.com/hyperledger/fabric/...}
echo -n "Obtaining list of tests to run for the following packages: ${TEST_PKGS}"
# Some examples don't play nice with `go test`
PKGS=`go list ${TEST_PKGS} 2> /dev/null | \
grep -v /vendor/ | \
grep -v /build/ | \
grep -v /bccsp/mocks | \
grep -v /bddtests | \
grep -v /orderer/mocks | \
grep -v /orderer/sample_clients | \
grep -v /common/mocks | \
grep -v /common/ledger/testutil | \
grep -v /core/mocks | \
grep -v /core/testutil | \
grep -v /core/ledger/testutil | \
grep -v /core/ledger/kvledger/example | \
grep -v /core/ledger/kvledger/marble_example | \
grep -v /core/deliverservice/mocks | \
grep -v /core/scc/samplesyscc | \
grep -v /test | \
grep -v /examples`
if [ x$ARCH == xppc64le -o x$ARCH == xs390x ]
then
PKGS=`echo $PKGS | sed 's@'github.com/hyperledger/fabric/core/chaincode/platforms/java/test'@@g'`
PKGS=`echo $PKGS | sed 's@'github.com/hyperledger/fabric/core/chaincode/platforms/java'@@g'`
fi
echo " DONE!"
echo "Running tests..."
#go test -cover -ldflags "$GO_LDFLAGS" $PKGS -p 1 -timeout=20m
gocov test -ldflags "$GO_LDFLAGS" $PKGS -p 1 -timeout=20m | gocov-xml > report.xml
fi
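# Example (hypothetical package): a non-verify run can be limited to one
# package tree by pre-setting TEST_PKGS, e.g.
#   TEST_PKGS=github.com/hyperledger/fabric/core/ledger/... ./unit-test/run.sh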
|
cophey/fabric
|
unit-test/run.sh
|
Shell
|
apache-2.0
| 3,465 |
#!/usr/bin/env bash
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Common utilities for kube-up/kube-down
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(cd $(dirname "${BASH_SOURCE[0]}")/.. && pwd)
DEFAULT_KUBECONFIG="${HOME:-.}/.kube/config"
source "${KUBE_ROOT}/hack/lib/util.sh"
# KUBE_RELEASE_VERSION_REGEX matches things like "v1.2.3" or "v1.2.3-alpha.4"
#
# NOTE This must match the version_regex in build/common.sh
# kube::release::parse_and_validate_release_version()
KUBE_RELEASE_VERSION_REGEX="^v(0|[1-9][0-9]*)\\.(0|[1-9][0-9]*)\\.(0|[1-9][0-9]*)(-([a-zA-Z0-9]+)\\.(0|[1-9][0-9]*))?$"
KUBE_RELEASE_VERSION_DASHED_REGEX="v(0|[1-9][0-9]*)-(0|[1-9][0-9]*)-(0|[1-9][0-9]*)(-([a-zA-Z0-9]+)-(0|[1-9][0-9]*))?"
# KUBE_CI_VERSION_REGEX matches things like "v1.2.3-alpha.4.56+abcdefg"
#
# NOTE This must match the version_regex in build/common.sh
# kube::release::parse_and_validate_ci_version()
KUBE_CI_VERSION_REGEX="^v(0|[1-9][0-9]*)\\.(0|[1-9][0-9]*)\\.(0|[1-9][0-9]*)-([a-zA-Z0-9]+)\\.(0|[1-9][0-9]*)(\\.(0|[1-9][0-9]*)\\+[-0-9a-z]*)?$"
KUBE_CI_VERSION_DASHED_REGEX="^v(0|[1-9][0-9]*)-(0|[1-9][0-9]*)-(0|[1-9][0-9]*)-([a-zA-Z0-9]+)-(0|[1-9][0-9]*)(-(0|[1-9][0-9]*)\\+[-0-9a-z]*)?"
# Generate kubeconfig data for the created cluster.
# Assumed vars:
# KUBE_USER
# KUBE_PASSWORD
# KUBE_MASTER_IP
# KUBECONFIG
# CONTEXT
#
# If the apiserver supports bearer auth, also provide:
# KUBE_BEARER_TOKEN
#
# If the kubeconfig context being created should NOT be set as the current context
# SECONDARY_KUBECONFIG=true
#
# To explicitly name the context being created, use OVERRIDE_CONTEXT
#
# The following can be omitted for --insecure-skip-tls-verify
# KUBE_CERT
# KUBE_KEY
# CA_CERT
function create-kubeconfig() {
KUBECONFIG=${KUBECONFIG:-$DEFAULT_KUBECONFIG}
local kubectl="${KUBE_ROOT}/cluster/kubectl.sh"
SECONDARY_KUBECONFIG=${SECONDARY_KUBECONFIG:-}
OVERRIDE_CONTEXT=${OVERRIDE_CONTEXT:-}
if [[ "$OVERRIDE_CONTEXT" != "" ]];then
CONTEXT=$OVERRIDE_CONTEXT
fi
# KUBECONFIG determines the file we write to, but it may not exist yet
OLD_IFS=$IFS
IFS=':'
for cfg in ${KUBECONFIG} ; do
if [[ ! -e "${cfg}" ]]; then
mkdir -p "$(dirname "${cfg}")"
touch "${cfg}"
fi
done
IFS=$OLD_IFS
local cluster_args=(
"--server=${KUBE_SERVER:-https://${KUBE_MASTER_IP}}"
)
if [[ -z "${CA_CERT:-}" ]]; then
cluster_args+=("--insecure-skip-tls-verify=true")
else
cluster_args+=(
"--certificate-authority=${CA_CERT}"
"--embed-certs=true"
)
fi
local user_args=()
if [[ ! -z "${KUBE_BEARER_TOKEN:-}" ]]; then
user_args+=(
"--token=${KUBE_BEARER_TOKEN}"
)
elif [[ ! -z "${KUBE_USER:-}" && ! -z "${KUBE_PASSWORD:-}" ]]; then
user_args+=(
"--username=${KUBE_USER}"
"--password=${KUBE_PASSWORD}"
)
fi
if [[ ! -z "${KUBE_CERT:-}" && ! -z "${KUBE_KEY:-}" ]]; then
user_args+=(
"--client-certificate=${KUBE_CERT}"
"--client-key=${KUBE_KEY}"
"--embed-certs=true"
)
fi
KUBECONFIG="${KUBECONFIG}" "${kubectl}" config set-cluster "${CONTEXT}" "${cluster_args[@]}"
if [[ -n "${user_args[@]:-}" ]]; then
KUBECONFIG="${KUBECONFIG}" "${kubectl}" config set-credentials "${CONTEXT}" "${user_args[@]}"
fi
KUBECONFIG="${KUBECONFIG}" "${kubectl}" config set-context "${CONTEXT}" --cluster="${CONTEXT}" --user="${CONTEXT}"
if [[ "${SECONDARY_KUBECONFIG}" != "true" ]];then
KUBECONFIG="${KUBECONFIG}" "${kubectl}" config use-context "${CONTEXT}" --cluster="${CONTEXT}"
fi
# If we have a bearer token, also create a credential entry with basic auth
# so that it is easy to discover the basic auth password for your cluster
# to use in a web browser.
if [[ ! -z "${KUBE_BEARER_TOKEN:-}" && ! -z "${KUBE_USER:-}" && ! -z "${KUBE_PASSWORD:-}" ]]; then
KUBECONFIG="${KUBECONFIG}" "${kubectl}" config set-credentials "${CONTEXT}-basic-auth" "--username=${KUBE_USER}" "--password=${KUBE_PASSWORD}"
fi
echo "Wrote config for ${CONTEXT} to ${KUBECONFIG}"
}
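# Example (hypothetical values): create a basic-auth kubeconfig entry
#   KUBE_MASTER_IP=203.0.113.10 KUBE_USER=admin KUBE_PASSWORD=secret \
#     CONTEXT=my-cluster create-kubeconfig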
# Clear kubeconfig data for a context
# Assumed vars:
# KUBECONFIG
# CONTEXT
#
# To explicitly name the context being removed, use OVERRIDE_CONTEXT
function clear-kubeconfig() {
export KUBECONFIG=${KUBECONFIG:-$DEFAULT_KUBECONFIG}
OVERRIDE_CONTEXT=${OVERRIDE_CONTEXT:-}
if [[ "$OVERRIDE_CONTEXT" != "" ]];then
CONTEXT=$OVERRIDE_CONTEXT
fi
local kubectl="${KUBE_ROOT}/cluster/kubectl.sh"
# Unset the current-context before we delete it, as otherwise kubectl errors.
local cc=$("${kubectl}" config view -o jsonpath='{.current-context}')
if [[ "${cc}" == "${CONTEXT}" ]]; then
"${kubectl}" config unset current-context
fi
"${kubectl}" config unset "clusters.${CONTEXT}"
"${kubectl}" config unset "users.${CONTEXT}"
"${kubectl}" config unset "users.${CONTEXT}-basic-auth"
"${kubectl}" config unset "contexts.${CONTEXT}"
echo "Cleared config for ${CONTEXT} from ${KUBECONFIG}"
}
# Gets username, password for the current-context in kubeconfig, if they exist.
# Assumed vars:
# KUBECONFIG # if unset, defaults to global
# KUBE_CONTEXT # if unset, defaults to current-context
#
# Vars set:
# KUBE_USER
# KUBE_PASSWORD
#
# KUBE_USER,KUBE_PASSWORD will be empty if no current-context is set, or
# the current-context user does not exist or contain basicauth entries.
function get-kubeconfig-basicauth() {
export KUBECONFIG=${KUBECONFIG:-$DEFAULT_KUBECONFIG}
local cc=$("${KUBE_ROOT}/cluster/kubectl.sh" config view -o jsonpath="{.current-context}")
if [[ ! -z "${KUBE_CONTEXT:-}" ]]; then
cc="${KUBE_CONTEXT}"
fi
local user=$("${KUBE_ROOT}/cluster/kubectl.sh" config view -o jsonpath="{.contexts[?(@.name == \"${cc}\")].context.user}")
get-kubeconfig-user-basicauth "${user}"
if [[ -z "${KUBE_USER:-}" || -z "${KUBE_PASSWORD:-}" ]]; then
    # kube-up stores username/password in an additional kubeconfig section
    # suffixed with "-basic-auth". Cloud providers like GKE store it directly
    # in the top level section along with the other credential information.
# TODO: Handle this uniformly, either get rid of "basic-auth" or
# consolidate its usage into a function across scripts in cluster/
get-kubeconfig-user-basicauth "${user}-basic-auth"
fi
}
# Sets KUBE_USER and KUBE_PASSWORD to the username and password specified in
# the kubeconfig section corresponding to $1.
#
# Args:
# $1 kubeconfig section to look for basic auth (eg: user or user-basic-auth).
# Assumed vars:
# KUBE_ROOT
# Vars set:
# KUBE_USER
# KUBE_PASSWORD
function get-kubeconfig-user-basicauth() {
KUBE_USER=$("${KUBE_ROOT}/cluster/kubectl.sh" config view -o jsonpath="{.users[?(@.name == \"$1\")].user.username}")
KUBE_PASSWORD=$("${KUBE_ROOT}/cluster/kubectl.sh" config view -o jsonpath="{.users[?(@.name == \"$1\")].user.password}")
}
# Generate basic auth user and password.
# Vars set:
# KUBE_USER
# KUBE_PASSWORD
function gen-kube-basicauth() {
KUBE_USER=admin
KUBE_PASSWORD=$(python -c 'import string,random; print("".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(16)))')
}
# Get the bearer token for the current-context in kubeconfig if one exists.
# Assumed vars:
# KUBECONFIG # if unset, defaults to global
# KUBE_CONTEXT # if unset, defaults to current-context
#
# Vars set:
# KUBE_BEARER_TOKEN
#
# KUBE_BEARER_TOKEN will be empty if no current-context is set, or the
# current-context user does not exist or contain a bearer token entry.
function get-kubeconfig-bearertoken() {
export KUBECONFIG=${KUBECONFIG:-$DEFAULT_KUBECONFIG}
local cc=$("${KUBE_ROOT}/cluster/kubectl.sh" config view -o jsonpath="{.current-context}")
if [[ ! -z "${KUBE_CONTEXT:-}" ]]; then
cc="${KUBE_CONTEXT}"
fi
local user=$("${KUBE_ROOT}/cluster/kubectl.sh" config view -o jsonpath="{.contexts[?(@.name == \"${cc}\")].context.user}")
KUBE_BEARER_TOKEN=$("${KUBE_ROOT}/cluster/kubectl.sh" config view -o jsonpath="{.users[?(@.name == \"${user}\")].user.token}")
}
# Generate bearer token.
#
# Vars set:
# KUBE_BEARER_TOKEN
function gen-kube-bearertoken() {
KUBE_BEARER_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
}
function load-or-gen-kube-basicauth() {
if [[ ! -z "${KUBE_CONTEXT:-}" ]]; then
get-kubeconfig-basicauth
fi
if [[ -z "${KUBE_USER:-}" || -z "${KUBE_PASSWORD:-}" ]]; then
gen-kube-basicauth
fi
# Make sure they don't contain any funny characters.
if ! [[ "${KUBE_USER}" =~ ^[-._@a-zA-Z0-9]+$ ]]; then
echo "Bad KUBE_USER string."
exit 1
fi
if ! [[ "${KUBE_PASSWORD}" =~ ^[-._@#%/a-zA-Z0-9]+$ ]]; then
echo "Bad KUBE_PASSWORD string."
exit 1
fi
}
# Sets KUBE_VERSION variable to the proper version number (e.g. "v1.0.6",
# "v1.2.0-alpha.1.881+376438b69c7612") or a version' publication of the form
# <path>/<version> (e.g. "release/stable",' "ci/latest-1").
#
# See the docs on getting builds for more information about version
# publication.
#
# Args:
# $1 version string from command line
# Vars set:
# KUBE_VERSION
function set_binary_version() {
if [[ "${1}" =~ "/" ]]; then
IFS='/' read -a path <<< "${1}"
if [[ "${path[0]}" == "release" ]]; then
KUBE_VERSION=$(gsutil cat "gs://kubernetes-release/${1}.txt")
else
KUBE_VERSION=$(gsutil cat "gs://kubernetes-release-dev/${1}.txt")
fi
else
KUBE_VERSION=${1}
fi
}
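# Example (hypothetical inputs):
#   set_binary_version "release/stable"  # KUBE_VERSION read from gs://kubernetes-release/release/stable.txt
#   set_binary_version "v1.9.3"          # KUBE_VERSION=v1.9.3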
# Search for the specified tarball in the various known output locations,
# echoing the location if found.
#
# Assumed vars:
# KUBE_ROOT
#
# Args:
# $1 name of tarball to search for
function find-tar() {
local -r tarball=$1
locations=(
"${KUBE_ROOT}/node/${tarball}"
"${KUBE_ROOT}/server/${tarball}"
"${KUBE_ROOT}/_output/release-tars/${tarball}"
"${KUBE_ROOT}/bazel-bin/build/release-tars/${tarball}"
)
location=$( (ls -t "${locations[@]}" 2>/dev/null || true) | head -1 )
if [[ ! -f "${location}" ]]; then
echo "!!! Cannot find ${tarball}" >&2
exit 1
fi
echo "${location}"
}
# Verify and find the various tar files that we are going to use on the server.
#
# Assumed vars:
# KUBE_ROOT
# Vars set:
# NODE_BINARY_TAR
# SERVER_BINARY_TAR
# KUBE_MANIFESTS_TAR
function find-release-tars() {
SERVER_BINARY_TAR=$(find-tar kubernetes-server-linux-amd64.tar.gz)
if [[ "${NUM_WINDOWS_NODES}" -gt "0" ]]; then
NODE_BINARY_TAR=$(find-tar kubernetes-node-windows-amd64.tar.gz)
fi
# This tarball is used by GCI, Ubuntu Trusty, and Container Linux.
KUBE_MANIFESTS_TAR=
if [[ "${MASTER_OS_DISTRIBUTION:-}" == "trusty" || "${MASTER_OS_DISTRIBUTION:-}" == "gci" || "${MASTER_OS_DISTRIBUTION:-}" == "ubuntu" ]] || \
[[ "${NODE_OS_DISTRIBUTION:-}" == "trusty" || "${NODE_OS_DISTRIBUTION:-}" == "gci" || "${NODE_OS_DISTRIBUTION:-}" == "ubuntu" || "${NODE_OS_DISTRIBUTION:-}" == "custom" ]] ; then
KUBE_MANIFESTS_TAR=$(find-tar kubernetes-manifests.tar.gz)
fi
}
# Run the cfssl command to generate certificate files for the etcd service;
# the certificate files will be saved in the $1 directory.
#
# Optional vars:
#   GEN_ETCD_CA_CERT (CA cert encoded with base64 and gzip compression)
#   GEN_ETCD_CA_KEY (CA key encoded with base64)
#
# If GEN_ETCD_CA_CERT or GEN_ETCD_CA_KEY is not specified, it will generate certs for the CA.
#
# Args:
# $1 (the directory that certificate files to save)
# $2 (the ip of etcd member)
# $3 (the type of etcd certificates, must be one of client, server, peer)
# $4 (the prefix of the certificate filename, default is $3)
function generate-etcd-cert() {
local cert_dir=${1}
local member_ip=${2}
local type_cert=${3}
local prefix=${4:-"${type_cert}"}
local GEN_ETCD_CA_CERT=${GEN_ETCD_CA_CERT:-}
local GEN_ETCD_CA_KEY=${GEN_ETCD_CA_KEY:-}
mkdir -p "${cert_dir}"
pushd "${cert_dir}"
kube::util::ensure-cfssl .
if [ ! -r "ca-config.json" ]; then
cat >ca-config.json <<EOF
{
"signing": {
"default": {
"expiry": "43800h"
},
"profiles": {
"server": {
"expiry": "43800h",
"usages": [
"signing",
"key encipherment",
"server auth"
]
},
"client": {
"expiry": "43800h",
"usages": [
"signing",
"key encipherment",
"client auth"
]
},
"peer": {
"expiry": "43800h",
"usages": [
"signing",
"key encipherment",
"server auth",
"client auth"
]
}
}
}
}
EOF
fi
if [ ! -r "ca-csr.json" ]; then
cat >ca-csr.json <<EOF
{
"CN": "Kubernetes",
"key": {
"algo": "ecdsa",
"size": 256
},
"names": [
{
"C": "US",
"L": "CA",
"O": "kubernetes.io"
}
]
}
EOF
fi
if [[ -n "${GEN_ETCD_CA_CERT}" && -n "${GEN_ETCD_CA_KEY}" ]]; then
echo "${ca_cert}" | base64 --decode | gunzip > ca.pem
echo "${ca_key}" | base64 --decode > ca-key.pem
fi
if [[ ! -r "ca.pem" || ! -r "ca-key.pem" ]]; then
${CFSSL_BIN} gencert -initca ca-csr.json | ${CFSSLJSON_BIN} -bare ca -
fi
case "${type_cert}" in
client)
echo "Generate client certificates..."
echo '{"CN":"client","hosts":["*"],"key":{"algo":"ecdsa","size":256}}' \
| ${CFSSL_BIN} gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=client - \
| ${CFSSLJSON_BIN} -bare "${prefix}"
;;
server)
echo "Generate server certificates..."
echo '{"CN":"'${member_ip}'","hosts":[""],"key":{"algo":"ecdsa","size":256}}' \
| ${CFSSL_BIN} gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=server -hostname="${member_ip},127.0.0.1" - \
| ${CFSSLJSON_BIN} -bare "${prefix}"
;;
peer)
echo "Generate peer certificates..."
echo '{"CN":"'${member_ip}'","hosts":[""],"key":{"algo":"ecdsa","size":256}}' \
| ${CFSSL_BIN} gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=peer -hostname="${member_ip},127.0.0.1" - \
| ${CFSSLJSON_BIN} -bare "${prefix}"
;;
*)
echo "Unknow, unsupported etcd certs type: ${type_cert}" >&2
echo "Supported type: client, server, peer" >&2
exit 2
esac
popd
}
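# Example (hypothetical values): create peer certificates for an etcd member
#   generate-etcd-cert /etc/srv/kubernetes/etcd 10.0.0.5 peer
# which writes peer.pem and peer-key.pem (plus ca.pem/ca-key.pem if absent)
# into the given directory.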
# Check whether required binaries exist, prompting to download
# if missing.
# If KUBERNETES_SKIP_CONFIRM is set to y, we'll automatically download binaries
# without prompting.
function verify-kube-binaries() {
if ! "${KUBE_ROOT}/cluster/kubectl.sh" version --client >&/dev/null; then
echo "!!! kubectl appears to be broken or missing"
download-release-binaries
fi
}
# Check whether required release artifacts exist, prompting to download
# if missing.
# If KUBERNETES_SKIP_CONFIRM is set to y, we'll automatically download binaries
# without prompting.
function verify-release-tars() {
if ! $(find-release-tars); then
download-release-binaries
fi
}
# Download release artifacts.
function download-release-binaries() {
get_binaries_script="${KUBE_ROOT}/cluster/get-kube-binaries.sh"
local resp="y"
if [[ ! "${KUBERNETES_SKIP_CONFIRM:-n}" =~ ^[yY]$ ]]; then
echo "Required release artifacts appear to be missing. Do you wish to download them? [Y/n]"
read resp
fi
if [[ "${resp}" =~ ^[nN]$ ]]; then
echo "You must download release artifacts to continue. You can use "
echo " ${get_binaries_script}"
echo "to do this for your automatically."
exit 1
fi
"${get_binaries_script}"
}
# Run pushd without stack output
function pushd() {
command pushd $@ > /dev/null
}
# Run popd without stack output
function popd() {
command popd $@ > /dev/null
}
|
quinton-hoole/kubernetes
|
cluster/common.sh
|
Shell
|
apache-2.0
| 16,500 |
/drd/software/int/bin/launcher.sh -p hf2 -d rnd --launchBlocking bach -o EPA_CMDLINE python2.5 --arg data_export/auto_import.py --arg /drd/jobs/hf2/global/art/Textures/grass/ --arg auto-import -x /drd/users/barry.robison/drd/core/launcher_xml/trunk
|
barryrobison/arsenalsuite
|
cpp/apps/bach/plugins/auto_import_cron.sh
|
Shell
|
gpl-2.0
| 251 |
MACHINE=
SCRIPT_NAME=elf
OUTPUT_FORMAT="elf32-rx-linux"
# See also `include/elf/rx.h'
TEXT_START_ADDR=0x10000000
ARCH=rx
ENTRY=start
EMBEDDED=yes
TEMPLATE_NAME=elf
EXTRA_EM_FILE=rxlinux
ELFSIZE=32
MAXPAGESIZE=256
STACK_ADDR="(DEFINED(__stack) ? __stack : 0xbffffffc)"
STACK_SENTINEL="LONG(0xdeaddead)"
# We do not need .stack for shared library.
test -n "$CREATE_SHLIB" && unset STACK_ADDR
|
mattstock/binutils-bexkat1
|
ld/emulparams/elf32rx_linux.sh
|
Shell
|
gpl-2.0
| 391 |
version=1.60.0
src_url=http://sourceforge.net/projects/boost/files/boost/${version}/boost_${version//./_}.tar.bz2
src_url_sha1=7f56ab507d3258610391b47fef6b11635861175a
pkg_install-include () {
mkdir -p "$install_dir/include/boost"
cp -a "$src_dir/boost/." "$install_dir/include/boost"
}
pkg_install () {
pkg_copy_src_to_build
in_dir "$build_dir" ./bootstrap.sh --with-libraries=system
in_dir "$build_dir" ./b2
mkdir -p "$install_dir/lib"
in_dir "$build_dir" cp "stage/lib/libboost_system.a" "$install_dir/lib"
}
pkg_install-windows () {
: # Include files only
}
|
mbroadst/rethinkdb
|
mk/support/pkg/boost.sh
|
Shell
|
agpl-3.0
| 599 |
#!/bin/bash
set -e
source "`dirname $0`/includes.sh"
OFN_COMMIT=$(get_ofn_commit)
if [ "$OFN_COMMIT" = 'OFN_COMMIT_NOT_FOUND' ]; then
OFN_COMMIT=$(git rev-parse $BUILDKITE_COMMIT)
fi
echo "--- Checking environment variables"
require_env_vars OFN_COMMIT STAGING_SSH_HOST STAGING_CURRENT_PATH STAGING_SERVICE STAGING_DB_HOST STAGING_DB_USER STAGING_DB PRODUCTION_REMOTE
echo "--- Saving baseline data for staging"
VARS="CURRENT_PATH='$STAGING_CURRENT_PATH' SERVICE='$STAGING_SERVICE' DB_HOST='$STAGING_DB_HOST' DB_USER='$STAGING_DB_USER' DB='$STAGING_DB'"
ssh "$STAGING_SSH_HOST" "$VARS $STAGING_CURRENT_PATH/script/ci/save_staging_baseline.sh $OFN_COMMIT"
echo "--- Pushing to production"
exec 5>&1
OUTPUT=$(git push "$PRODUCTION_REMOTE" "$OFN_COMMIT":master --force 2>&1 |tee /dev/fd/5)
[[ $OUTPUT =~ "Done" ]]
|
RohanM/openfoodnetwork
|
script/ci/push_to_production.sh
|
Shell
|
agpl-3.0
| 818 |
#!/bin/sh
# Package
PACKAGE="python"
DNAME="Python"
# Others
INSTALL_DIR="/usr/local/${PACKAGE}"
PATH="${INSTALL_DIR}/bin:${PATH}"
preinst ()
{
exit 0
}
postinst ()
{
# Link
ln -s ${SYNOPKG_PKGDEST} ${INSTALL_DIR}
# Install busybox stuff
${INSTALL_DIR}/bin/busybox --install ${INSTALL_DIR}/bin
# Install the wheels
${INSTALL_DIR}/bin/pip install --no-deps --no-index -I -f ${INSTALL_DIR}/share/wheelhouse -r ${INSTALL_DIR}/share/wheelhouse/requirements.txt > /dev/null 2>&1
# Log installation informations
${INSTALL_DIR}/bin/python --version > ${INSTALL_DIR}/install.log 2>&1
echo "" >> ${INSTALL_DIR}/install.log
echo "System installed modules:" >> ${INSTALL_DIR}/install.log
${INSTALL_DIR}/bin/pip freeze >> ${INSTALL_DIR}/install.log
# Byte-compile in background
${INSTALL_DIR}/bin/python -m compileall -q -f ${INSTALL_DIR}/lib/python2.7 > /dev/null &
${INSTALL_DIR}/bin/python -OO -m compileall -q -f ${INSTALL_DIR}/lib/python2.7 > /dev/null &
exit 0
}
preuninst ()
{
exit 0
}
postuninst ()
{
# Remove link
rm -f ${INSTALL_DIR}
exit 0
}
preupgrade ()
{
exit 0
}
postupgrade ()
{
exit 0
}
|
GaetanCambier/spksrc
|
spk/python/src/installer.sh
|
Shell
|
bsd-3-clause
| 1,196 |
function gi() { curl -fL https://www.gitignore.io/api/${(j:,:)@} }
_gitignoreio_get_command_list() {
curl -fL https://www.gitignore.io/api/list | tr "," "\n"
}
_gitignoreio () {
compset -P '*,'
compadd -S '' `_gitignoreio_get_command_list`
}
compdef _gitignoreio gi
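# Example usage (requires network access to gitignore.io; arguments are
# joined with commas by the gi function above):
#   gi python macos >> .gitignore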
|
ccollins/dotfiles
|
zsh/oh-my-zsh.symlink/plugins/gitignore/gitignore.plugin.zsh
|
Shell
|
mit
| 275 |
#!/usr/bin/env bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
kube::util::sortable_date() {
date "+%Y%m%d-%H%M%S"
}
# arguments: target, item1, item2, item3, ...
# returns 0 if target is in the given items, 1 otherwise.
kube::util::array_contains() {
local search="$1"
local element
shift
for element; do
if [[ "${element}" == "${search}" ]]; then
return 0
fi
done
return 1
}
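# Example:
#   kube::util::array_contains "b" "a" "b" "c" && echo "found"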
kube::util::wait_for_url() {
local url=$1
local prefix=${2:-}
local wait=${3:-1}
local times=${4:-30}
local maxtime=${5:-1}
which curl >/dev/null || {
kube::log::usage "curl must be installed"
exit 1
}
local i
for i in $(seq 1 "$times"); do
local out
if out=$(curl --max-time "$maxtime" -gkfs "$url" 2>/dev/null); then
kube::log::status "On try ${i}, ${prefix}: ${out}"
return 0
fi
sleep "${wait}"
done
kube::log::error "Timed out waiting for ${prefix} to answer at ${url}; tried ${times} waiting ${wait} between each"
return 1
}
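# Example (hypothetical endpoint): wait up to ~30s for a local apiserver
#   kube::util::wait_for_url "http://127.0.0.1:8080/healthz" "apiserver" 1 30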
# Example: kube::util::trap_add 'echo "in trap DEBUG"' DEBUG
# See: http://stackoverflow.com/questions/3338030/multiple-bash-traps-for-the-same-signal
kube::util::trap_add() {
local trap_add_cmd
trap_add_cmd=$1
shift
for trap_add_name in "$@"; do
local existing_cmd
local new_cmd
# Grab the currently defined trap commands for this trap
existing_cmd=`trap -p "${trap_add_name}" | awk -F"'" '{print $2}'`
if [[ -z "${existing_cmd}" ]]; then
new_cmd="${trap_add_cmd}"
else
new_cmd="${trap_add_cmd};${existing_cmd}"
fi
# Install the combined trap
trap "${new_cmd}" "${trap_add_name}"
done
}
# Opposite of kube::util::ensure-temp-dir()
kube::util::cleanup-temp-dir() {
rm -rf "${KUBE_TEMP}"
}
# Create a temp dir that'll be deleted at the end of this bash session.
#
# Vars set:
# KUBE_TEMP
kube::util::ensure-temp-dir() {
if [[ -z ${KUBE_TEMP-} ]]; then
KUBE_TEMP=$(mktemp -d 2>/dev/null || mktemp -d -t kubernetes.XXXXXX)
kube::util::trap_add kube::util::cleanup-temp-dir EXIT
fi
}
# This figures out the host platform without relying on golang. We need this as
# we don't want a golang install to be a prerequisite to building, yet we need
# this info to figure out where the final binaries are placed.
kube::util::host_platform() {
local host_os
local host_arch
case "$(uname -s)" in
Darwin)
host_os=darwin
;;
Linux)
host_os=linux
;;
*)
kube::log::error "Unsupported host OS. Must be Linux or Mac OS X."
exit 1
;;
esac
case "$(uname -m)" in
x86_64*)
host_arch=amd64
;;
i?86_64*)
host_arch=amd64
;;
amd64*)
host_arch=amd64
;;
aarch64*)
host_arch=arm64
;;
arm64*)
host_arch=arm64
;;
arm*)
host_arch=arm
;;
i?86*)
host_arch=x86
;;
s390x*)
host_arch=s390x
;;
ppc64le*)
host_arch=ppc64le
;;
*)
kube::log::error "Unsupported host arch. Must be x86_64, 386, arm, arm64, s390x or ppc64le."
exit 1
;;
esac
echo "${host_os}/${host_arch}"
}
kube::util::find-binary-for-platform() {
local -r lookfor="$1"
local -r platform="$2"
local locations=(
"${KUBE_ROOT}/_output/bin/${lookfor}"
"${KUBE_ROOT}/_output/dockerized/bin/${platform}/${lookfor}"
"${KUBE_ROOT}/_output/local/bin/${platform}/${lookfor}"
"${KUBE_ROOT}/platforms/${platform}/${lookfor}"
)
# Also search for binary in bazel build tree.
# The bazel go rules place binaries in subtrees like
# "bazel-bin/source/path/linux_amd64_pure_stripped/binaryname", so make sure
# the platform name is matched in the path.
locations+=($(find "${KUBE_ROOT}/bazel-bin/" -type f -executable \
-path "*/${platform/\//_}*/${lookfor}" 2>/dev/null || true) )
# List most recently-updated location.
local -r bin=$( (ls -t "${locations[@]}" 2>/dev/null || true) | head -1 )
echo -n "${bin}"
}
kube::util::find-binary() {
kube::util::find-binary-for-platform "$1" "$(kube::util::host_platform)"
}
# Run all known doc generators (today gendocs and genman for kubectl)
# $1 is the directory to put those generated documents
kube::util::gen-docs() {
local dest="$1"
# Find binary
gendocs=$(kube::util::find-binary "gendocs")
genkubedocs=$(kube::util::find-binary "genkubedocs")
genman=$(kube::util::find-binary "genman")
genyaml=$(kube::util::find-binary "genyaml")
genfeddocs=$(kube::util::find-binary "genfeddocs")
mkdir -p "${dest}/docs/user-guide/kubectl/"
"${gendocs}" "${dest}/docs/user-guide/kubectl/"
mkdir -p "${dest}/docs/admin/"
"${genkubedocs}" "${dest}/docs/admin/" "kube-apiserver"
"${genkubedocs}" "${dest}/docs/admin/" "kube-controller-manager"
"${genkubedocs}" "${dest}/docs/admin/" "cloud-controller-manager"
"${genkubedocs}" "${dest}/docs/admin/" "kube-proxy"
"${genkubedocs}" "${dest}/docs/admin/" "kube-scheduler"
"${genkubedocs}" "${dest}/docs/admin/" "kubelet"
"${genkubedocs}" "${dest}/docs/admin/" "kubeadm"
mkdir -p "${dest}/docs/man/man1/"
"${genman}" "${dest}/docs/man/man1/" "kube-apiserver"
"${genman}" "${dest}/docs/man/man1/" "kube-controller-manager"
"${genman}" "${dest}/docs/man/man1/" "cloud-controller-manager"
"${genman}" "${dest}/docs/man/man1/" "kube-proxy"
"${genman}" "${dest}/docs/man/man1/" "kube-scheduler"
"${genman}" "${dest}/docs/man/man1/" "kubelet"
"${genman}" "${dest}/docs/man/man1/" "kubectl"
"${genman}" "${dest}/docs/man/man1/" "kubeadm"
mkdir -p "${dest}/docs/yaml/kubectl/"
"${genyaml}" "${dest}/docs/yaml/kubectl/"
# create the list of generated files
pushd "${dest}" > /dev/null
touch docs/.generated_docs
find . -type f | cut -sd / -f 2- | LC_ALL=C sort > docs/.generated_docs
popd > /dev/null
}
# Puts a placeholder for every generated doc. This makes the link checker work.
kube::util::set-placeholder-gen-docs() {
local list_file="${KUBE_ROOT}/docs/.generated_docs"
if [[ -e "${list_file}" ]]; then
# remove all of the old docs; we don't want to check them in.
while read file; do
if [[ "${list_file}" != "${KUBE_ROOT}/${file}" ]]; then
cp "${KUBE_ROOT}/hack/autogenerated_placeholder.txt" "${KUBE_ROOT}/${file}"
fi
done <"${list_file}"
# The docs/.generated_docs file lists itself, so we don't need to explicitly
# delete it.
fi
}
# Removes previously generated docs; we don't want to check them in. $KUBE_ROOT
# must be set.
kube::util::remove-gen-docs() {
if [ -e "${KUBE_ROOT}/docs/.generated_docs" ]; then
# remove all of the old docs; we don't want to check them in.
while read file; do
rm "${KUBE_ROOT}/${file}" 2>/dev/null || true
done <"${KUBE_ROOT}/docs/.generated_docs"
# The docs/.generated_docs file lists itself, so we don't need to explicitly
# delete it.
fi
}
# Takes a group/version and returns the path to its location on disk, sans
# "pkg". E.g.:
# * default behavior: extensions/v1beta1 -> apis/extensions/v1beta1
# * default behavior for only a group: experimental -> apis/experimental
# * Special handling for empty group: v1 -> api/v1, unversioned -> api/unversioned
# * Special handling for groups suffixed with ".k8s.io": foo.k8s.io/v1 -> apis/foo/v1
# * Very special handling for when both group and version are "": / -> api
kube::util::group-version-to-pkg-path() {
staging_apis=(
$(
cd "${KUBE_ROOT}/staging/src/k8s.io/api" &&
find . -name types.go -exec dirname {} \; | sed "s|\./||g" | sort
))
local group_version="$1"
if [[ " ${staging_apis[@]} " =~ " ${group_version/.*k8s.io/} " ]]; then
echo "vendor/k8s.io/api/${group_version/.*k8s.io/}"
return
fi
# "v1" is the API GroupVersion
if [[ "${group_version}" == "v1" ]]; then
echo "vendor/k8s.io/api/core/v1"
return
fi
# Special cases first.
# TODO(lavalamp): Simplify this by moving pkg/api/v1 and splitting pkg/api,
# moving the results to pkg/apis/api.
case "${group_version}" in
# both group and version are "", this occurs when we generate deep copies for internal objects of the legacy v1 API.
__internal)
echo "pkg/apis/core"
;;
meta/v1)
echo "vendor/k8s.io/apimachinery/pkg/apis/meta/v1"
;;
meta/v1beta1)
echo "vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1"
;;
*.k8s.io)
echo "pkg/apis/${group_version%.*k8s.io}"
;;
*.k8s.io/*)
echo "pkg/apis/${group_version/.*k8s.io/}"
;;
*)
echo "pkg/apis/${group_version%__internal}"
;;
esac
}
# Takes a group/version and returns the swagger-spec file name.
# default behavior: extensions/v1beta1 -> extensions_v1beta1
# special case for v1: v1 -> v1
kube::util::gv-to-swagger-name() {
local group_version="$1"
case "${group_version}" in
v1)
echo "v1"
;;
*)
echo "${group_version%/*}_${group_version#*/}"
;;
esac
}
# Fetches swagger spec from apiserver.
# Assumed vars:
# SWAGGER_API_PATH: Base path for swaggerapi on apiserver. Ex:
# http://localhost:8080/swaggerapi.
# SWAGGER_ROOT_DIR: Root dir where we want to save the fetched spec.
# VERSIONS: Array of group versions to include in swagger spec.
kube::util::fetch-swagger-spec() {
for ver in ${VERSIONS}; do
if [[ " ${KUBE_NONSERVER_GROUP_VERSIONS} " == *" ${ver} "* ]]; then
continue
fi
# fetch the swagger spec for each group version.
if [[ ${ver} == "v1" ]]; then
SUBPATH="api"
else
SUBPATH="apis"
fi
SUBPATH="${SUBPATH}/${ver}"
SWAGGER_JSON_NAME="$(kube::util::gv-to-swagger-name ${ver}).json"
curl -w "\n" -fs "${SWAGGER_API_PATH}${SUBPATH}" > "${SWAGGER_ROOT_DIR}/${SWAGGER_JSON_NAME}"
# fetch the swagger spec for the discovery mechanism at group level.
if [[ ${ver} == "v1" ]]; then
continue
fi
SUBPATH="apis/"${ver%/*}
SWAGGER_JSON_NAME="${ver%/*}.json"
curl -w "\n" -fs "${SWAGGER_API_PATH}${SUBPATH}" > "${SWAGGER_ROOT_DIR}/${SWAGGER_JSON_NAME}"
done
# fetch swagger specs for other discovery mechanism.
curl -w "\n" -fs "${SWAGGER_API_PATH}" > "${SWAGGER_ROOT_DIR}/resourceListing.json"
curl -w "\n" -fs "${SWAGGER_API_PATH}version" > "${SWAGGER_ROOT_DIR}/version.json"
curl -w "\n" -fs "${SWAGGER_API_PATH}api" > "${SWAGGER_ROOT_DIR}/api.json"
curl -w "\n" -fs "${SWAGGER_API_PATH}apis" > "${SWAGGER_ROOT_DIR}/apis.json"
curl -w "\n" -fs "${SWAGGER_API_PATH}logs" > "${SWAGGER_ROOT_DIR}/logs.json"
}
# Returns the name of the upstream remote repository for the local git
# repo, e.g. "upstream" or "origin".
kube::util::git_upstream_remote_name() {
git remote -v | grep fetch |\
grep -E 'github.com[/:]kubernetes/kubernetes|k8s.io/kubernetes' |\
head -n 1 | awk '{print $1}'
}
# Ensures the current directory is a git tree for doing things like restoring or
# validating godeps
kube::util::create-fake-git-tree() {
local -r target_dir=${1:-$(pwd)}
pushd "${target_dir}" >/dev/null
git init >/dev/null
git config --local user.email "[email protected]"
git config --local user.name "$0"
git add . >/dev/null
git commit -q -m "Snapshot" >/dev/null
if (( ${KUBE_VERBOSE:-5} >= 6 )); then
kube::log::status "${target_dir} is now a git tree."
fi
popd >/dev/null
}
# Checks whether godep restore was run in the current GOPATH, i.e. that all referenced repos exist
# and are checked out to the referenced rev.
kube::util::godep_restored() {
local -r godeps_json=${1:-Godeps/Godeps.json}
local -r gopath=${2:-${GOPATH%:*}}
if ! which jq &>/dev/null; then
echo "jq not found. Please install." 1>&2
return 1
fi
local root
local old_rev=""
while read path rev; do
rev=$(echo "${rev}" | sed "s/['\"]//g") # remove quotes which are around revs sometimes
if [[ "${rev}" == "${old_rev}" ]] && [[ "${path}" == "${root}"* ]]; then
# avoid checking the same git/hg root again
continue
fi
root="${path}"
while [ "${root}" != "." -a ! -d "${gopath}/src/${root}/.git" -a ! -d "${gopath}/src/${root}/.hg" ]; do
root=$(dirname "${root}")
done
if [ "${root}" == "." ]; then
echo "No checkout of ${path} found in GOPATH \"${gopath}\"." 1>&2
return 1
fi
local head
if [ -d "${gopath}/src/${root}/.git" ]; then
head="$(cd "${gopath}/src/${root}" && git rev-parse HEAD)"
else
head="$(cd "${gopath}/src/${root}" && hg parent --template '{node}')"
fi
if [ "${head}" != "${rev}" ]; then
echo "Unexpected HEAD '${head}' at ${gopath}/src/${root}, expected '${rev}'." 1>&2
return 1
fi
old_rev="${rev}"
done < <(jq '.Deps|.[]|.ImportPath + " " + .Rev' -r < "${godeps_json}")
return 0
}
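# Usage sketch for kube::util::godep_restored (both arguments are optional and
# shown here at their defaults):
#   kube::util::godep_restored "Godeps/Godeps.json" "${GOPATH%:*}" || {
#     echo "run 'godep restore' first" >&2
#     exit 1
#   }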
# Exits script if working directory is dirty. If it's run interactively in the terminal
# the user can commit changes in a second terminal. This script will wait.
kube::util::ensure_clean_working_dir() {
while ! git diff HEAD --exit-code &>/dev/null; do
echo -e "\nUnexpected dirty working directory:\n"
if tty -s; then
git status -s
else
git diff -a # be more verbose in log files without tty
exit 1
fi | sed 's/^/ /'
echo -e "\nCommit your changes in another terminal and then continue here by pressing enter."
read
done 1>&2
}
# Ensure that the given godep version is installed and in the path. Almost
# nobody should use any version but the default.
kube::util::ensure_godep_version() {
GODEP_VERSION=${1:-"v80"} # this version is known to work
if [[ "$(godep version 2>/dev/null)" == *"godep ${GODEP_VERSION}"* ]]; then
return
fi
kube::log::status "Installing godep version ${GODEP_VERSION}"
go install k8s.io/kubernetes/vendor/github.com/tools/godep/
if ! which godep >/dev/null 2>&1; then
kube::log::error "Can't find godep - is your GOPATH 'bin' in your PATH?"
kube::log::error " GOPATH: ${GOPATH}"
kube::log::error " PATH: ${PATH}"
return 1
fi
if [[ "$(godep version 2>/dev/null)" != *"godep ${GODEP_VERSION}"* ]]; then
kube::log::error "Wrong godep version - is your GOPATH 'bin' in your PATH?"
kube::log::error " expected: godep ${GODEP_VERSION}"
kube::log::error " got: $(godep version)"
kube::log::error " GOPATH: ${GOPATH}"
kube::log::error " PATH: ${PATH}"
return 1
fi
}
# Ensure that none of the staging repos is checked out in the GOPATH because this
# easily confuses godep.
kube::util::ensure_no_staging_repos_in_gopath() {
kube::util::ensure_single_dir_gopath
local error=0
for repo_file in "${KUBE_ROOT}"/staging/src/k8s.io/*; do
if [[ ! -d "$repo_file" ]]; then
# not a directory or there were no files
continue;
fi
repo="$(basename "$repo_file")"
if [ -e "${GOPATH}/src/k8s.io/${repo}" ]; then
echo "k8s.io/${repo} exists in GOPATH. Remove before running godep-save.sh." 1>&2
error=1
fi
done
if [ "${error}" = "1" ]; then
exit 1
fi
}
# Checks that the GOPATH is simple, i.e. consists only of one directory, not multiple.
kube::util::ensure_single_dir_gopath() {
if [[ "${GOPATH}" == *:* ]]; then
echo "GOPATH must consist of a single directory." 1>&2
exit 1
fi
}
# Checks whether there are any files matching pattern $2 changed between the
# current branch and upstream branch named by $1.
# Returns 1 (false) if there are no changes, 0 (true) if there are changes
# detected.
kube::util::has_changes_against_upstream_branch() {
local -r git_branch=$1
local -r pattern=$2
local -r not_pattern=${3:-totallyimpossiblepattern}
local full_branch
full_branch="$(kube::util::git_upstream_remote_name)/${git_branch}"
echo "Checking for '${pattern}' changes against '${full_branch}'"
# make sure the branch is valid, otherwise the check will pass erroneously.
if ! git describe "${full_branch}" >/dev/null; then
# abort!
exit 1
fi
# notice this uses ... to find the first shared ancestor
if git diff --name-only "${full_branch}...HEAD" | grep -v -E "${not_pattern}" | grep "${pattern}" > /dev/null; then
return 0
fi
# also check for pending changes
if git status --porcelain | grep -v -E "${not_pattern}" | grep "${pattern}" > /dev/null; then
echo "Detected '${pattern}' uncommitted changes."
return 0
fi
echo "No '${pattern}' changes detected."
return 1
}
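# Usage sketch for kube::util::has_changes_against_upstream_branch (illustrative
# branch and pattern):
#   if kube::util::has_changes_against_upstream_branch master 'hack/'; then
#     echo "hack/ changed; re-running verification"
#   fi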
kube::util::download_file() {
local -r url=$1
local -r destination_file=$2
rm -f "${destination_file}" 2> /dev/null || true
for i in $(seq 5)
do
if ! curl -fsSL --retry 3 --keepalive-time 2 "${url}" -o "${destination_file}"; then
echo "Downloading ${url} failed. $((5-i)) retries left."
sleep 1
else
echo "Downloading ${url} succeed"
return 0
fi
done
return 1
}
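# Usage sketch for kube::util::download_file (hypothetical URL and destination):
#   kube::util::download_file "https://example.com/etcd.tar.gz" "/tmp/etcd.tar.gz" \
#     || { echo "download failed after 5 attempts" >&2; exit 1; }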
# Test whether openssl is installed.
# Sets:
# OPENSSL_BIN: The path to the openssl binary to use
function kube::util::test_openssl_installed {
openssl version >& /dev/null
if [ "$?" != "0" ]; then
echo "Failed to run openssl. Please ensure openssl is installed"
exit 1
fi
OPENSSL_BIN=$(command -v openssl)
}
# creates a client CA, args are sudo, dest-dir, ca-id, purpose
# purpose is dropped in after "key encipherment", you usually want
# '"client auth"'
# '"server auth"'
# '"client auth","server auth"'
function kube::util::create_signing_certkey {
local sudo=$1
local dest_dir=$2
local id=$3
local purpose=$4
# Create client ca
${sudo} /usr/bin/env bash -e <<EOF
rm -f "${dest_dir}/${id}-ca.crt" "${dest_dir}/${id}-ca.key"
${OPENSSL_BIN} req -x509 -sha256 -new -nodes -days 365 -newkey rsa:2048 -keyout "${dest_dir}/${id}-ca.key" -out "${dest_dir}/${id}-ca.crt" -subj "/C=xx/ST=x/L=x/O=x/OU=x/CN=ca/emailAddress=x/"
echo '{"signing":{"default":{"expiry":"43800h","usages":["signing","key encipherment",${purpose}]}}}' > "${dest_dir}/${id}-ca-config.json"
EOF
}
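# Usage sketch for kube::util::create_signing_certkey (illustrative; assumes
# OPENSSL_BIN was set by kube::util::test_openssl_installed and CERT_DIR exists):
#   kube::util::create_signing_certkey "" "${CERT_DIR}" server '"server auth"'
#   kube::util::create_signing_certkey "sudo" "${CERT_DIR}" client-ca '"client auth"'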
# signs a client certificate: args are sudo, dest-dir, CA, filename (roughly), username, groups...
function kube::util::create_client_certkey {
local sudo=$1
local dest_dir=$2
local ca=$3
local id=$4
local cn=${5:-$4}
local groups=""
local SEP=""
shift 5
while [ -n "${1:-}" ]; do
groups+="${SEP}{\"O\":\"$1\"}"
SEP=","
shift 1
done
${sudo} /usr/bin/env bash -e <<EOF
cd ${dest_dir}
echo '{"CN":"${cn}","names":[${groups}],"hosts":[""],"key":{"algo":"rsa","size":2048}}' | ${CFSSL_BIN} gencert -ca=${ca}.crt -ca-key=${ca}.key -config=${ca}-config.json - | ${CFSSLJSON_BIN} -bare client-${id}
mv "client-${id}-key.pem" "client-${id}.key"
mv "client-${id}.pem" "client-${id}.crt"
rm -f "client-${id}.csr"
EOF
}
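# Usage sketch for kube::util::create_client_certkey (illustrative id/CN/groups):
# signs a cert for user "admin" in group "system:masters" against the
# "client-ca" created above.
#   kube::util::create_client_certkey "" "${CERT_DIR}" client-ca admin admin system:masters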
# signs a serving certificate: args are sudo, dest-dir, ca, filename (roughly), subject, hosts...
function kube::util::create_serving_certkey {
local sudo=$1
local dest_dir=$2
local ca=$3
local id=$4
local cn=${5:-$4}
local hosts=""
local SEP=""
shift 5
while [ -n "${1:-}" ]; do
hosts+="${SEP}\"$1\""
SEP=","
shift 1
done
${sudo} /usr/bin/env bash -e <<EOF
cd ${dest_dir}
echo '{"CN":"${cn}","hosts":[${hosts}],"key":{"algo":"rsa","size":2048}}' | ${CFSSL_BIN} gencert -ca=${ca}.crt -ca-key=${ca}.key -config=${ca}-config.json - | ${CFSSLJSON_BIN} -bare serving-${id}
mv "serving-${id}-key.pem" "serving-${id}.key"
mv "serving-${id}.pem" "serving-${id}.crt"
rm -f "serving-${id}.csr"
EOF
}
# creates a self-contained kubeconfig: args are sudo, dest-dir, ca file, host, port, client id, token(optional)
function kube::util::write_client_kubeconfig {
local sudo=$1
local dest_dir=$2
local ca_file=$3
local api_host=$4
local api_port=$5
local client_id=$6
local token=${7:-}
cat <<EOF | ${sudo} tee "${dest_dir}"/${client_id}.kubeconfig > /dev/null
apiVersion: v1
kind: Config
clusters:
- cluster:
certificate-authority: ${ca_file}
server: https://${api_host}:${api_port}/
name: local-up-cluster
users:
- user:
token: ${token}
client-certificate: ${dest_dir}/client-${client_id}.crt
client-key: ${dest_dir}/client-${client_id}.key
name: local-up-cluster
contexts:
- context:
cluster: local-up-cluster
user: local-up-cluster
name: local-up-cluster
current-context: local-up-cluster
EOF
# flatten the kubeconfig files to make them self contained
username=$(whoami)
${sudo} /usr/bin/env bash -e <<EOF
$(kube::util::find-binary kubectl) --kubeconfig="${dest_dir}/${client_id}.kubeconfig" config view --minify --flatten > "/tmp/${client_id}.kubeconfig"
mv -f "/tmp/${client_id}.kubeconfig" "${dest_dir}/${client_id}.kubeconfig"
chown ${username} "${dest_dir}/${client_id}.kubeconfig"
EOF
}
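# Usage sketch for kube::util::write_client_kubeconfig (illustrative values):
# writes ${CERT_DIR}/admin.kubeconfig pointing at a local apiserver and
# referencing the client-admin.crt/key pair produced above.
#   kube::util::write_client_kubeconfig "" "${CERT_DIR}" "${CERT_DIR}/server-ca.crt" \
#     "localhost" "6443" "admin"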
# Determines if docker can be run, failures may simply require that the user be added to the docker group.
function kube::util::ensure_docker_daemon_connectivity {
DOCKER=(docker ${DOCKER_OPTS})
if ! "${DOCKER[@]}" info > /dev/null 2>&1 ; then
cat <<'EOF' >&2
Can't connect to 'docker' daemon. Please fix and retry.
Possible causes:
- Docker Daemon not started
- Linux: confirm via your init system
- macOS w/ docker-machine: run `docker-machine ls` and `docker-machine start <name>`
- macOS w/ Docker for Mac: Check the menu bar and start the Docker application
- DOCKER_HOST hasn't been set or is set incorrectly
- Linux: domain socket is used, DOCKER_* should be unset. In Bash run `unset ${!DOCKER_*}`
- macOS w/ docker-machine: run `eval "$(docker-machine env <name>)"`
- macOS w/ Docker for Mac: domain socket is used, DOCKER_* should be unset. In Bash run `unset ${!DOCKER_*}`
- Other things to check:
- Linux: User isn't in 'docker' group. Add and relogin.
- Something like 'sudo usermod -a -G docker ${USER}'
- RHEL7 bug and workaround: https://bugzilla.redhat.com/show_bug.cgi?id=1119282#c8
EOF
return 1
fi
}
# Wait for background jobs to finish. Return with
# an error status if any of the jobs failed.
kube::util::wait-for-jobs() {
local fail=0
local job
for job in $(jobs -p); do
wait "${job}" || fail=$((fail + 1))
done
return ${fail}
}
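# Usage sketch for kube::util::wait-for-jobs: run tasks in the background and
# propagate failures (the job commands are illustrative).
#   make -C cmd/kubectl & make -C cmd/kubelet &
#   kube::util::wait-for-jobs || { echo "one or more jobs failed" >&2; exit 1; }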
# kube::util::join <delim> <list...>
# Concatenates the list elements with the delimiter passed as first parameter
#
# Ex: kube::util::join , a b c
# -> a,b,c
function kube::util::join {
local IFS="$1"
shift
echo "$*"
}
# Downloads cfssl/cfssljson into $1 directory if they do not already exist in PATH
#
# Assumed vars:
# $1 (cfssl directory) (optional)
#
# Sets:
# CFSSL_BIN: The path of the installed cfssl binary
# CFSSLJSON_BIN: The path of the installed cfssljson binary
#
function kube::util::ensure-cfssl {
if command -v cfssl &>/dev/null && command -v cfssljson &>/dev/null; then
CFSSL_BIN=$(command -v cfssl)
CFSSLJSON_BIN=$(command -v cfssljson)
return 0
fi
# Create a temp dir for cfssl if no directory was given
local cfssldir=${1:-}
if [[ -z "${cfssldir}" ]]; then
kube::util::ensure-temp-dir
cfssldir="${KUBE_TEMP}/cfssl"
fi
mkdir -p "${cfssldir}"
pushd "${cfssldir}" > /dev/null
echo "Unable to successfully run 'cfssl' from $PATH; downloading instead..."
kernel=$(uname -s)
case "${kernel}" in
Linux)
curl --retry 10 -L -o cfssl https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
curl --retry 10 -L -o cfssljson https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
;;
Darwin)
curl --retry 10 -L -o cfssl https://pkg.cfssl.org/R1.2/cfssl_darwin-amd64
curl --retry 10 -L -o cfssljson https://pkg.cfssl.org/R1.2/cfssljson_darwin-amd64
;;
*)
echo "Unknown, unsupported platform: ${kernel}." >&2
echo "Supported platforms: Linux, Darwin." >&2
exit 2
esac
chmod +x cfssl || true
chmod +x cfssljson || true
CFSSL_BIN="${cfssldir}/cfssl"
CFSSLJSON_BIN="${cfssldir}/cfssljson"
if [[ ! -x ${CFSSL_BIN} || ! -x ${CFSSLJSON_BIN} ]]; then
echo "Failed to download 'cfssl'. Please install cfssl and cfssljson and verify they are in \$PATH."
echo "Hint: export PATH=\$PATH:\$GOPATH/bin; go get -u github.com/cloudflare/cfssl/cmd/..."
exit 1
fi
popd > /dev/null
}
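# Usage sketch for kube::util::ensure-cfssl (the directory argument is optional;
# a temp dir is used when omitted):
#   kube::util::ensure-cfssl "${KUBE_TEMP:-/tmp}/cfssl"
#   "${CFSSL_BIN}" version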
# kube::util::ensure_dockerized
# Confirms that the script is being run inside a kube-build image
#
function kube::util::ensure_dockerized {
if [[ -f /kube-build-image ]]; then
return 0
else
echo "ERROR: This script is designed to be run inside a kube-build container"
exit 1
fi
}
# kube::util::ensure-gnu-sed
# Determines which sed binary is gnu-sed on linux/darwin
#
# Sets:
# SED: The name of the gnu-sed binary
#
function kube::util::ensure-gnu-sed {
if LANG=C sed --help 2>&1 | grep -q GNU; then
SED="sed"
elif which gsed &>/dev/null; then
SED="gsed"
else
kube::log::error "Failed to find GNU sed as sed or gsed. If you are on Mac: brew install gnu-sed." >&2
return 1
fi
}
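# Usage sketch for kube::util::ensure-gnu-sed: resolve GNU sed first, then use
# ${SED} for GNU-only flags (the edit below is illustrative).
#   kube::util::ensure-gnu-sed || exit 1
#   "${SED}" -i 's/foo/bar/g' "${file}"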
# Some useful colors.
if [[ -z "${color_start-}" ]]; then
declare -r color_start="\033["
declare -r color_red="${color_start}0;31m"
declare -r color_yellow="${color_start}0;33m"
declare -r color_green="${color_start}0;32m"
declare -r color_blue="${color_start}1;34m"
declare -r color_cyan="${color_start}1;36m"
declare -r color_norm="${color_start}0m"
fi
# ex: ts=2 sw=2 et filetype=sh
|
linzhaoming/origin
|
vendor/k8s.io/kubernetes/hack/lib/util.sh
|
Shell
|
apache-2.0
| 25,968 |
legacy_sdcard_check_image() {
local file="$1"
local diskdev partdev diff
export_bootdevice && export_partdevice diskdev 0 || {
v "Unable to determine upgrade device"
return 1
}
get_partitions "/dev/$diskdev" bootdisk
v "Extract boot sector from the image"
get_image_dd "$1" of=/tmp/image.bs count=1 bs=512b
get_partitions /tmp/image.bs image
#compare tables
diff="$(grep -F -x -v -f /tmp/partmap.bootdisk /tmp/partmap.image)"
rm -f /tmp/image.bs /tmp/partmap.bootdisk /tmp/partmap.image
if [ -n "$diff" ]; then
v "Partition layout has changed. Full image will be written."
ask_bool 0 "Abort" && exit 1
return 0
fi
}
legacy_sdcard_do_upgrade() {
local board=$(board_name)
local diskdev partdev diff
export_bootdevice && export_partdevice diskdev 0 || {
v "Unable to determine upgrade device"
return 1
}
sync
if [ "$UPGRADE_OPT_SAVE_PARTITIONS" = "1" ]; then
get_partitions "/dev/$diskdev" bootdisk
v "Extract boot sector from the image"
get_image_dd "$1" of=/tmp/image.bs count=1 bs=512b
get_partitions /tmp/image.bs image
#compare tables
diff="$(grep -F -x -v -f /tmp/partmap.bootdisk /tmp/partmap.image)"
else
diff=1
fi
if [ -n "$diff" ]; then
get_image_dd "$1" of="/dev/$diskdev" bs=4096 conv=fsync
# Separate removal and addition is necessary; otherwise, partition 1
# will be missing if it overlaps with the old partition 2
partx -d - "/dev/$diskdev"
partx -a - "/dev/$diskdev"
else
v "Writing bootloader to /dev/$diskdev"
get_image_dd "$1" of="$diskdev" bs=512 skip=1 seek=1 count=2048 conv=fsync
#iterate over each partition from the image and write it to the boot disk
while read part start size; do
if export_partdevice partdev $part; then
v "Writing image to /dev/$partdev..."
get_image_dd "$1" of="/dev/$partdev" ibs="512" obs=1M skip="$start" count="$size" conv=fsync
else
v "Unable to find partition $part device, skipped."
fi
done < /tmp/partmap.image
v "Writing new UUID to /dev/$diskdev..."
get_image_dd "$1" of="/dev/$diskdev" bs=1 skip=440 count=4 seek=440 conv=fsync
fi
sleep 1
}
legacy_sdcard_copy_config() {
local partdev
if export_partdevice partdev 1; then
mkdir -p /boot
[ -f /boot/kernel.img ] || mount -o rw,noatime /dev/$partdev /boot
cp -af "$UPGRADE_BACKUP" "/boot/$BACKUP_FILE"
sync
umount /boot
fi
}
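# Typical wiring in a target's platform.sh (illustrative sketch; the
# platform_* names are the standard sysupgrade hooks, not part of this file):
#   platform_check_image() { legacy_sdcard_check_image "$1"; }
#   platform_do_upgrade() { legacy_sdcard_do_upgrade "$1"; }
#   platform_copy_config() { legacy_sdcard_copy_config; }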
|
the2masters/openwrt
|
package/base-files/files/lib/upgrade/legacy-sdcard.sh
|
Shell
|
gpl-2.0
| 2,358 |
#!/bin/zsh
SALT=`date +%N`
if [[ ARGC -gt 0 ]]; then
BINNAME=`basename $PWD`
foreach USER ($@)
mkdir -p obj/$USER
AA=`echo $USER $SALT $BINNAME | sha512sum | base64 | head -1 | cut -c 1-8`
# use ',' as the sed delimiter: $AA comes from base64 output, which may contain '/'
sed "s,AAAAAA,$AA," program.c.template >! program.c
gcc -o obj/$USER/$BINNAME program.c
end
else
echo "USAGE: build.zsh <user_email(s)>"
fi
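# Example invocation (hypothetical addresses): builds one per-user binary under
# obj/<email>/, each compiled with its own salted token.
#   ./build.zsh alice@example.com bob@example.com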
|
cliffe/SecGen
|
modules/utilities/unix/ctf/metactf/files/repository/src_sse/Ch3.6-3.7/Ch3_07_SegvBacktrace/build.zsh
|
Shell
|
gpl-3.0
| 356 |
#!/bin/sh
# run this after running auction.exp, to check the export output files.
! grep -v '^"[0-9]*","[0-9]*","[0-9]*","0","1","1","[0-9]*","[0-9]","-*[0-9]*","[0-9]*\-[0-9]*\-[0-9]* [012][0-9]:[0-5][0-9]:[0-5][0-9].[0-9]*","[0-9]*.[0-9]*"$' *EXPORTDEMO*BID*.csv
|
wolffcm/voltdb
|
tools/kit_tools/auction.sh
|
Shell
|
agpl-3.0
| 265 |
#!/bin/bash
# load java environment variables
source $IROOT/java7.installed
sed -i 's|mysql://.*:3306|mysql://'"${DBHOST}"':3306|g' src/main/java/conf/application.conf
mvn clean compile assembly:single
java -Dninja.port=8080 -jar target/ninja-standalone-0.0.1-SNAPSHOT-jar-with-dependencies.jar &
|
hamiltont/FrameworkBenchmarks
|
frameworks/Java/ninja-standalone/setup.sh
|
Shell
|
bsd-3-clause
| 300 |
## Platforms with a built-in command-not-found handler init file
for file (
# Arch Linux. Must have pkgfile installed: https://wiki.archlinux.org/index.php/Pkgfile#Command_not_found
/usr/share/doc/pkgfile/command-not-found.zsh
# macOS (M1 and classic Homebrew): https://github.com/Homebrew/homebrew-command-not-found
/opt/homebrew/Library/Taps/homebrew/homebrew-command-not-found/handler.sh
/usr/local/Homebrew/Library/Taps/homebrew/homebrew-command-not-found/handler.sh
); do
if [[ -r "$file" ]]; then
source "$file"
unset file
return 0
fi
done
unset file
## Platforms with manual command_not_found_handler() setup
# Debian and derivatives: https://launchpad.net/ubuntu/+source/command-not-found
if [[ -x /usr/lib/command-not-found || -x /usr/share/command-not-found/command-not-found ]]; then
command_not_found_handler() {
if [[ -x /usr/lib/command-not-found ]]; then
/usr/lib/command-not-found -- "$1"
return $?
elif [[ -x /usr/share/command-not-found/command-not-found ]]; then
/usr/share/command-not-found/command-not-found -- "$1"
return $?
else
printf "zsh: command not found: %s\n" "$1" >&2
return 127
fi
}
fi
# Fedora: https://fedoraproject.org/wiki/Features/PackageKitCommandNotFound
if [[ -x /usr/libexec/pk-command-not-found ]]; then
command_not_found_handler() {
if [[ -S /var/run/dbus/system_bus_socket && -x /usr/libexec/packagekitd ]]; then
/usr/libexec/pk-command-not-found "$@"
return $?
fi
printf "zsh: command not found: %s\n" "$1" >&2
return 127
}
fi
# NixOS: https://github.com/NixOS/nixpkgs/tree/master/nixos/modules/programs/command-not-found
if [[ -x /run/current-system/sw/bin/command-not-found ]]; then
command_not_found_handler() {
/run/current-system/sw/bin/command-not-found "$@"
}
fi
# Termux: https://github.com/termux/command-not-found
if [[ -x /data/data/com.termux/files/usr/libexec/termux/command-not-found ]]; then
command_not_found_handler() {
/data/data/com.termux/files/usr/libexec/termux/command-not-found "$1"
}
fi
# SUSE and derivates: https://www.unix.com/man-page/suse/1/command-not-found/
if [[ -x /usr/bin/command-not-found ]]; then
command_not_found_handler() {
/usr/bin/command-not-found "$1"
}
fi
|
lstolowski/oh-my-zsh
|
plugins/command-not-found/command-not-found.plugin.zsh
|
Shell
|
mit
| 2,294 |
#!/bin/sh
test_description='test git fast-import unpack limit'
. ./test-lib.sh
test_expect_success 'create loose objects on import' '
test_tick &&
cat >input <<-INPUT_END &&
commit refs/heads/master
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
initial
COMMIT
done
INPUT_END
git -c fastimport.unpackLimit=2 fast-import --done <input &&
git fsck --no-progress &&
test $(find .git/objects/?? -type f | wc -l) -eq 2 &&
test $(find .git/objects/pack -type f | wc -l) -eq 0
'
test_expect_success 'bigger packs are preserved' '
test_tick &&
cat >input <<-INPUT_END &&
commit refs/heads/master
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
incremental should create a pack
COMMIT
from refs/heads/master^0
commit refs/heads/branch
committer $GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE
data <<COMMIT
branch
COMMIT
done
INPUT_END
git -c fastimport.unpackLimit=2 fast-import --done <input &&
git fsck --no-progress &&
test $(find .git/objects/?? -type f | wc -l) -eq 2 &&
test $(find .git/objects/pack -type f | wc -l) -eq 2
'
test_expect_success 'lookups after checkpoint works' '
hello_id=$(echo hello | git hash-object --stdin -t blob) &&
id="$GIT_COMMITTER_NAME <$GIT_COMMITTER_EMAIL> $GIT_COMMITTER_DATE" &&
before=$(git rev-parse refs/heads/master^0) &&
(
cat <<-INPUT_END &&
blob
mark :1
data 6
hello
commit refs/heads/master
mark :2
committer $id
data <<COMMIT
checkpoint after this
COMMIT
from refs/heads/master^0
M 100644 :1 hello
# pre-checkpoint
cat-blob :1
cat-blob $hello_id
checkpoint
# post-checkpoint
cat-blob :1
cat-blob $hello_id
INPUT_END
n=0 &&
from=$before &&
while test x"$from" = x"$before"
do
if test $n -gt 30
then
echo >&2 "checkpoint did not update branch" &&
exit 1
else
n=$(($n + 1))
fi &&
sleep 1 &&
from=$(git rev-parse refs/heads/master^0)
done &&
cat <<-INPUT_END &&
commit refs/heads/master
committer $id
data <<COMMIT
make sure from "unpacked sha1 reference" works, too
COMMIT
from $from
INPUT_END
echo done
) | git -c fastimport.unpackLimit=100 fast-import --done &&
test $(find .git/objects/?? -type f | wc -l) -eq 6 &&
test $(find .git/objects/pack -type f | wc -l) -eq 2
'
test_done
|
brunosantiagovazquez/git
|
t/t9302-fast-import-unpack-limit.sh
|
Shell
|
gpl-2.0
| 2,364 |
#!/usr/bin/env bash
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
set -o errexit
set -o nounset
set -o pipefail
diff="$(find . -name 'Cargo.lock' -print0 | xargs -0 git diff)"
echo "${diff}"
[[ -z "${diff}" ]]
|
CYBAI/servo
|
etc/ci/lockfile_changed.sh
|
Shell
|
mpl-2.0
| 367 |
#!/bin/bash
set -o pipefail
: ${SCHEME:="XCDYouTubeKit iOS Static Library"}
: ${CONFIGURATION:="Release"}
: ${DESTINATION:="platform=iOS Simulator,name=iPhone 5s"}
COMMAND="xcodebuild clean test -project XCDYouTubeKit.xcodeproj -scheme '${SCHEME}' -configuration '${CONFIGURATION}' -destination '${DESTINATION}'"
for BUILD_SETTING in OBJROOT RUN_CLANG_STATIC_ANALYZER; do
VALUE=`eval echo \\$"${BUILD_SETTING}"`
if [ ! -z "${VALUE}" ]; then
COMMAND+=" ${BUILD_SETTING}='${VALUE}'"
unset ${BUILD_SETTING}
fi
done
xcpretty --version > /dev/null 2>&1 && COMMAND+=" | xcpretty -c"
set -x
eval "${COMMAND}"
|
eni9889/XCDYouTubeKit
|
Scripts/run-tests.sh
|
Shell
|
mit
| 631 |
# ltmain.sh - Provide generalized library-building support services.
# NOTE: Changing this file will not affect anything until you rerun configure.
#
# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001
# Free Software Foundation, Inc.
# Originally by Gordon Matzigkeit <[email protected]>, 1996
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# As a special exception to the GNU General Public License, if you
# distribute this file as part of a program that contains a
# configuration script generated by Autoconf, you may include it under
# the same distribution terms that you use for the rest of that program.
# Check that we have a working $echo.
if test "X$1" = X--no-reexec; then
# Discard the --no-reexec flag, and continue.
shift
elif test "X$1" = X--fallback-echo; then
# Avoid inline document here, it may be left over
:
elif test "X`($echo '\t') 2>/dev/null`" = 'X\t'; then
# Yippee, $echo works!
:
else
# Restart under the correct shell, and then maybe $echo will work.
exec $SHELL "$0" --no-reexec ${1+"$@"}
fi
if test "X$1" = X--fallback-echo; then
# used as fallback echo
shift
cat <<EOF
$*
EOF
exit 0
fi
# The name of this program.
progname=`$echo "$0" | sed 's%^.*/%%'`
modename="$progname"
# Constants.
PROGRAM=ltmain.sh
PACKAGE=libtool
VERSION=1.4
TIMESTAMP=" (1.920 2001/04/24 23:26:18)"
default_mode=
help="Try \`$progname --help' for more information."
magic="%%%MAGIC variable%%%"
mkdir="mkdir"
mv="mv -f"
rm="rm -f"
# Sed substitution that helps us do robust quoting. It backslashifies
# metacharacters that are still active within double-quoted strings.
Xsed='sed -e 1s/^X//'
sed_quote_subst='s/\([\\`\\"$\\\\]\)/\\\1/g'
SP2NL='tr \040 \012'
NL2SP='tr \015\012 \040\040'
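# Illustrative example (not part of the original script): $sed_quote_subst
# backslashifies the metacharacters that stay active inside double quotes,
# so the string  a"b$c\d  becomes  a\"b\$c\\d .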
# NLS nuisances.
# Only set LANG and LC_ALL to C if already set.
# These must not be set unconditionally because not all systems understand
# e.g. LANG=C (notably SCO).
# We save the old values to restore during execute mode.
if test "${LC_ALL+set}" = set; then
save_LC_ALL="$LC_ALL"; LC_ALL=C; export LC_ALL
fi
if test "${LANG+set}" = set; then
save_LANG="$LANG"; LANG=C; export LANG
fi
if test "$build_libtool_libs" != yes && test "$build_old_libs" != yes; then
echo "$modename: not configured to build any kind of library" 1>&2
echo "Fatal configuration error. See the $PACKAGE docs for more information." 1>&2
exit 1
fi
# Global variables.
mode=$default_mode
nonopt=
prev=
prevopt=
run=
show="$echo"
show_help=
execute_dlfiles=
lo2o="s/\\.lo\$/.${objext}/"
o2lo="s/\\.${objext}\$/.lo/"
# Parse our command line options once, thoroughly.
while test $# -gt 0
do
arg="$1"
shift
case $arg in
-*=*) optarg=`$echo "X$arg" | $Xsed -e 's/[-_a-zA-Z0-9]*=//'` ;;
*) optarg= ;;
esac
# If the previous option needs an argument, assign it.
if test -n "$prev"; then
case $prev in
execute_dlfiles)
execute_dlfiles="$execute_dlfiles $arg"
;;
*)
eval "$prev=\$arg"
;;
esac
prev=
prevopt=
continue
fi
# Have we seen a non-optional argument yet?
case $arg in
--help)
show_help=yes
;;
--version)
echo "$PROGRAM (GNU $PACKAGE) $VERSION$TIMESTAMP"
exit 0
;;
--config)
sed -e '1,/^# ### BEGIN LIBTOOL CONFIG/d' -e '/^# ### END LIBTOOL CONFIG/,$d' $0
exit 0
;;
--debug)
echo "$progname: enabling shell trace mode"
set -x
;;
--dry-run | -n)
run=:
;;
--features)
echo "host: $host"
if test "$build_libtool_libs" = yes; then
echo "enable shared libraries"
else
echo "disable shared libraries"
fi
if test "$build_old_libs" = yes; then
echo "enable static libraries"
else
echo "disable static libraries"
fi
exit 0
;;
--finish) mode="finish" ;;
--mode) prevopt="--mode" prev=mode ;;
--mode=*) mode="$optarg" ;;
--quiet | --silent)
show=:
;;
-dlopen)
prevopt="-dlopen"
prev=execute_dlfiles
;;
-*)
$echo "$modename: unrecognized option \`$arg'" 1>&2
$echo "$help" 1>&2
exit 1
;;
*)
nonopt="$arg"
break
;;
esac
done
if test -n "$prevopt"; then
$echo "$modename: option \`$prevopt' requires an argument" 1>&2
$echo "$help" 1>&2
exit 1
fi
if test -z "$show_help"; then
# Infer the operation mode.
if test -z "$mode"; then
case $nonopt in
*cc | *++ | gcc* | *-gcc*)
mode=link
for arg
do
case $arg in
-c)
mode=compile
break
;;
esac
done
;;
*db | *dbx | *strace | *truss)
mode=execute
;;
*install*|cp|mv)
mode=install
;;
*rm)
mode=uninstall
;;
*)
# If we have no mode, but dlfiles were specified, then do execute mode.
test -n "$execute_dlfiles" && mode=execute
# Just use the default operation mode.
if test -z "$mode"; then
if test -n "$nonopt"; then
$echo "$modename: warning: cannot infer operation mode from \`$nonopt'" 1>&2
else
$echo "$modename: warning: cannot infer operation mode without MODE-ARGS" 1>&2
fi
fi
;;
esac
fi
# Only execute mode is allowed to have -dlopen flags.
if test -n "$execute_dlfiles" && test "$mode" != execute; then
$echo "$modename: unrecognized option \`-dlopen'" 1>&2
$echo "$help" 1>&2
exit 1
fi
# Change the help message to a mode-specific one.
generic_help="$help"
help="Try \`$modename --help --mode=$mode' for more information."
# These modes are in order of execution frequency so that they run quickly.
case $mode in
# libtool compile mode
compile)
modename="$modename: compile"
# Get the compilation command and the source file.
base_compile=
prev=
lastarg=
srcfile="$nonopt"
suppress_output=
user_target=no
for arg
do
case $prev in
"") ;;
xcompiler)
# Aesthetically quote the previous argument.
prev=
lastarg=`$echo "X$arg" | $Xsed -e "$sed_quote_subst"`
case $arg in
# Double-quote args containing other shell metacharacters.
# Many Bourne shells cannot handle close brackets correctly
# in scan sets, so we specify it separately.
*[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"")
arg="\"$arg\""
;;
esac
# Add the previous argument to base_compile.
if test -z "$base_compile"; then
base_compile="$lastarg"
else
base_compile="$base_compile $lastarg"
fi
continue
;;
esac
# Accept any command-line options.
case $arg in
-o)
if test "$user_target" != "no"; then
$echo "$modename: you cannot specify \`-o' more than once" 1>&2
exit 1
fi
user_target=next
;;
-static)
build_old_libs=yes
continue
;;
-prefer-pic)
pic_mode=yes
continue
;;
-prefer-non-pic)
pic_mode=no
continue
;;
-Xcompiler)
prev=xcompiler
continue
;;
-Wc,*)
args=`$echo "X$arg" | $Xsed -e "s/^-Wc,//"`
lastarg=
IFS="${IFS= }"; save_ifs="$IFS"; IFS=','
for arg in $args; do
IFS="$save_ifs"
# Double-quote args containing other shell metacharacters.
# Many Bourne shells cannot handle close brackets correctly
# in scan sets, so we specify it separately.
case $arg in
*[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"")
arg="\"$arg\""
;;
esac
lastarg="$lastarg $arg"
done
IFS="$save_ifs"
lastarg=`$echo "X$lastarg" | $Xsed -e "s/^ //"`
# Add the arguments to base_compile.
if test -z "$base_compile"; then
base_compile="$lastarg"
else
base_compile="$base_compile $lastarg"
fi
continue
;;
esac
case $user_target in
next)
# The next one is the -o target name
user_target=yes
continue
;;
yes)
# We got the output file
user_target=set
libobj="$arg"
continue
;;
esac
# Accept the current argument as the source file.
lastarg="$srcfile"
srcfile="$arg"
# Aesthetically quote the previous argument.
# Backslashify any backslashes, double quotes, and dollar signs.
# These are the only characters that are still specially
# interpreted inside of double-quoted strings.
lastarg=`$echo "X$lastarg" | $Xsed -e "$sed_quote_subst"`
# Double-quote args containing other shell metacharacters.
# Many Bourne shells cannot handle close brackets correctly
# in scan sets, so we specify it separately.
case $lastarg in
*[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"")
lastarg="\"$lastarg\""
;;
esac
# Add the previous argument to base_compile.
if test -z "$base_compile"; then
base_compile="$lastarg"
else
base_compile="$base_compile $lastarg"
fi
done
case $user_target in
set)
;;
no)
# Get the name of the library object.
libobj=`$echo "X$srcfile" | $Xsed -e 's%^.*/%%'`
;;
*)
$echo "$modename: you must specify a target with \`-o'" 1>&2
exit 1
;;
esac
# Recognize several different file suffixes.
# If the user specifies -o file.o, it is replaced with file.lo
xform='[cCFSfmso]'
case $libobj in
*.ada) xform=ada ;;
*.adb) xform=adb ;;
*.ads) xform=ads ;;
*.asm) xform=asm ;;
*.c++) xform=c++ ;;
*.cc) xform=cc ;;
*.cpp) xform=cpp ;;
*.cxx) xform=cxx ;;
*.f90) xform=f90 ;;
*.for) xform=for ;;
esac
libobj=`$echo "X$libobj" | $Xsed -e "s/\.$xform$/.lo/"`
case $libobj in
*.lo) obj=`$echo "X$libobj" | $Xsed -e "$lo2o"` ;;
*)
$echo "$modename: cannot determine name of library object from \`$libobj'" 1>&2
exit 1
;;
esac
if test -z "$base_compile"; then
$echo "$modename: you must specify a compilation command" 1>&2
$echo "$help" 1>&2
exit 1
fi
# Delete any leftover library objects.
if test "$build_old_libs" = yes; then
removelist="$obj $libobj"
else
removelist="$libobj"
fi
$run $rm $removelist
trap "$run $rm $removelist; exit 1" 1 2 15
# On Cygwin there's no "real" PIC flag so we must build both object types
case $host_os in
cygwin* | mingw* | pw32* | os2*)
pic_mode=default
;;
esac
if test $pic_mode = no && test "$deplibs_check_method" != pass_all; then
# non-PIC code in shared libraries is not supported
pic_mode=default
fi
# Calculate the filename of the output object if compiler does
# not support -o with -c
if test "$compiler_c_o" = no; then
output_obj=`$echo "X$srcfile" | $Xsed -e 's%^.*/%%' -e 's%\.[^.]*$%%'`.${objext}
lockfile="$output_obj.lock"
removelist="$removelist $output_obj $lockfile"
trap "$run $rm $removelist; exit 1" 1 2 15
else
need_locks=no
lockfile=
fi
# Lock this critical section if it is needed
# We use this script file to make the link, it avoids creating a new file
if test "$need_locks" = yes; then
until $run ln "$0" "$lockfile" 2>/dev/null; do
$show "Waiting for $lockfile to be removed"
sleep 2
done
elif test "$need_locks" = warn; then
if test -f "$lockfile"; then
echo "\
*** ERROR, $lockfile exists and contains:
`cat $lockfile 2>/dev/null`
This indicates that another process is trying to use the same
temporary object file, and libtool could not work around it because
your compiler does not support \`-c' and \`-o' together. If you
repeat this compilation, it may succeed, by chance, but you had better
avoid parallel builds (make -j) in this platform, or get a better
compiler."
$run $rm $removelist
exit 1
fi
echo $srcfile > "$lockfile"
fi
if test -n "$fix_srcfile_path"; then
eval srcfile=\"$fix_srcfile_path\"
fi
# Only build a PIC object if we are building libtool libraries.
if test "$build_libtool_libs" = yes; then
# Without this assignment, base_compile gets emptied.
fbsd_hideous_sh_bug=$base_compile
if test "$pic_mode" != no; then
# All platforms use -DPIC, to notify preprocessed assembler code.
command="$base_compile $srcfile $pic_flag -DPIC"
else
# Don't build PIC code
command="$base_compile $srcfile"
fi
if test "$build_old_libs" = yes; then
lo_libobj="$libobj"
dir=`$echo "X$libobj" | $Xsed -e 's%/[^/]*$%%'`
if test "X$dir" = "X$libobj"; then
dir="$objdir"
else
dir="$dir/$objdir"
fi
libobj="$dir/"`$echo "X$libobj" | $Xsed -e 's%^.*/%%'`
if test -d "$dir"; then
$show "$rm $libobj"
$run $rm $libobj
else
$show "$mkdir $dir"
$run $mkdir $dir
status=$?
if test $status -ne 0 && test ! -d $dir; then
exit $status
fi
fi
fi
if test "$compiler_o_lo" = yes; then
output_obj="$libobj"
command="$command -o $output_obj"
elif test "$compiler_c_o" = yes; then
output_obj="$obj"
command="$command -o $output_obj"
fi
$run $rm "$output_obj"
$show "$command"
if $run eval "$command"; then :
else
test -n "$output_obj" && $run $rm $removelist
exit 1
fi
if test "$need_locks" = warn &&
test x"`cat $lockfile 2>/dev/null`" != x"$srcfile"; then
echo "\
*** ERROR, $lockfile contains:
`cat $lockfile 2>/dev/null`
but it should contain:
$srcfile
This indicates that another process is trying to use the same
temporary object file, and libtool could not work around it because
your compiler does not support \`-c' and \`-o' together. If you
repeat this compilation, it may succeed, by chance, but you had better
avoid parallel builds (make -j) in this platform, or get a better
compiler."
$run $rm $removelist
exit 1
fi
# Just move the object if needed, then go on to compile the next one
if test x"$output_obj" != x"$libobj"; then
$show "$mv $output_obj $libobj"
if $run $mv $output_obj $libobj; then :
else
error=$?
$run $rm $removelist
exit $error
fi
fi
# If we have no pic_flag, then copy the object into place and finish.
if (test -z "$pic_flag" || test "$pic_mode" != default) &&
test "$build_old_libs" = yes; then
# Rename the .lo from within objdir to obj
if test -f $obj; then
$show $rm $obj
$run $rm $obj
fi
$show "$mv $libobj $obj"
if $run $mv $libobj $obj; then :
else
error=$?
$run $rm $removelist
exit $error
fi
xdir=`$echo "X$obj" | $Xsed -e 's%/[^/]*$%%'`
if test "X$xdir" = "X$obj"; then
xdir="."
else
xdir="$xdir"
fi
baseobj=`$echo "X$obj" | $Xsed -e "s%.*/%%"`
libobj=`$echo "X$baseobj" | $Xsed -e "$o2lo"`
# Now arrange that obj and lo_libobj become the same file
$show "(cd $xdir && $LN_S $baseobj $libobj)"
if $run eval '(cd $xdir && $LN_S $baseobj $libobj)'; then
exit 0
else
error=$?
$run $rm $removelist
exit $error
fi
fi
# Allow error messages only from the first compilation.
suppress_output=' >/dev/null 2>&1'
fi
# Only build a position-dependent object if we build old libraries.
if test "$build_old_libs" = yes; then
if test "$pic_mode" != yes; then
# Don't build PIC code
command="$base_compile $srcfile"
else
# All platforms use -DPIC, to notify preprocessed assembler code.
command="$base_compile $srcfile $pic_flag -DPIC"
fi
if test "$compiler_c_o" = yes; then
command="$command -o $obj"
output_obj="$obj"
fi
# Suppress compiler output if we already did a PIC compilation.
command="$command$suppress_output"
$run $rm "$output_obj"
$show "$command"
if $run eval "$command"; then :
else
$run $rm $removelist
exit 1
fi
if test "$need_locks" = warn &&
test x"`cat $lockfile 2>/dev/null`" != x"$srcfile"; then
echo "\
*** ERROR, $lockfile contains:
`cat $lockfile 2>/dev/null`
but it should contain:
$srcfile
This indicates that another process is trying to use the same
temporary object file, and libtool could not work around it because
your compiler does not support \`-c' and \`-o' together. If you
repeat this compilation, it may succeed, by chance, but you had better
avoid parallel builds (make -j) in this platform, or get a better
compiler."
$run $rm $removelist
exit 1
fi
# Just move the object if needed
if test x"$output_obj" != x"$obj"; then
$show "$mv $output_obj $obj"
if $run $mv $output_obj $obj; then :
else
error=$?
$run $rm $removelist
exit $error
fi
fi
# Create an invalid libtool object if no PIC, so that we do not
# accidentally link it into a program.
if test "$build_libtool_libs" != yes; then
$show "echo timestamp > $libobj"
$run eval "echo timestamp > \$libobj" || exit $?
else
# Move the .lo from within objdir
$show "$mv $libobj $lo_libobj"
if $run $mv $libobj $lo_libobj; then :
else
error=$?
$run $rm $removelist
exit $error
fi
fi
fi
# Unlock the critical section if it was locked
if test "$need_locks" != no; then
$run $rm "$lockfile"
fi
exit 0
;;
# libtool link mode
link | relink)
modename="$modename: link"
case $host in
*-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2*)
# It is impossible to link a dll without this setting, and
# we shouldn't force the makefile maintainer to figure out
# which system we are compiling for in order to pass an extra
# flag for every libtool invocation.
# allow_undefined=no
# FIXME: Unfortunately, there are problems with the above when trying
# to make a dll which has undefined symbols, in which case not
# even a static library is built. For now, we need to specify
# -no-undefined on the libtool link line when we can be certain
# that all symbols are satisfied, otherwise we get a static library.
allow_undefined=yes
;;
*)
allow_undefined=yes
;;
esac
libtool_args="$nonopt"
compile_command="$nonopt"
finalize_command="$nonopt"
compile_rpath=
finalize_rpath=
compile_shlibpath=
finalize_shlibpath=
convenience=
old_convenience=
deplibs=
old_deplibs=
compiler_flags=
linker_flags=
dllsearchpath=
lib_search_path=`pwd`
avoid_version=no
dlfiles=
dlprefiles=
dlself=no
export_dynamic=no
export_symbols=
export_symbols_regex=
generated=
libobjs=
ltlibs=
module=no
no_install=no
objs=
prefer_static_libs=no
preload=no
prev=
prevarg=
release=
rpath=
xrpath=
perm_rpath=
temp_rpath=
thread_safe=no
vinfo=
# We need to know -static, to get the right output filenames.
for arg
do
case $arg in
-all-static | -static)
if test "X$arg" = "X-all-static"; then
if test "$build_libtool_libs" = yes && test -z "$link_static_flag"; then
$echo "$modename: warning: complete static linking is impossible in this configuration" 1>&2
fi
if test -n "$link_static_flag"; then
dlopen_self=$dlopen_self_static
fi
else
if test -z "$pic_flag" && test -n "$link_static_flag"; then
dlopen_self=$dlopen_self_static
fi
fi
build_libtool_libs=no
build_old_libs=yes
prefer_static_libs=yes
break
;;
esac
done
# See if our shared archives depend on static archives.
test -n "$old_archive_from_new_cmds" && build_old_libs=yes
# Go through the arguments, transforming them on the way.
while test $# -gt 0; do
arg="$1"
shift
case $arg in
*[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"")
qarg=\"`$echo "X$arg" | $Xsed -e "$sed_quote_subst"`\" ### testsuite: skip nested quoting test
;;
*) qarg=$arg ;;
esac
libtool_args="$libtool_args $qarg"
# If the previous option needs an argument, assign it.
if test -n "$prev"; then
case $prev in
output)
compile_command="$compile_command @OUTPUT@"
finalize_command="$finalize_command @OUTPUT@"
;;
esac
case $prev in
dlfiles|dlprefiles)
if test "$preload" = no; then
# Add the symbol object into the linking commands.
compile_command="$compile_command @SYMFILE@"
finalize_command="$finalize_command @SYMFILE@"
preload=yes
fi
case $arg in
*.la | *.lo) ;; # We handle these cases below.
force)
if test "$dlself" = no; then
dlself=needless
export_dynamic=yes
fi
prev=
continue
;;
self)
if test "$prev" = dlprefiles; then
dlself=yes
elif test "$prev" = dlfiles && test "$dlopen_self" != yes; then
dlself=yes
else
dlself=needless
export_dynamic=yes
fi
prev=
continue
;;
*)
if test "$prev" = dlfiles; then
dlfiles="$dlfiles $arg"
else
dlprefiles="$dlprefiles $arg"
fi
prev=
continue
;;
esac
;;
expsyms)
export_symbols="$arg"
if test ! -f "$arg"; then
$echo "$modename: symbol file \`$arg' does not exist"
exit 1
fi
prev=
continue
;;
expsyms_regex)
export_symbols_regex="$arg"
prev=
continue
;;
release)
release="-$arg"
prev=
continue
;;
rpath | xrpath)
# We need an absolute path.
case $arg in
[\\/]* | [A-Za-z]:[\\/]*) ;;
*)
$echo "$modename: only absolute run-paths are allowed" 1>&2
exit 1
;;
esac
if test "$prev" = rpath; then
case "$rpath " in
*" $arg "*) ;;
*) rpath="$rpath $arg" ;;
esac
else
case "$xrpath " in
*" $arg "*) ;;
*) xrpath="$xrpath $arg" ;;
esac
fi
prev=
continue
;;
xcompiler)
compiler_flags="$compiler_flags $qarg"
prev=
compile_command="$compile_command $qarg"
finalize_command="$finalize_command $qarg"
continue
;;
xlinker)
linker_flags="$linker_flags $qarg"
compiler_flags="$compiler_flags $wl$qarg"
prev=
compile_command="$compile_command $wl$qarg"
finalize_command="$finalize_command $wl$qarg"
continue
;;
*)
eval "$prev=\"\$arg\""
prev=
continue
;;
esac
fi # test -n $prev
prevarg="$arg"
case $arg in
-all-static)
if test -n "$link_static_flag"; then
compile_command="$compile_command $link_static_flag"
finalize_command="$finalize_command $link_static_flag"
fi
continue
;;
-allow-undefined)
# FIXME: remove this flag sometime in the future.
$echo "$modename: \`-allow-undefined' is deprecated because it is the default" 1>&2
continue
;;
-avoid-version)
avoid_version=yes
continue
;;
-dlopen)
prev=dlfiles
continue
;;
-dlpreopen)
prev=dlprefiles
continue
;;
-export-dynamic)
export_dynamic=yes
continue
;;
-export-symbols | -export-symbols-regex)
if test -n "$export_symbols" || test -n "$export_symbols_regex"; then
$echo "$modename: more than one -exported-symbols argument is not allowed"
exit 1
fi
if test "X$arg" = "X-export-symbols"; then
prev=expsyms
else
prev=expsyms_regex
fi
continue
;;
# The native IRIX linker understands -LANG:*, -LIST:* and -LNO:*
# so, if we see these flags be careful not to treat them like -L
-L[A-Z][A-Z]*:*)
case $with_gcc/$host in
no/*-*-irix*)
compile_command="$compile_command $arg"
finalize_command="$finalize_command $arg"
;;
esac
continue
;;
-L*)
dir=`$echo "X$arg" | $Xsed -e 's/^-L//'`
# We need an absolute path.
case $dir in
[\\/]* | [A-Za-z]:[\\/]*) ;;
*)
absdir=`cd "$dir" && pwd`
if test -z "$absdir"; then
$echo "$modename: cannot determine absolute directory name of \`$dir'" 1>&2
exit 1
fi
dir="$absdir"
;;
esac
case "$deplibs " in
*" -L$dir "*) ;;
*)
deplibs="$deplibs -L$dir"
lib_search_path="$lib_search_path $dir"
;;
esac
case $host in
*-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2*)
case :$dllsearchpath: in
*":$dir:"*) ;;
*) dllsearchpath="$dllsearchpath:$dir";;
esac
;;
esac
continue
;;
-l*)
if test "X$arg" = "X-lc" || test "X$arg" = "X-lm"; then
case $host in
*-*-cygwin* | *-*-pw32* | *-*-beos*)
# These systems don't actually have a C or math library (as such)
continue
;;
*-*-mingw* | *-*-os2*)
# These systems don't actually have a C library (as such)
test "X$arg" = "X-lc" && continue
;;
esac
fi
deplibs="$deplibs $arg"
continue
;;
-module)
module=yes
continue
;;
-no-fast-install)
fast_install=no
continue
;;
-no-install)
case $host in
*-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2*)
# The PATH hackery in wrapper scripts is required on Windows
# in order for the loader to find any dlls it needs.
$echo "$modename: warning: \`-no-install' is ignored for $host" 1>&2
$echo "$modename: warning: assuming \`-no-fast-install' instead" 1>&2
fast_install=no
;;
*) no_install=yes ;;
esac
continue
;;
-no-undefined)
allow_undefined=no
continue
;;
-o) prev=output ;;
-release)
prev=release
continue
;;
-rpath)
prev=rpath
continue
;;
-R)
prev=xrpath
continue
;;
-R*)
dir=`$echo "X$arg" | $Xsed -e 's/^-R//'`
# We need an absolute path.
case $dir in
[\\/]* | [A-Za-z]:[\\/]*) ;;
*)
$echo "$modename: only absolute run-paths are allowed" 1>&2
exit 1
;;
esac
case "$xrpath " in
*" $dir "*) ;;
*) xrpath="$xrpath $dir" ;;
esac
continue
;;
-static)
# The effects of -static are defined in a previous loop.
# We used to do the same as -all-static on platforms that
# didn't have a PIC flag, but the assumption that the effects
# would be equivalent was wrong. It would break on at least
# Digital Unix and AIX.
continue
;;
-thread-safe)
thread_safe=yes
continue
;;
-version-info)
prev=vinfo
continue
;;
-Wc,*)
args=`$echo "X$arg" | $Xsed -e "$sed_quote_subst" -e 's/^-Wc,//'`
arg=
IFS="${IFS= }"; save_ifs="$IFS"; IFS=','
for flag in $args; do
IFS="$save_ifs"
case $flag in
*[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"")
flag="\"$flag\""
;;
esac
arg="$arg $wl$flag"
compiler_flags="$compiler_flags $flag"
done
IFS="$save_ifs"
arg=`$echo "X$arg" | $Xsed -e "s/^ //"`
;;
-Wl,*)
args=`$echo "X$arg" | $Xsed -e "$sed_quote_subst" -e 's/^-Wl,//'`
arg=
IFS="${IFS= }"; save_ifs="$IFS"; IFS=','
for flag in $args; do
IFS="$save_ifs"
case $flag in
*[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"")
flag="\"$flag\""
;;
esac
arg="$arg $wl$flag"
compiler_flags="$compiler_flags $wl$flag"
linker_flags="$linker_flags $flag"
done
IFS="$save_ifs"
arg=`$echo "X$arg" | $Xsed -e "s/^ //"`
;;
-Xcompiler)
prev=xcompiler
continue
;;
-Xlinker)
prev=xlinker
continue
;;
# Some other compiler flag.
-* | +*)
# Unknown arguments in both finalize_command and compile_command need
# to be aesthetically quoted because they are evaled later.
arg=`$echo "X$arg" | $Xsed -e "$sed_quote_subst"`
case $arg in
*[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"")
arg="\"$arg\""
;;
esac
;;
*.lo | *.$objext)
# A library or standard object.
if test "$prev" = dlfiles; then
# This file was specified with -dlopen.
if test "$build_libtool_libs" = yes && test "$dlopen_support" = yes; then
dlfiles="$dlfiles $arg"
prev=
continue
else
# If libtool objects are unsupported, then we need to preload.
prev=dlprefiles
fi
fi
if test "$prev" = dlprefiles; then
# Preload the old-style object.
dlprefiles="$dlprefiles "`$echo "X$arg" | $Xsed -e "$lo2o"`
prev=
else
case $arg in
*.lo) libobjs="$libobjs $arg" ;;
*) objs="$objs $arg" ;;
esac
fi
;;
*.$libext)
# An archive.
deplibs="$deplibs $arg"
old_deplibs="$old_deplibs $arg"
continue
;;
*.la)
# A libtool-controlled library.
if test "$prev" = dlfiles; then
# This library was specified with -dlopen.
dlfiles="$dlfiles $arg"
prev=
elif test "$prev" = dlprefiles; then
# The library was specified with -dlpreopen.
dlprefiles="$dlprefiles $arg"
prev=
else
deplibs="$deplibs $arg"
fi
continue
;;
# Some other compiler argument.
*)
# Unknown arguments in both finalize_command and compile_command need
# to be aesthetically quoted because they are evaled later.
arg=`$echo "X$arg" | $Xsed -e "$sed_quote_subst"`
case $arg in
*[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"")
arg="\"$arg\""
;;
esac
;;
esac # arg
# Now actually substitute the argument into the commands.
if test -n "$arg"; then
compile_command="$compile_command $arg"
finalize_command="$finalize_command $arg"
fi
done # argument parsing loop
if test -n "$prev"; then
$echo "$modename: the \`$prevarg' option requires an argument" 1>&2
$echo "$help" 1>&2
exit 1
fi
if test "$export_dynamic" = yes && test -n "$export_dynamic_flag_spec"; then
eval arg=\"$export_dynamic_flag_spec\"
compile_command="$compile_command $arg"
finalize_command="$finalize_command $arg"
fi
# calculate the name of the file, without its directory
outputname=`$echo "X$output" | $Xsed -e 's%^.*/%%'`
libobjs_save="$libobjs"
if test -n "$shlibpath_var"; then
# get the directories listed in $shlibpath_var
eval shlib_search_path=\`\$echo \"X\${$shlibpath_var}\" \| \$Xsed -e \'s/:/ /g\'\`
else
shlib_search_path=
fi
eval sys_lib_search_path=\"$sys_lib_search_path_spec\"
eval sys_lib_dlsearch_path=\"$sys_lib_dlsearch_path_spec\"
output_objdir=`$echo "X$output" | $Xsed -e 's%/[^/]*$%%'`
if test "X$output_objdir" = "X$output"; then
output_objdir="$objdir"
else
output_objdir="$output_objdir/$objdir"
fi
# Create the object directory.
if test ! -d $output_objdir; then
$show "$mkdir $output_objdir"
$run $mkdir $output_objdir
status=$?
if test $status -ne 0 && test ! -d $output_objdir; then
exit $status
fi
fi
# Determine the type of output
case $output in
"")
$echo "$modename: you must specify an output file" 1>&2
$echo "$help" 1>&2
exit 1
;;
*.$libext) linkmode=oldlib ;;
*.lo | *.$objext) linkmode=obj ;;
*.la) linkmode=lib ;;
*) linkmode=prog ;; # Anything else should be a program.
esac
specialdeplibs=
libs=
# Find all interdependent deplibs by searching for libraries
# that are linked more than once (e.g. -la -lb -la)
for deplib in $deplibs; do
case "$libs " in
*" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;;
esac
libs="$libs $deplib"
done
deplibs=
newdependency_libs=
newlib_search_path=
need_relink=no # whether we're linking any uninstalled libtool libraries
notinst_deplibs= # not-installed libtool libraries
notinst_path= # paths that contain not-installed libtool libraries
case $linkmode in
lib)
passes="conv link"
for file in $dlfiles $dlprefiles; do
case $file in
*.la) ;;
*)
$echo "$modename: libraries can \`-dlopen' only libtool libraries: $file" 1>&2
exit 1
;;
esac
done
;;
prog)
compile_deplibs=
finalize_deplibs=
alldeplibs=no
newdlfiles=
newdlprefiles=
passes="conv scan dlopen dlpreopen link"
;;
*) passes="conv"
;;
esac
for pass in $passes; do
if test $linkmode = prog; then
# Determine which files to process
case $pass in
dlopen)
libs="$dlfiles"
save_deplibs="$deplibs" # Collect dlpreopened libraries
deplibs=
;;
dlpreopen) libs="$dlprefiles" ;;
link) libs="$deplibs %DEPLIBS% $dependency_libs" ;;
esac
fi
for deplib in $libs; do
lib=
found=no
case $deplib in
-l*)
if test $linkmode = oldlib || test $linkmode = obj; then
$echo "$modename: warning: \`-l' is ignored for archives/objects: $deplib" 1>&2
continue
fi
if test $pass = conv; then
deplibs="$deplib $deplibs"
continue
fi
name=`$echo "X$deplib" | $Xsed -e 's/^-l//'`
for searchdir in $newlib_search_path $lib_search_path $sys_lib_search_path $shlib_search_path; do
# Search the libtool library
lib="$searchdir/lib${name}.la"
if test -f "$lib"; then
found=yes
break
fi
done
if test "$found" != yes; then
# deplib doesn't seem to be a libtool library
if test "$linkmode,$pass" = "prog,link"; then
compile_deplibs="$deplib $compile_deplibs"
finalize_deplibs="$deplib $finalize_deplibs"
else
deplibs="$deplib $deplibs"
test $linkmode = lib && newdependency_libs="$deplib $newdependency_libs"
fi
continue
fi
;; # -l
-L*)
case $linkmode in
lib)
deplibs="$deplib $deplibs"
test $pass = conv && continue
newdependency_libs="$deplib $newdependency_libs"
newlib_search_path="$newlib_search_path "`$echo "X$deplib" | $Xsed -e 's/^-L//'`
;;
prog)
if test $pass = conv; then
deplibs="$deplib $deplibs"
continue
fi
if test $pass = scan; then
deplibs="$deplib $deplibs"
newlib_search_path="$newlib_search_path "`$echo "X$deplib" | $Xsed -e 's/^-L//'`
else
compile_deplibs="$deplib $compile_deplibs"
finalize_deplibs="$deplib $finalize_deplibs"
fi
;;
*)
$echo "$modename: warning: \`-L' is ignored for archives/objects: $deplib" 1>&2
;;
esac # linkmode
continue
;; # -L
-R*)
if test $pass = link; then
dir=`$echo "X$deplib" | $Xsed -e 's/^-R//'`
# Make sure the xrpath contains only unique directories.
case "$xrpath " in
*" $dir "*) ;;
*) xrpath="$xrpath $dir" ;;
esac
fi
deplibs="$deplib $deplibs"
continue
;;
*.la) lib="$deplib" ;;
*.$libext)
if test $pass = conv; then
deplibs="$deplib $deplibs"
continue
fi
case $linkmode in
lib)
if test "$deplibs_check_method" != pass_all; then
echo
echo "*** Warning: This library needs some functionality provided by $deplib."
echo "*** I have the capability to make that library automatically link in when"
echo "*** you link to this library. But I can only do this if you have a"
echo "*** shared version of the library, which you do not appear to have."
else
echo
echo "*** Warning: Linking the shared library $output against the"
echo "*** static library $deplib is not portable!"
deplibs="$deplib $deplibs"
fi
continue
;;
prog)
if test $pass != link; then
deplibs="$deplib $deplibs"
else
compile_deplibs="$deplib $compile_deplibs"
finalize_deplibs="$deplib $finalize_deplibs"
fi
continue
;;
esac # linkmode
;; # *.$libext
*.lo | *.$objext)
if test $pass = dlpreopen || test "$dlopen_support" != yes || test "$build_libtool_libs" = no; then
# If there is no dlopen support or we're linking statically,
# we need to preload.
newdlprefiles="$newdlprefiles $deplib"
compile_deplibs="$deplib $compile_deplibs"
finalize_deplibs="$deplib $finalize_deplibs"
else
newdlfiles="$newdlfiles $deplib"
fi
continue
;;
%DEPLIBS%)
alldeplibs=yes
continue
;;
esac # case $deplib
if test $found = yes || test -f "$lib"; then :
else
$echo "$modename: cannot find the library \`$lib'" 1>&2
exit 1
fi
# Check to see that this really is a libtool archive.
if (sed -e '2q' $lib | egrep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then :
else
$echo "$modename: \`$lib' is not a valid libtool archive" 1>&2
exit 1
fi
ladir=`$echo "X$lib" | $Xsed -e 's%/[^/]*$%%'`
test "X$ladir" = "X$lib" && ladir="."
dlname=
dlopen=
dlpreopen=
libdir=
library_names=
old_library=
# If the library was installed with an old release of libtool,
# it will not redefine variable installed.
installed=yes
# Read the .la file
case $lib in
*/* | *\\*) . $lib ;;
*) . ./$lib ;;
esac
if test "$linkmode,$pass" = "lib,link" ||
test "$linkmode,$pass" = "prog,scan" ||
{ test $linkmode = oldlib || test $linkmode = obj; }; then
# Add dl[pre]opened files of deplib
test -n "$dlopen" && dlfiles="$dlfiles $dlopen"
test -n "$dlpreopen" && dlprefiles="$dlprefiles $dlpreopen"
fi
if test $pass = conv; then
# Only check for convenience libraries
deplibs="$lib $deplibs"
if test -z "$libdir"; then
if test -z "$old_library"; then
$echo "$modename: cannot find name of link library for \`$lib'" 1>&2
exit 1
fi
# It is a libtool convenience library, so add in its objects.
convenience="$convenience $ladir/$objdir/$old_library"
old_convenience="$old_convenience $ladir/$objdir/$old_library"
tmp_libs=
for deplib in $dependency_libs; do
deplibs="$deplib $deplibs"
case "$tmp_libs " in
*" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;;
esac
tmp_libs="$tmp_libs $deplib"
done
elif test $linkmode != prog && test $linkmode != lib; then
$echo "$modename: \`$lib' is not a convenience library" 1>&2
exit 1
fi
continue
fi # $pass = conv
# Get the name of the library we link against.
linklib=
for l in $old_library $library_names; do
linklib="$l"
done
if test -z "$linklib"; then
$echo "$modename: cannot find name of link library for \`$lib'" 1>&2
exit 1
fi
# This library was specified with -dlopen.
if test $pass = dlopen; then
if test -z "$libdir"; then
$echo "$modename: cannot -dlopen a convenience library: \`$lib'" 1>&2
exit 1
fi
if test -z "$dlname" || test "$dlopen_support" != yes || test "$build_libtool_libs" = no; then
# If there is no dlname, no dlopen support or we're linking
# statically, we need to preload.
dlprefiles="$dlprefiles $lib"
else
newdlfiles="$newdlfiles $lib"
fi
continue
fi # $pass = dlopen
# We need an absolute path.
case $ladir in
[\\/]* | [A-Za-z]:[\\/]*) abs_ladir="$ladir" ;;
*)
abs_ladir=`cd "$ladir" && pwd`
if test -z "$abs_ladir"; then
$echo "$modename: warning: cannot determine absolute directory name of \`$ladir'" 1>&2
$echo "$modename: passing it literally to the linker, although it might fail" 1>&2
abs_ladir="$ladir"
fi
;;
esac
laname=`$echo "X$lib" | $Xsed -e 's%^.*/%%'`
# Find the relevant object directory and library name.
if test "X$installed" = Xyes; then
if test ! -f "$libdir/$linklib" && test -f "$abs_ladir/$linklib"; then
$echo "$modename: warning: library \`$lib' was moved." 1>&2
dir="$ladir"
absdir="$abs_ladir"
libdir="$abs_ladir"
else
dir="$libdir"
absdir="$libdir"
fi
else
dir="$ladir/$objdir"
absdir="$abs_ladir/$objdir"
# Remove this search path later
notinst_path="$notinst_path $abs_ladir"
fi # $installed = yes
name=`$echo "X$laname" | $Xsed -e 's/\.la$//' -e 's/^lib//'`
# This library was specified with -dlpreopen.
if test $pass = dlpreopen; then
if test -z "$libdir"; then
$echo "$modename: cannot -dlpreopen a convenience library: \`$lib'" 1>&2
exit 1
fi
# Prefer using a static library (so that no silly _DYNAMIC symbols
# are required to link).
if test -n "$old_library"; then
newdlprefiles="$newdlprefiles $dir/$old_library"
# Otherwise, use the dlname, so that lt_dlopen finds it.
elif test -n "$dlname"; then
newdlprefiles="$newdlprefiles $dir/$dlname"
else
newdlprefiles="$newdlprefiles $dir/$linklib"
fi
fi # $pass = dlpreopen
if test -z "$libdir"; then
# Link the convenience library
if test $linkmode = lib; then
deplibs="$dir/$old_library $deplibs"
elif test "$linkmode,$pass" = "prog,link"; then
compile_deplibs="$dir/$old_library $compile_deplibs"
finalize_deplibs="$dir/$old_library $finalize_deplibs"
else
deplibs="$lib $deplibs"
fi
continue
fi
if test $linkmode = prog && test $pass != link; then
newlib_search_path="$newlib_search_path $ladir"
deplibs="$lib $deplibs"
linkalldeplibs=no
if test "$link_all_deplibs" != no || test -z "$library_names" ||
test "$build_libtool_libs" = no; then
linkalldeplibs=yes
fi
tmp_libs=
for deplib in $dependency_libs; do
case $deplib in
-L*) newlib_search_path="$newlib_search_path "`$echo "X$deplib" | $Xsed -e 's/^-L//'`;; ### testsuite: skip nested quoting test
esac
# Need to link against all dependency_libs?
if test $linkalldeplibs = yes; then
deplibs="$deplib $deplibs"
else
# Need to hardcode shared library paths
# or/and link against static libraries
newdependency_libs="$deplib $newdependency_libs"
fi
case "$tmp_libs " in
*" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;;
esac
tmp_libs="$tmp_libs $deplib"
done # for deplib
continue
fi # $linkmode = prog...
link_static=no # Whether the deplib will be linked statically
if test -n "$library_names" &&
{ test "$prefer_static_libs" = no || test -z "$old_library"; }; then
# Link against this shared library
if test "$linkmode,$pass" = "prog,link" ||
{ test $linkmode = lib && test $hardcode_into_libs = yes; }; then
# Hardcode the library path.
# Skip directories that are in the system default run-time
# search path.
case " $sys_lib_dlsearch_path " in
*" $absdir "*) ;;
*)
case "$compile_rpath " in
*" $absdir "*) ;;
*) compile_rpath="$compile_rpath $absdir"
esac
;;
esac
case " $sys_lib_dlsearch_path " in
*" $libdir "*) ;;
*)
case "$finalize_rpath " in
*" $libdir "*) ;;
*) finalize_rpath="$finalize_rpath $libdir"
esac
;;
esac
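# Note: the `case " $list " in *" $item "*)' construct above is the
# portable idiom used throughout this script to test list membership;
# padding both the list and the pattern with spaces ensures that only
# whole, space-separated entries match, so e.g. /usr/lib can never
# match inside an existing /usr/lib64 entry.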
if test $linkmode = prog; then
# We need to hardcode the library path
if test -n "$shlibpath_var"; then
# Make sure the rpath contains only unique directories.
case "$temp_rpath " in
*" $dir "*) ;;
*" $absdir "*) ;;
*) temp_rpath="$temp_rpath $dir" ;;
esac
fi
fi
fi # $linkmode,$pass = prog,link...
if test "$alldeplibs" = yes &&
{ test "$deplibs_check_method" = pass_all ||
{ test "$build_libtool_libs" = yes &&
test -n "$library_names"; }; }; then
# We only need to search for static libraries
continue
fi
if test "$installed" = no; then
notinst_deplibs="$notinst_deplibs $lib"
need_relink=yes
fi
if test -n "$old_archive_from_expsyms_cmds"; then
# figure out the soname
set dummy $library_names
realname="$2"
shift; shift
libname=`eval \\$echo \"$libname_spec\"`
# Use dlname if we got it; it's perfectly good, no?
if test -n "$dlname"; then
soname="$dlname"
elif test -n "$soname_spec"; then
# bleh windows
case $host in
*cygwin*)
major=`expr $current - $age`
versuffix="-$major"
;;
esac
eval soname=\"$soname_spec\"
else
soname="$realname"
fi
# Make a new name for the extract_expsyms_cmds to use
soroot="$soname"
soname=`echo $soroot | sed -e 's/^.*\///'`
newlib="libimp-`echo $soname | sed 's/^lib//;s/\.dll$//'`.a"
# If the library has no export list, then create one now
if test -f "$output_objdir/$soname-def"; then :
else
$show "extracting exported symbol list from \`$soname'"
IFS="${IFS= }"; save_ifs="$IFS"; IFS='~'
eval cmds=\"$extract_expsyms_cmds\"
for cmd in $cmds; do
IFS="$save_ifs"
$show "$cmd"
$run eval "$cmd" || exit $?
done
IFS="$save_ifs"
fi
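# The IFS juggling above is a recurring idiom in this script: the
# configured *_cmds variables hold several shell commands joined by
# `~', so the loop temporarily sets IFS='~' to split them, then
# restores the saved IFS before eval'ing each command individually.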
# Create $newlib
if test -f "$output_objdir/$newlib"; then :; else
$show "generating import library for \`$soname'"
IFS="${IFS= }"; save_ifs="$IFS"; IFS='~'
eval cmds=\"$old_archive_from_expsyms_cmds\"
for cmd in $cmds; do
IFS="$save_ifs"
$show "$cmd"
$run eval "$cmd" || exit $?
done
IFS="$save_ifs"
fi
# make sure the library variables are pointing to the new library
dir=$output_objdir
linklib=$newlib
fi # test -n $old_archive_from_expsyms_cmds
if test $linkmode = prog || test "$mode" != relink; then
add_shlibpath=
add_dir=
add=
lib_linked=yes
case $hardcode_action in
immediate | unsupported)
if test "$hardcode_direct" = no; then
add="$dir/$linklib"
elif test "$hardcode_minus_L" = no; then
case $host in
*-*-sunos*) add_shlibpath="$dir" ;;
esac
add_dir="-L$dir"
add="-l$name"
elif test "$hardcode_shlibpath_var" = no; then
add_shlibpath="$dir"
add="-l$name"
else
lib_linked=no
fi
;;
relink)
if test "$hardcode_direct" = yes; then
add="$dir/$linklib"
elif test "$hardcode_minus_L" = yes; then
add_dir="-L$dir"
add="-l$name"
elif test "$hardcode_shlibpath_var" = yes; then
add_shlibpath="$dir"
add="-l$name"
else
lib_linked=no
fi
;;
*) lib_linked=no ;;
esac
if test "$lib_linked" != yes; then
$echo "$modename: configuration error: unsupported hardcode properties"
exit 1
fi
if test -n "$add_shlibpath"; then
case :$compile_shlibpath: in
*":$add_shlibpath:"*) ;;
*) compile_shlibpath="$compile_shlibpath$add_shlibpath:" ;;
esac
fi
if test $linkmode = prog; then
test -n "$add_dir" && compile_deplibs="$add_dir $compile_deplibs"
test -n "$add" && compile_deplibs="$add $compile_deplibs"
else
test -n "$add_dir" && deplibs="$add_dir $deplibs"
test -n "$add" && deplibs="$add $deplibs"
if test "$hardcode_direct" != yes && \
test "$hardcode_minus_L" != yes && \
test "$hardcode_shlibpath_var" = yes; then
case :$finalize_shlibpath: in
*":$libdir:"*) ;;
*) finalize_shlibpath="$finalize_shlibpath$libdir:" ;;
esac
fi
fi
fi
if test $linkmode = prog || test "$mode" = relink; then
add_shlibpath=
add_dir=
add=
# Finalize command for both is simple: just hardcode it.
if test "$hardcode_direct" = yes; then
add="$libdir/$linklib"
elif test "$hardcode_minus_L" = yes; then
add_dir="-L$libdir"
add="-l$name"
elif test "$hardcode_shlibpath_var" = yes; then
case :$finalize_shlibpath: in
*":$libdir:"*) ;;
*) finalize_shlibpath="$finalize_shlibpath$libdir:" ;;
esac
add="-l$name"
else
# We cannot seem to hardcode it, guess we'll fake it.
add_dir="-L$libdir"
add="-l$name"
fi
if test $linkmode = prog; then
test -n "$add_dir" && finalize_deplibs="$add_dir $finalize_deplibs"
test -n "$add" && finalize_deplibs="$add $finalize_deplibs"
else
test -n "$add_dir" && deplibs="$add_dir $deplibs"
test -n "$add" && deplibs="$add $deplibs"
fi
fi
elif test $linkmode = prog; then
if test "$alldeplibs" = yes &&
{ test "$deplibs_check_method" = pass_all ||
{ test "$build_libtool_libs" = yes &&
test -n "$library_names"; }; }; then
# We only need to search for static libraries
continue
fi
# Try to link the static library
# Here we assume that one of hardcode_direct or hardcode_minus_L
# is not unsupported. This is valid on all known static and
# shared platforms.
if test "$hardcode_direct" != unsupported; then
test -n "$old_library" && linklib="$old_library"
compile_deplibs="$dir/$linklib $compile_deplibs"
finalize_deplibs="$dir/$linklib $finalize_deplibs"
else
compile_deplibs="-l$name -L$dir $compile_deplibs"
finalize_deplibs="-l$name -L$dir $finalize_deplibs"
fi
elif test "$build_libtool_libs" = yes; then
# Not a shared library
if test "$deplibs_check_method" != pass_all; then
# We're trying to link a shared library against a static one,
# but the system doesn't support it.
# Just print a warning and add the library to dependency_libs so
# that the program can be linked against the static library.
echo
echo "*** Warning: This library needs some functionality provided by $lib."
echo "*** I have the capability to make that library automatically link in when"
echo "*** you link to this library. But I can only do this if you have a"
echo "*** shared version of the library, which you do not appear to have."
if test "$module" = yes; then
echo "*** Therefore, libtool will create a static module, that should work "
echo "*** as long as the dlopening application is linked with the -dlopen flag."
if test -z "$global_symbol_pipe"; then
echo
echo "*** However, this would only work if libtool was able to extract symbol"
echo "*** lists from a program, using \`nm' or equivalent, but libtool could"
echo "*** not find such a program. So, this module is probably useless."
echo "*** \`nm' from GNU binutils and a full rebuild may help."
fi
if test "$build_old_libs" = no; then
build_libtool_libs=module
build_old_libs=yes
else
build_libtool_libs=no
fi
fi
else
convenience="$convenience $dir/$old_library"
old_convenience="$old_convenience $dir/$old_library"
deplibs="$dir/$old_library $deplibs"
link_static=yes
fi
fi # link shared/static library?
if test $linkmode = lib; then
if test -n "$dependency_libs" &&
{ test $hardcode_into_libs != yes || test $build_old_libs = yes ||
test $link_static = yes; }; then
# Extract -R from dependency_libs
temp_deplibs=
for libdir in $dependency_libs; do
case $libdir in
-R*) temp_xrpath=`$echo "X$libdir" | $Xsed -e 's/^-R//'`
case " $xrpath " in
*" $temp_xrpath "*) ;;
*) xrpath="$xrpath $temp_xrpath";;
esac;;
*) temp_deplibs="$temp_deplibs $libdir";;
esac
done
dependency_libs="$temp_deplibs"
fi
newlib_search_path="$newlib_search_path $absdir"
# Link against this library
test "$link_static" = no && newdependency_libs="$abs_ladir/$laname $newdependency_libs"
# ... and its dependency_libs
tmp_libs=
for deplib in $dependency_libs; do
newdependency_libs="$deplib $newdependency_libs"
case "$tmp_libs " in
*" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;;
esac
tmp_libs="$tmp_libs $deplib"
done
if test $link_all_deplibs != no; then
# Add the search paths of all dependency libraries
for deplib in $dependency_libs; do
case $deplib in
-L*) path="$deplib" ;;
*.la)
dir=`$echo "X$deplib" | $Xsed -e 's%/[^/]*$%%'`
test "X$dir" = "X$deplib" && dir="."
# We need an absolute path.
case $dir in
[\\/]* | [A-Za-z]:[\\/]*) absdir="$dir" ;;
*)
absdir=`cd "$dir" && pwd`
if test -z "$absdir"; then
$echo "$modename: warning: cannot determine absolute directory name of \`$dir'" 1>&2
absdir="$dir"
fi
;;
esac
if grep "^installed=no" $deplib > /dev/null; then
path="-L$absdir/$objdir"
else
eval libdir=`sed -n -e 's/^libdir=\(.*\)$/\1/p' $deplib`
if test -z "$libdir"; then
$echo "$modename: \`$deplib' is not a valid libtool archive" 1>&2
exit 1
fi
if test "$absdir" != "$libdir"; then
$echo "$modename: warning: \`$deplib' seems to be moved" 1>&2
fi
path="-L$absdir"
fi
;;
*) continue ;;
esac
case " $deplibs " in
*" $path "*) ;;
*) deplibs="$deplibs $path" ;;
esac
done
fi # link_all_deplibs != no
fi # linkmode = lib
done # for deplib in $libs
if test $pass = dlpreopen; then
# Link the dlpreopened libraries before other libraries
for deplib in $save_deplibs; do
deplibs="$deplib $deplibs"
done
fi
if test $pass != dlopen; then
test $pass != scan && dependency_libs="$newdependency_libs"
if test $pass != conv; then
# Make sure lib_search_path contains only unique directories.
lib_search_path=
for dir in $newlib_search_path; do
case "$lib_search_path " in
*" $dir "*) ;;
*) lib_search_path="$lib_search_path $dir" ;;
esac
done
newlib_search_path=
fi
if test "$linkmode,$pass" != "prog,link"; then
vars="deplibs"
else
vars="compile_deplibs finalize_deplibs"
fi
for var in $vars dependency_libs; do
# Add libraries to $var in reverse order
eval tmp_libs=\"\$$var\"
new_libs=
for deplib in $tmp_libs; do
case $deplib in
-L*) new_libs="$deplib $new_libs" ;;
*)
case " $specialdeplibs " in
*" $deplib "*) new_libs="$deplib $new_libs" ;;
*)
case " $new_libs " in
*" $deplib "*) ;;
*) new_libs="$deplib $new_libs" ;;
esac
;;
esac
;;
esac
done
tmp_libs=
for deplib in $new_libs; do
case $deplib in
-L*)
case " $tmp_libs " in
*" $deplib "*) ;;
*) tmp_libs="$tmp_libs $deplib" ;;
esac
;;
*) tmp_libs="$tmp_libs $deplib" ;;
esac
done
eval $var=\"$tmp_libs\"
done # for var
fi
if test "$pass" = "conv" &&
{ test "$linkmode" = "lib" || test "$linkmode" = "prog"; }; then
libs="$deplibs" # reset libs
deplibs=
fi
done # for pass
if test $linkmode = prog; then
dlfiles="$newdlfiles"
dlprefiles="$newdlprefiles"
fi
case $linkmode in
oldlib)
if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then
$echo "$modename: warning: \`-dlopen' is ignored for archives" 1>&2
fi
if test -n "$rpath"; then
$echo "$modename: warning: \`-rpath' is ignored for archives" 1>&2
fi
if test -n "$xrpath"; then
$echo "$modename: warning: \`-R' is ignored for archives" 1>&2
fi
if test -n "$vinfo"; then
$echo "$modename: warning: \`-version-info' is ignored for archives" 1>&2
fi
if test -n "$release"; then
$echo "$modename: warning: \`-release' is ignored for archives" 1>&2
fi
if test -n "$export_symbols" || test -n "$export_symbols_regex"; then
$echo "$modename: warning: \`-export-symbols' is ignored for archives" 1>&2
fi
# Now set the variables for building old libraries.
build_libtool_libs=no
oldlibs="$output"
objs="$objs$old_deplibs"
;;
lib)
# Make sure we only generate libraries of the form `libNAME.la'.
case $outputname in
lib*)
name=`$echo "X$outputname" | $Xsed -e 's/\.la$//' -e 's/^lib//'`
eval libname=\"$libname_spec\"
;;
*)
if test "$module" = no; then
$echo "$modename: libtool library \`$output' must begin with \`lib'" 1>&2
$echo "$help" 1>&2
exit 1
fi
if test "$need_lib_prefix" != no; then
# Add the "lib" prefix for modules if required
name=`$echo "X$outputname" | $Xsed -e 's/\.la$//'`
eval libname=\"$libname_spec\"
else
libname=`$echo "X$outputname" | $Xsed -e 's/\.la$//'`
fi
;;
esac
if test -n "$objs"; then
if test "$deplibs_check_method" != pass_all; then
$echo "$modename: cannot build libtool library \`$output' from non-libtool objects on this host:$objs" 2>&1
exit 1
else
echo
echo "*** Warning: Linking the shared library $output against the non-libtool"
echo "*** objects $objs is not portable!"
libobjs="$libobjs $objs"
fi
fi
if test "$dlself" != no; then
$echo "$modename: warning: \`-dlopen self' is ignored for libtool libraries" 1>&2
fi
set dummy $rpath
if test $# -gt 2; then
$echo "$modename: warning: ignoring multiple \`-rpath's for a libtool library" 1>&2
fi
install_libdir="$2"
oldlibs=
if test -z "$rpath"; then
if test "$build_libtool_libs" = yes; then
# Building a libtool convenience library.
libext=al
oldlibs="$output_objdir/$libname.$libext $oldlibs"
build_libtool_libs=convenience
build_old_libs=yes
fi
if test -n "$vinfo"; then
$echo "$modename: warning: \`-version-info' is ignored for convenience libraries" 1>&2
fi
if test -n "$release"; then
$echo "$modename: warning: \`-release' is ignored for convenience libraries" 1>&2
fi
else
# Parse the version information argument.
IFS="${IFS= }"; save_ifs="$IFS"; IFS=':'
set dummy $vinfo 0 0 0
IFS="$save_ifs"
if test -n "$8"; then
$echo "$modename: too many parameters to \`-version-info'" 1>&2
$echo "$help" 1>&2
exit 1
fi
current="$2"
revision="$3"
age="$4"
# Check that each of the things are valid numbers.
case $current in
0 | [1-9] | [1-9][0-9] | [1-9][0-9][0-9]) ;;
*)
$echo "$modename: CURRENT \`$current' is not a nonnegative integer" 1>&2
$echo "$modename: \`$vinfo' is not valid version information" 1>&2
exit 1
;;
esac
case $revision in
0 | [1-9] | [1-9][0-9] | [1-9][0-9][0-9]) ;;
*)
$echo "$modename: REVISION \`$revision' is not a nonnegative integer" 1>&2
$echo "$modename: \`$vinfo' is not valid version information" 1>&2
exit 1
;;
esac
case $age in
0 | [1-9] | [1-9][0-9] | [1-9][0-9][0-9]) ;;
*)
$echo "$modename: AGE \`$age' is not a nonnegative integer" 1>&2
$echo "$modename: \`$vinfo' is not valid version information" 1>&2
exit 1
;;
esac
if test $age -gt $current; then
$echo "$modename: AGE \`$age' is greater than the current interface number \`$current'" 1>&2
$echo "$modename: \`$vinfo' is not valid version information" 1>&2
exit 1
fi
# Calculate the version variables.
major=
versuffix=
verstring=
case $version_type in
none) ;;
darwin)
# Like Linux, but with the current version available in
# verstring for coding it into the library header
major=.`expr $current - $age`
versuffix="$major.$age.$revision"
# Darwin ld doesn't like 0 for these options...
minor_current=`expr $current + 1`
verstring="-compatibility_version $minor_current -current_version $minor_current.$revision"
;;
freebsd-aout)
major=".$current"
versuffix=".$current.$revision";
;;
freebsd-elf)
major=".$current"
versuffix=".$current";
;;
irix)
major=`expr $current - $age + 1`
verstring="sgi$major.$revision"
# Add in all the interfaces that we are compatible with.
loop=$revision
while test $loop != 0; do
iface=`expr $revision - $loop`
loop=`expr $loop - 1`
verstring="sgi$major.$iface:$verstring"
done
# Before this point, $major must not contain `.'.
major=.$major
versuffix="$major.$revision"
;;
linux)
major=.`expr $current - $age`
versuffix="$major.$age.$revision"
;;
osf)
major=`expr $current - $age`
versuffix=".$current.$age.$revision"
verstring="$current.$age.$revision"
# Add in all the interfaces that we are compatible with.
loop=$age
while test $loop != 0; do
iface=`expr $current - $loop`
loop=`expr $loop - 1`
verstring="$verstring:${iface}.0"
done
# Make executables depend on our current version.
verstring="$verstring:${current}.0"
;;
sunos)
major=".$current"
versuffix=".$current.$revision"
;;
windows)
# Use '-' rather than '.', since we only want one
# extension on DOS 8.3 filesystems.
major=`expr $current - $age`
versuffix="-$major"
;;
*)
$echo "$modename: unknown library version type \`$version_type'" 1>&2
echo "Fatal configuration error. See the $PACKAGE docs for more information." 1>&2
exit 1
;;
esac
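# Illustrative example of the calculation above: given
# `-version-info 3:2:1' (current=3, revision=2, age=1), the linux
# branch yields major=.2 and versuffix=.2.1.2, i.e. libfoo.so.2.1.2,
# while the windows branch yields major=2 and versuffix=-2, i.e. a
# single libfoo-2.dll name suitable for 8.3 filesystems.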
# Clear the version info if we defaulted, and they specified a release.
if test -z "$vinfo" && test -n "$release"; then
major=
verstring="0.0"
if test "$need_version" = no; then
versuffix=
else
versuffix=".0.0"
fi
fi
# Remove version info from name if versioning should be avoided
if test "$avoid_version" = yes && test "$need_version" = no; then
major=
versuffix=
verstring=""
fi
# Check to see if the archive will have undefined symbols.
if test "$allow_undefined" = yes; then
if test "$allow_undefined_flag" = unsupported; then
$echo "$modename: warning: undefined symbols not allowed in $host shared libraries" 1>&2
build_libtool_libs=no
build_old_libs=yes
fi
else
# Don't allow undefined symbols.
allow_undefined_flag="$no_undefined_flag"
fi
fi
if test "$mode" != relink; then
# Remove our outputs.
$show "${rm}r $output_objdir/$outputname $output_objdir/$libname.* $output_objdir/${libname}${release}.*"
$run ${rm}r $output_objdir/$outputname $output_objdir/$libname.* $output_objdir/${libname}${release}.*
fi
# Now set the variables for building old libraries.
if test "$build_old_libs" = yes && test "$build_libtool_libs" != convenience ; then
oldlibs="$oldlibs $output_objdir/$libname.$libext"
# Transform .lo files to .o files.
oldobjs="$objs "`$echo "X$libobjs" | $SP2NL | $Xsed -e '/\.'${libext}'$/d' -e "$lo2o" | $NL2SP`
fi
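# The pipeline above relies on three configured helpers: $SP2NL turns
# the space-separated object list into one name per line, the sed
# program drops archive members (names ending in .$libext) and applies
# $lo2o (a substitution rewriting the .lo suffix to .o), and $NL2SP
# joins the lines back into a single space-separated list.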
# Eliminate all temporary directories.
for path in $notinst_path; do
# (Double quotes are required here so that $path is expanded inside
# the sed expressions.)
lib_search_path=`echo "$lib_search_path " | sed -e "s% $path % %g"`
deplibs=`echo "$deplibs " | sed -e "s% -L$path % %g"`
dependency_libs=`echo "$dependency_libs " | sed -e "s% -L$path % %g"`
done
if test -n "$xrpath"; then
# If the user specified any rpath flags, then add them.
temp_xrpath=
for libdir in $xrpath; do
temp_xrpath="$temp_xrpath -R$libdir"
case "$finalize_rpath " in
*" $libdir "*) ;;
*) finalize_rpath="$finalize_rpath $libdir" ;;
esac
done
if test $hardcode_into_libs != yes || test $build_old_libs = yes; then
dependency_libs="$temp_xrpath $dependency_libs"
fi
fi
# Make sure dlfiles contains only unique files that won't be dlpreopened
old_dlfiles="$dlfiles"
dlfiles=
for lib in $old_dlfiles; do
case " $dlprefiles $dlfiles " in
*" $lib "*) ;;
*) dlfiles="$dlfiles $lib" ;;
esac
done
# Make sure dlprefiles contains only unique files
old_dlprefiles="$dlprefiles"
dlprefiles=
for lib in $old_dlprefiles; do
case "$dlprefiles " in
*" $lib "*) ;;
*) dlprefiles="$dlprefiles $lib" ;;
esac
done
if test "$build_libtool_libs" = yes; then
if test -n "$rpath"; then
case $host in
*-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-*-beos*)
# these systems don't actually have a c library (as such)!
;;
*-*-rhapsody* | *-*-darwin1.[012])
# Rhapsody C library is in the System framework
deplibs="$deplibs -framework System"
;;
*-*-netbsd*)
# Don't link with libc until the a.out ld.so is fixed.
;;
*)
# Add libc to deplibs on all other systems if necessary.
if test "$build_libtool_need_lc" = yes; then
deplibs="$deplibs -lc"
fi
;;
esac
fi
# Transform deplibs into only deplibs that can be linked in shared.
name_save=$name
libname_save=$libname
release_save=$release
versuffix_save=$versuffix
major_save=$major
# I'm not sure if I'm treating the release correctly. I think
# release should show up in the -l (ie -lgmp5) so we don't want to
# add it in twice. Is that correct?
release=""
versuffix=""
major=""
newdeplibs=
droppeddeps=no
case $deplibs_check_method in
pass_all)
# Don't check for shared/static. Everything works.
# This might be a little naive. We might want to check
# whether the library exists or not. But this is on
# osf3 & osf4 and I'm not really sure... Just
# implementing what was already the behaviour.
newdeplibs=$deplibs
;;
test_compile)
# This code stresses the "libraries are programs" paradigm to its
# limits. Maybe even breaks it. We compile a program, linking it
# against the deplibs as a proxy for the library. Then we can check
# whether they linked in statically or dynamically with ldd.
$rm conftest.c
cat > conftest.c <<EOF
int main() { return 0; }
EOF
$rm conftest
$CC -o conftest conftest.c $deplibs
if test $? -eq 0 ; then
ldd_output=`ldd conftest`
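# `expr STRING : REGEX' prints the number of characters matched
# (0 if none), so the test below treats a non-zero match length as
# proof that ldd resolved the dependency to a real shared library.
# E.g. (illustrative), with deplib_match=libm.so, an ldd line such as
#   libm.so.6 => /lib/libm.so.6 (0x...)
# matches, and the corresponding -l flag is kept in newdeplibs.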
for i in $deplibs; do
name="`expr $i : '-l\(.*\)'`"
# If $name is empty we are operating on a -L argument.
if test -n "$name" && test "$name" != "0"; then
libname=`eval \\$echo \"$libname_spec\"`
deplib_matches=`eval \\$echo \"$library_names_spec\"`
set dummy $deplib_matches
deplib_match=$2
if test `expr "$ldd_output" : ".*$deplib_match"` -ne 0 ; then
newdeplibs="$newdeplibs $i"
else
droppeddeps=yes
echo
echo "*** Warning: This library needs some functionality provided by $i."
echo "*** I have the capability to make that library automatically link in when"
echo "*** you link to this library. But I can only do this if you have a"
echo "*** shared version of the library, which you do not appear to have."
fi
else
newdeplibs="$newdeplibs $i"
fi
done
else
# An error occurred in the first compile. Let's try to salvage the situation:
# Compile a separate program for each library.
for i in $deplibs; do
name="`expr $i : '-l\(.*\)'`"
# If $name is empty we are operating on a -L argument.
if test -n "$name" && test "$name" != "0"; then
$rm conftest
$CC -o conftest conftest.c $i
# Did it work?
if test $? -eq 0 ; then
ldd_output=`ldd conftest`
libname=`eval \\$echo \"$libname_spec\"`
deplib_matches=`eval \\$echo \"$library_names_spec\"`
set dummy $deplib_matches
deplib_match=$2
if test `expr "$ldd_output" : ".*$deplib_match"` -ne 0 ; then
newdeplibs="$newdeplibs $i"
else
droppeddeps=yes
echo
echo "*** Warning: This library needs some functionality provided by $i."
echo "*** I have the capability to make that library automatically link in when"
echo "*** you link to this library. But I can only do this if you have a"
echo "*** shared version of the library, which you do not appear to have."
fi
else
droppeddeps=yes
echo
echo "*** Warning! Library $i is needed by this library but I was not able to"
echo "*** make it link in! You will probably need to install it or some"
echo "*** library that it depends on before this library will be fully"
echo "*** functional. Installing it before continuing would be even better."
fi
else
newdeplibs="$newdeplibs $i"
fi
done
fi
;;
file_magic*)
set dummy $deplibs_check_method
file_magic_regex=`expr "$deplibs_check_method" : "$2 \(.*\)"`
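# For the file_magic method, $deplibs_check_method looks something like
# `file_magic ELF [0-9][0-9]*-bit [LM]SB shared object' (as set by
# configure); the expr above strips the leading `file_magic ' tag so
# that only the regular expression remains, to be matched against the
# output of $file_magic_cmd on each candidate library below.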
for a_deplib in $deplibs; do
name="`expr $a_deplib : '-l\(.*\)'`"
# If $name is empty we are operating on a -L argument.
if test -n "$name" && test "$name" != "0"; then
libname=`eval \\$echo \"$libname_spec\"`
for i in $lib_search_path $sys_lib_search_path $shlib_search_path; do
potential_libs=`ls $i/$libname[.-]* 2>/dev/null`
for potent_lib in $potential_libs; do
# Follow soft links.
if ls -lLd "$potent_lib" 2>/dev/null \
| grep " -> " >/dev/null; then
continue
fi
# The statement above tries to avoid entering an
# endless loop below, in case of cyclic links.
# We might still enter an endless loop, since a link
# loop can be closed while we follow links,
# but so what?
potlib="$potent_lib"
while test -h "$potlib" 2>/dev/null; do
potliblink=`ls -ld $potlib | sed 's/.* -> //'`
case $potliblink in
[\\/]* | [A-Za-z]:[\\/]*) potlib="$potliblink";;
*) potlib=`$echo "X$potlib" | $Xsed -e 's,[^/]*$,,'`"$potliblink";;
esac
done
if eval $file_magic_cmd \"\$potlib\" 2>/dev/null \
| sed 10q \
| egrep "$file_magic_regex" > /dev/null; then
newdeplibs="$newdeplibs $a_deplib"
a_deplib=""
break 2
fi
done
done
if test -n "$a_deplib" ; then
droppeddeps=yes
echo
echo "*** Warning: This library needs some functionality provided by $a_deplib."
echo "*** I have the capability to make that library automatically link in when"
echo "*** you link to this library. But I can only do this if you have a"
echo "*** shared version of the library, which you do not appear to have."
fi
else
# Add a -L argument.
newdeplibs="$newdeplibs $a_deplib"
fi
done # Gone through all deplibs.
;;
match_pattern*)
set dummy $deplibs_check_method
match_pattern_regex=`expr "$deplibs_check_method" : "$2 \(.*\)"`
for a_deplib in $deplibs; do
name="`expr $a_deplib : '-l\(.*\)'`"
# If $name is empty we are operating on a -L argument.
if test -n "$name" && test "$name" != "0"; then
libname=`eval \\$echo \"$libname_spec\"`
for i in $lib_search_path $sys_lib_search_path $shlib_search_path; do
potential_libs=`ls $i/$libname[.-]* 2>/dev/null`
for potent_lib in $potential_libs; do
if eval echo \"$potent_lib\" 2>/dev/null \
| sed 10q \
| egrep "$match_pattern_regex" > /dev/null; then
newdeplibs="$newdeplibs $a_deplib"
a_deplib=""
break 2
fi
done
done
if test -n "$a_deplib" ; then
droppeddeps=yes
echo
echo "*** Warning: This library needs some functionality provided by $a_deplib."
echo "*** I have the capability to make that library automatically link in when"
echo "*** you link to this library. But I can only do this if you have a"
echo "*** shared version of the library, which you do not appear to have."
fi
else
# Add a -L argument.
newdeplibs="$newdeplibs $a_deplib"
fi
done # Gone through all deplibs.
;;
none | unknown | *)
newdeplibs=""
if $echo "X $deplibs" | $Xsed -e 's/ -lc$//' \
-e 's/ -[LR][^ ]*//g' -e 's/[ ]//g' |
grep . >/dev/null; then
echo
if test "X$deplibs_check_method" = "Xnone"; then
echo "*** Warning: inter-library dependencies are not supported in this platform."
else
echo "*** Warning: inter-library dependencies are not known to be supported."
fi
echo "*** All declared inter-library dependencies are being dropped."
droppeddeps=yes
fi
;;
esac
versuffix=$versuffix_save
major=$major_save
release=$release_save
libname=$libname_save
name=$name_save
case $host in
*-*-rhapsody* | *-*-darwin1.[012])
# On Rhapsody, replace the C library with the System framework
newdeplibs=`$echo "X $newdeplibs" | $Xsed -e 's/ -lc / -framework System /'`
;;
esac
if test "$droppeddeps" = yes; then
if test "$module" = yes; then
echo
echo "*** Warning: libtool could not satisfy all declared inter-library"
echo "*** dependencies of module $libname. Therefore, libtool will create"
echo "*** a static module, that should work as long as the dlopening"
echo "*** application is linked with the -dlopen flag."
if test -z "$global_symbol_pipe"; then
echo
echo "*** However, this would only work if libtool was able to extract symbol"
echo "*** lists from a program, using \`nm' or equivalent, but libtool could"
echo "*** not find such a program. So, this module is probably useless."
echo "*** \`nm' from GNU binutils and a full rebuild may help."
fi
if test "$build_old_libs" = no; then
oldlibs="$output_objdir/$libname.$libext"
build_libtool_libs=module
build_old_libs=yes
else
build_libtool_libs=no
fi
else
echo "*** The inter-library dependencies that have been dropped here will be"
echo "*** automatically added whenever a program is linked with this library"
echo "*** or is declared to -dlopen it."
if test $allow_undefined = no; then
echo
echo "*** Since this library must not contain undefined symbols,"
echo "*** because either the platform does not support them or"
echo "*** it was explicitly requested with -no-undefined,"
echo "*** libtool will only create a static version of it."
if test "$build_old_libs" = no; then
oldlibs="$output_objdir/$libname.$libext"
build_libtool_libs=module
build_old_libs=yes
else
build_libtool_libs=no
fi
fi
fi
fi
# Done checking deplibs!
deplibs=$newdeplibs
fi
# All the library-specific variables (install_libdir is set above).
library_names=
old_library=
dlname=
# Test again, we may have decided not to build it any more
if test "$build_libtool_libs" = yes; then
if test $hardcode_into_libs = yes; then
# Hardcode the library paths
hardcode_libdirs=
dep_rpath=
rpath="$finalize_rpath"
test "$mode" != relink && rpath="$compile_rpath$rpath"
for libdir in $rpath; do
if test -n "$hardcode_libdir_flag_spec"; then
if test -n "$hardcode_libdir_separator"; then
if test -z "$hardcode_libdirs"; then
hardcode_libdirs="$libdir"
else
# Just accumulate the unique libdirs.
case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in
*"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*)
;;
*)
hardcode_libdirs="$hardcode_libdirs$hardcode_libdir_separator$libdir"
;;
esac
fi
else
eval flag=\"$hardcode_libdir_flag_spec\"
dep_rpath="$dep_rpath $flag"
fi
elif test -n "$runpath_var"; then
case "$perm_rpath " in
*" $libdir "*) ;;
*) perm_rpath="$perm_rpath $libdir" ;;
esac
fi
done
# Substitute the hardcoded libdirs into the rpath.
if test -n "$hardcode_libdir_separator" &&
test -n "$hardcode_libdirs"; then
libdir="$hardcode_libdirs"
eval dep_rpath=\"$hardcode_libdir_flag_spec\"
fi
if test -n "$runpath_var" && test -n "$perm_rpath"; then
# We should set the runpath_var.
rpath=
for dir in $perm_rpath; do
rpath="$rpath$dir:"
done
eval "$runpath_var='$rpath\$$runpath_var'; export $runpath_var"
fi
test -n "$dep_rpath" && deplibs="$dep_rpath $deplibs"
fi
shlibpath="$finalize_shlibpath"
test "$mode" != relink && shlibpath="$compile_shlibpath$shlibpath"
if test -n "$shlibpath"; then
eval "$shlibpath_var='$shlibpath\$$shlibpath_var'; export $shlibpath_var"
fi
# Get the real and link names of the library.
eval library_names=\"$library_names_spec\"
set dummy $library_names
realname="$2"
shift; shift
if test -n "$soname_spec"; then
eval soname=\"$soname_spec\"
else
soname="$realname"
fi
test -z "$dlname" && dlname=$soname
lib="$output_objdir/$realname"
for link
do
linknames="$linknames $link"
done
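# At this point, on an ELF system, library_names typically expands to
# something like `libfoo.so.2.1.2 libfoo.so.2 libfoo.so': the first
# entry becomes $realname (the file actually built below), and the
# remaining entries are collected in $linknames and created later as
# symbolic links pointing at it.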
# Ensure that we have .o objects for linkers which dislike .lo
# (e.g. aix) in case we are running --disable-static
for obj in $libobjs; do
xdir=`$echo "X$obj" | $Xsed -e 's%/[^/]*$%%'`
if test "X$xdir" = "X$obj"; then
xdir="."
else
xdir="$xdir"
fi
baseobj=`$echo "X$obj" | $Xsed -e 's%^.*/%%'`
oldobj=`$echo "X$baseobj" | $Xsed -e "$lo2o"`
if test ! -f $xdir/$oldobj; then
$show "(cd $xdir && ${LN_S} $baseobj $oldobj)"
$run eval '(cd $xdir && ${LN_S} $baseobj $oldobj)' || exit $?
fi
done
# Use standard objects if they are pic
test -z "$pic_flag" && libobjs=`$echo "X$libobjs" | $SP2NL | $Xsed -e "$lo2o" | $NL2SP`
# Prepare the list of exported symbols
if test -z "$export_symbols"; then
if test "$always_export_symbols" = yes || test -n "$export_symbols_regex"; then
$show "generating symbol list for \`$libname.la'"
export_symbols="$output_objdir/$libname.exp"
$run $rm $export_symbols
eval cmds=\"$export_symbols_cmds\"
IFS="${IFS= }"; save_ifs="$IFS"; IFS='~'
for cmd in $cmds; do
IFS="$save_ifs"
$show "$cmd"
$run eval "$cmd" || exit $?
done
IFS="$save_ifs"
if test -n "$export_symbols_regex"; then
$show "egrep -e \"$export_symbols_regex\" \"$export_symbols\" > \"${export_symbols}T\""
$run eval 'egrep -e "$export_symbols_regex" "$export_symbols" > "${export_symbols}T"'
$show "$mv \"${export_symbols}T\" \"$export_symbols\""
$run eval '$mv "${export_symbols}T" "$export_symbols"'
fi
fi
fi
if test -n "$export_symbols" && test -n "$include_expsyms"; then
$run eval '$echo "X$include_expsyms" | $SP2NL >> "$export_symbols"'
fi
if test -n "$convenience"; then
if test -n "$whole_archive_flag_spec"; then
eval libobjs=\"\$libobjs $whole_archive_flag_spec\"
else
gentop="$output_objdir/${outputname}x"
$show "${rm}r $gentop"
$run ${rm}r "$gentop"
$show "mkdir $gentop"
$run mkdir "$gentop"
status=$?
if test $status -ne 0 && test ! -d "$gentop"; then
exit $status
fi
generated="$generated $gentop"
for xlib in $convenience; do
# Extract the objects.
case $xlib in
[\\/]* | [A-Za-z]:[\\/]*) xabs="$xlib" ;;
*) xabs=`pwd`"/$xlib" ;;
esac
xlib=`$echo "X$xlib" | $Xsed -e 's%^.*/%%'`
xdir="$gentop/$xlib"
$show "${rm}r $xdir"
$run ${rm}r "$xdir"
$show "mkdir $xdir"
$run mkdir "$xdir"
status=$?
if test $status -ne 0 && test ! -d "$xdir"; then
exit $status
fi
$show "(cd $xdir && $AR x $xabs)"
$run eval "(cd \$xdir && $AR x \$xabs)" || exit $?
libobjs="$libobjs "`find $xdir -name \*.o -print -o -name \*.lo -print | $NL2SP`
done
fi
fi
if test "$thread_safe" = yes && test -n "$thread_safe_flag_spec"; then
eval flag=\"$thread_safe_flag_spec\"
linker_flags="$linker_flags $flag"
fi
# Make a backup of the uninstalled library when relinking
if test "$mode" = relink; then
$run eval '(cd $output_objdir && $rm ${realname}U && $mv $realname ${realname}U)' || exit $?
fi
# Do each of the archive commands.
if test -n "$export_symbols" && test -n "$archive_expsym_cmds"; then
eval cmds=\"$archive_expsym_cmds\"
else
eval cmds=\"$archive_cmds\"
fi
IFS="${IFS= }"; save_ifs="$IFS"; IFS='~'
for cmd in $cmds; do
IFS="$save_ifs"
$show "$cmd"
$run eval "$cmd" || exit $?
done
IFS="$save_ifs"
# Restore the uninstalled library and exit
if test "$mode" = relink; then
$run eval '(cd $output_objdir && $rm ${realname}T && $mv $realname ${realname}T && $mv "$realname"U $realname)' || exit $?
exit 0
fi
# Create links to the real library.
for linkname in $linknames; do
if test "$realname" != "$linkname"; then
$show "(cd $output_objdir && $rm $linkname && $LN_S $realname $linkname)"
$run eval '(cd $output_objdir && $rm $linkname && $LN_S $realname $linkname)' || exit $?
fi
done
# If -module or -export-dynamic was specified, set the dlname.
if test "$module" = yes || test "$export_dynamic" = yes; then
# On all known operating systems, these are identical.
dlname="$soname"
fi
fi
;;
obj)
if test -n "$deplibs"; then
$echo "$modename: warning: \`-l' and \`-L' are ignored for objects" 1>&2
fi
if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then
$echo "$modename: warning: \`-dlopen' is ignored for objects" 1>&2
fi
if test -n "$rpath"; then
$echo "$modename: warning: \`-rpath' is ignored for objects" 1>&2
fi
if test -n "$xrpath"; then
$echo "$modename: warning: \`-R' is ignored for objects" 1>&2
fi
if test -n "$vinfo"; then
$echo "$modename: warning: \`-version-info' is ignored for objects" 1>&2
fi
if test -n "$release"; then
$echo "$modename: warning: \`-release' is ignored for objects" 1>&2
fi
case $output in
*.lo)
if test -n "$objs$old_deplibs"; then
$echo "$modename: cannot build library object \`$output' from non-libtool objects" 1>&2
exit 1
fi
libobj="$output"
obj=`$echo "X$output" | $Xsed -e "$lo2o"`
;;
*)
libobj=
obj="$output"
;;
esac
# Delete the old objects.
$run $rm $obj $libobj
# Objects from convenience libraries. This assumes
# single-version convenience libraries. Whenever we create
# different ones for PIC/non-PIC, we'll have to duplicate
# the extraction.
reload_conv_objs=
gentop=
# reload_cmds runs $LD directly, so let us get rid of
# -Wl from whole_archive_flag_spec
wl=
if test -n "$convenience"; then
if test -n "$whole_archive_flag_spec"; then
eval reload_conv_objs=\"\$reload_objs $whole_archive_flag_spec\"
else
gentop="$output_objdir/${obj}x"
$show "${rm}r $gentop"
$run ${rm}r "$gentop"
$show "mkdir $gentop"
$run mkdir "$gentop"
status=$?
if test $status -ne 0 && test ! -d "$gentop"; then
exit $status
fi
generated="$generated $gentop"
for xlib in $convenience; do
# Extract the objects.
case $xlib in
[\\/]* | [A-Za-z]:[\\/]*) xabs="$xlib" ;;
*) xabs=`pwd`"/$xlib" ;;
esac
xlib=`$echo "X$xlib" | $Xsed -e 's%^.*/%%'`
xdir="$gentop/$xlib"
$show "${rm}r $xdir"
$run ${rm}r "$xdir"
$show "mkdir $xdir"
$run mkdir "$xdir"
status=$?
if test $status -ne 0 && test ! -d "$xdir"; then
exit $status
fi
$show "(cd $xdir && $AR x $xabs)"
$run eval "(cd \$xdir && $AR x \$xabs)" || exit $?
reload_conv_objs="$reload_objs "`find $xdir -name \*.o -print -o -name \*.lo -print | $NL2SP`
done
fi
fi
# Create the old-style object.
reload_objs="$objs$old_deplibs "`$echo "X$libobjs" | $SP2NL | $Xsed -e '/\.'${libext}$'/d' -e '/\.lib$/d' -e "$lo2o" | $NL2SP`" $reload_conv_objs" ### testsuite: skip nested quoting test
output="$obj"
eval cmds=\"$reload_cmds\"
IFS="${IFS= }"; save_ifs="$IFS"; IFS='~'
for cmd in $cmds; do
IFS="$save_ifs"
$show "$cmd"
$run eval "$cmd" || exit $?
done
IFS="$save_ifs"
# Exit if we aren't doing a library object file.
if test -z "$libobj"; then
if test -n "$gentop"; then
$show "${rm}r $gentop"
$run ${rm}r $gentop
fi
exit 0
fi
if test "$build_libtool_libs" != yes; then
if test -n "$gentop"; then
$show "${rm}r $gentop"
$run ${rm}r $gentop
fi
# Create an invalid libtool object if no PIC, so that we don't
# accidentally link it into a program.
$show "echo timestamp > $libobj"
$run eval "echo timestamp > $libobj" || exit $?
exit 0
fi
if test -n "$pic_flag" || test "$pic_mode" != default; then
# Only do commands if we really have different PIC objects.
reload_objs="$libobjs $reload_conv_objs"
output="$libobj"
eval cmds=\"$reload_cmds\"
IFS="${IFS= }"; save_ifs="$IFS"; IFS='~'
for cmd in $cmds; do
IFS="$save_ifs"
$show "$cmd"
$run eval "$cmd" || exit $?
done
IFS="$save_ifs"
else
# Just create a symlink.
$show $rm $libobj
$run $rm $libobj
xdir=`$echo "X$libobj" | $Xsed -e 's%/[^/]*$%%'`
if test "X$xdir" = "X$libobj"; then
xdir="."
else
xdir="$xdir"
fi
baseobj=`$echo "X$libobj" | $Xsed -e 's%^.*/%%'`
oldobj=`$echo "X$baseobj" | $Xsed -e "$lo2o"`
$show "(cd $xdir && $LN_S $oldobj $baseobj)"
$run eval '(cd $xdir && $LN_S $oldobj $baseobj)' || exit $?
fi
if test -n "$gentop"; then
$show "${rm}r $gentop"
$run ${rm}r $gentop
fi
exit 0
;;
prog)
case $host in
*cygwin*) output=`echo $output | sed -e 's,.exe$,,;s,$,.exe,'` ;;
esac
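# The sed program above first strips any existing .exe suffix and then
# appends one, guaranteeing that the output name carries exactly one
# .exe extension on cygwin regardless of how the user spelled it.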
if test -n "$vinfo"; then
$echo "$modename: warning: \`-version-info' is ignored for programs" 1>&2
fi
if test -n "$release"; then
$echo "$modename: warning: \`-release' is ignored for programs" 1>&2
fi
if test "$preload" = yes; then
if test "$dlopen_support" = unknown && test "$dlopen_self" = unknown &&
test "$dlopen_self_static" = unknown; then
$echo "$modename: warning: \`AC_LIBTOOL_DLOPEN' not used. Assuming no dlopen support."
fi
fi
case $host in
*-*-rhapsody* | *-*-darwin1.[012])
# On Rhapsody, replace the C library with the System framework
compile_deplibs=`$echo "X $compile_deplibs" | $Xsed -e 's/ -lc / -framework System /'`
finalize_deplibs=`$echo "X $finalize_deplibs" | $Xsed -e 's/ -lc / -framework System /'`
;;
esac
compile_command="$compile_command $compile_deplibs"
finalize_command="$finalize_command $finalize_deplibs"
if test -n "$rpath$xrpath"; then
# If the user specified any rpath flags, then add them.
for libdir in $rpath $xrpath; do
# This is the magic to use -rpath.
case "$finalize_rpath " in
*" $libdir "*) ;;
*) finalize_rpath="$finalize_rpath $libdir" ;;
esac
done
fi
# Now hardcode the library paths
rpath=
hardcode_libdirs=
for libdir in $compile_rpath $finalize_rpath; do
if test -n "$hardcode_libdir_flag_spec"; then
if test -n "$hardcode_libdir_separator"; then
if test -z "$hardcode_libdirs"; then
hardcode_libdirs="$libdir"
else
# Just accumulate the unique libdirs.
case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in
*"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*)
;;
*)
hardcode_libdirs="$hardcode_libdirs$hardcode_libdir_separator$libdir"
;;
esac
fi
else
eval flag=\"$hardcode_libdir_flag_spec\"
rpath="$rpath $flag"
fi
elif test -n "$runpath_var"; then
case "$perm_rpath " in
*" $libdir "*) ;;
*) perm_rpath="$perm_rpath $libdir" ;;
esac
fi
case $host in
*-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2*)
case :$dllsearchpath: in
*":$libdir:"*) ;;
*) dllsearchpath="$dllsearchpath:$libdir";;
esac
;;
esac
done
# Substitute the hardcoded libdirs into the rpath.
if test -n "$hardcode_libdir_separator" &&
test -n "$hardcode_libdirs"; then
libdir="$hardcode_libdirs"
eval rpath=\" $hardcode_libdir_flag_spec\"
fi
compile_rpath="$rpath"
rpath=
hardcode_libdirs=
for libdir in $finalize_rpath; do
if test -n "$hardcode_libdir_flag_spec"; then
if test -n "$hardcode_libdir_separator"; then
if test -z "$hardcode_libdirs"; then
hardcode_libdirs="$libdir"
else
# Just accumulate the unique libdirs.
case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in
*"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*)
;;
*)
hardcode_libdirs="$hardcode_libdirs$hardcode_libdir_separator$libdir"
;;
esac
fi
else
eval flag=\"$hardcode_libdir_flag_spec\"
rpath="$rpath $flag"
fi
elif test -n "$runpath_var"; then
case "$finalize_perm_rpath " in
*" $libdir "*) ;;
*) finalize_perm_rpath="$finalize_perm_rpath $libdir" ;;
esac
fi
done
# Substitute the hardcoded libdirs into the rpath.
if test -n "$hardcode_libdir_separator" &&
test -n "$hardcode_libdirs"; then
libdir="$hardcode_libdirs"
eval rpath=\" $hardcode_libdir_flag_spec\"
fi
finalize_rpath="$rpath"
if test -n "$libobjs" && test "$build_old_libs" = yes; then
# Transform all the library objects into standard objects.
compile_command=`$echo "X$compile_command" | $SP2NL | $Xsed -e "$lo2o" | $NL2SP`
finalize_command=`$echo "X$finalize_command" | $SP2NL | $Xsed -e "$lo2o" | $NL2SP`
fi
dlsyms=
if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then
if test -n "$NM" && test -n "$global_symbol_pipe"; then
dlsyms="${outputname}S.c"
else
$echo "$modename: not configured to extract global symbols from dlpreopened files" 1>&2
fi
fi
if test -n "$dlsyms"; then
case $dlsyms in
"") ;;
*.c)
# Discover the nlist of each of the dlfiles.
nlist="$output_objdir/${outputname}.nm"
$show "$rm $nlist ${nlist}S ${nlist}T"
$run $rm "$nlist" "${nlist}S" "${nlist}T"
# Parse the name list into a source file.
$show "creating $output_objdir/$dlsyms"
test -z "$run" && $echo > "$output_objdir/$dlsyms" "\
/* $dlsyms - symbol resolution table for \`$outputname' dlsym emulation. */
/* Generated by $PROGRAM - GNU $PACKAGE $VERSION$TIMESTAMP */
#ifdef __cplusplus
extern \"C\" {
#endif
/* Prevent the only kind of declaration conflicts we can make. */
#define lt_preloaded_symbols some_other_symbol
/* External symbol declarations for the compiler. */\
"
if test "$dlself" = yes; then
$show "generating symbol list for \`$output'"
test -z "$run" && $echo ': @PROGRAM@ ' > "$nlist"
# Add our own program objects to the symbol list.
progfiles=`$echo "X$objs$old_deplibs" | $SP2NL | $Xsed -e "$lo2o" | $NL2SP`
for arg in $progfiles; do
$show "extracting global C symbols from \`$arg'"
$run eval "$NM $arg | $global_symbol_pipe >> '$nlist'"
done
if test -n "$exclude_expsyms"; then
$run eval 'egrep -v " ($exclude_expsyms)$" "$nlist" > "$nlist"T'
$run eval '$mv "$nlist"T "$nlist"'
fi
if test -n "$export_symbols_regex"; then
$run eval 'egrep -e "$export_symbols_regex" "$nlist" > "$nlist"T'
$run eval '$mv "$nlist"T "$nlist"'
fi
# Prepare the list of exported symbols
if test -z "$export_symbols"; then
export_symbols="$output_objdir/$output.exp"
$run $rm $export_symbols
$run eval "sed -n -e '/^: @PROGRAM@$/d' -e 's/^.* \(.*\)$/\1/p' "'< "$nlist" > "$export_symbols"'
else
$run eval "sed -e 's/\([][.*^$]\)/\\\1/g' -e 's/^/ /' -e 's/$/$/'"' < "$export_symbols" > "$output_objdir/$output.exp"'
$run eval 'grep -f "$output_objdir/$output.exp" < "$nlist" > "$nlist"T'
$run eval 'mv "$nlist"T "$nlist"'
fi
fi
for arg in $dlprefiles; do
$show "extracting global C symbols from \`$arg'"
name=`echo "$arg" | sed -e 's%^.*/%%'`
$run eval 'echo ": $name " >> "$nlist"'
$run eval "$NM $arg | $global_symbol_pipe >> '$nlist'"
done
if test -z "$run"; then
# Make sure we have at least an empty file.
test -f "$nlist" || : > "$nlist"
if test -n "$exclude_expsyms"; then
egrep -v " ($exclude_expsyms)$" "$nlist" > "$nlist"T
$mv "$nlist"T "$nlist"
fi
# Try sorting and uniquifying the output.
if grep -v "^: " < "$nlist" | sort +2 | uniq > "$nlist"S; then
:
else
grep -v "^: " < "$nlist" > "$nlist"S
fi
if test -f "$nlist"S; then
eval "$global_symbol_to_cdecl"' < "$nlist"S >> "$output_objdir/$dlsyms"'
else
echo '/* NONE */' >> "$output_objdir/$dlsyms"
fi
$echo >> "$output_objdir/$dlsyms" "\
#undef lt_preloaded_symbols
#if defined (__STDC__) && __STDC__
# define lt_ptr_t void *
#else
# define lt_ptr_t char *
# define const
#endif
/* The mapping between symbol names and symbols. */
const struct {
const char *name;
lt_ptr_t address;
}
lt_preloaded_symbols[] =
{\
"
sed -n -e 's/^: \([^ ]*\) $/ {\"\1\", (lt_ptr_t) 0},/p' \
-e 's/^. \([^ ]*\) \([^ ]*\)$/ {"\2", (lt_ptr_t) \&\2},/p' \
< "$nlist" >> "$output_objdir/$dlsyms"
$echo >> "$output_objdir/$dlsyms" "\
{0, (lt_ptr_t) 0}
};
/* This works around a problem in the FreeBSD linker */
#ifdef FREEBSD_WORKAROUND
static const void *lt_preloaded_setup() {
return lt_preloaded_symbols;
}
#endif
#ifdef __cplusplus
}
#endif\
"
fi
pic_flag_for_symtable=
case $host in
# compiling the symbol table file with pic_flag works around
# a FreeBSD bug that causes programs to crash when -lm is
# linked before any other PIC object. But we must not use
# pic_flag when linking with -static. The problem exists in
# FreeBSD 2.2.6 and is fixed in FreeBSD 3.1.
*-*-freebsd2*|*-*-freebsd3.0*|*-*-freebsdelf3.0*)
case "$compile_command " in
*" -static "*) ;;
*) pic_flag_for_symtable=" $pic_flag -DPIC -DFREEBSD_WORKAROUND";;
esac;;
*-*-hpux*)
case "$compile_command " in
*" -static "*) ;;
*) pic_flag_for_symtable=" $pic_flag -DPIC";;
esac
esac
# Now compile the dynamic symbol file.
$show "(cd $output_objdir && $CC -c$no_builtin_flag$pic_flag_for_symtable \"$dlsyms\")"
$run eval '(cd $output_objdir && $CC -c$no_builtin_flag$pic_flag_for_symtable "$dlsyms")' || exit $?
# Clean up the generated files.
$show "$rm $output_objdir/$dlsyms $nlist ${nlist}S ${nlist}T"
$run $rm "$output_objdir/$dlsyms" "$nlist" "${nlist}S" "${nlist}T"
# Transform the symbol file into the correct name.
compile_command=`$echo "X$compile_command" | $Xsed -e "s%@SYMFILE@%$output_objdir/${outputname}S.${objext}%"`
finalize_command=`$echo "X$finalize_command" | $Xsed -e "s%@SYMFILE@%$output_objdir/${outputname}S.${objext}%"`
;;
*)
$echo "$modename: unknown suffix for \`$dlsyms'" 1>&2
exit 1
;;
esac
else
# We keep going just in case the user didn't refer to
# lt_preloaded_symbols. The linker will fail if global_symbol_pipe
# really was required.
# Nullify the symbol file.
compile_command=`$echo "X$compile_command" | $Xsed -e "s% @SYMFILE@%%"`
finalize_command=`$echo "X$finalize_command" | $Xsed -e "s% @SYMFILE@%%"`
fi
if test $need_relink = no || test "$build_libtool_libs" != yes; then
# Replace the output file specification.
compile_command=`$echo "X$compile_command" | $Xsed -e 's%@OUTPUT@%'"$output"'%g'`
link_command="$compile_command$compile_rpath"
# We have no uninstalled library dependencies, so finalize right now.
$show "$link_command"
$run eval "$link_command"
status=$?
# Delete the generated files.
if test -n "$dlsyms"; then
$show "$rm $output_objdir/${outputname}S.${objext}"
$run $rm "$output_objdir/${outputname}S.${objext}"
fi
exit $status
fi
if test -n "$shlibpath_var"; then
# We should set the shlibpath_var
rpath=
for dir in $temp_rpath; do
case $dir in
[\\/]* | [A-Za-z]:[\\/]*)
# Absolute path.
rpath="$rpath$dir:"
;;
*)
# Relative path: add a thisdir entry.
rpath="$rpath\$thisdir/$dir:"
;;
esac
done
temp_rpath="$rpath"
fi
if test -n "$compile_shlibpath$finalize_shlibpath"; then
compile_command="$shlibpath_var=\"$compile_shlibpath$finalize_shlibpath\$$shlibpath_var\" $compile_command"
fi
if test -n "$finalize_shlibpath"; then
finalize_command="$shlibpath_var=\"$finalize_shlibpath\$$shlibpath_var\" $finalize_command"
fi
compile_var=
finalize_var=
if test -n "$runpath_var"; then
if test -n "$perm_rpath"; then
# We should set the runpath_var.
rpath=
for dir in $perm_rpath; do
rpath="$rpath$dir:"
done
compile_var="$runpath_var=\"$rpath\$$runpath_var\" "
fi
if test -n "$finalize_perm_rpath"; then
# We should set the runpath_var.
rpath=
for dir in $finalize_perm_rpath; do
rpath="$rpath$dir:"
done
finalize_var="$runpath_var=\"$rpath\$$runpath_var\" "
fi
fi
if test "$no_install" = yes; then
# We don't need to create a wrapper script.
link_command="$compile_var$compile_command$compile_rpath"
# Replace the output file specification.
link_command=`$echo "X$link_command" | $Xsed -e 's%@OUTPUT@%'"$output"'%g'`
# Delete the old output file.
$run $rm $output
# Link the executable and exit
$show "$link_command"
$run eval "$link_command" || exit $?
exit 0
fi
if test "$hardcode_action" = relink; then
# Fast installation is not supported
link_command="$compile_var$compile_command$compile_rpath"
relink_command="$finalize_var$finalize_command$finalize_rpath"
$echo "$modename: warning: this platform does not like uninstalled shared libraries" 1>&2
$echo "$modename: \`$output' will be relinked during installation" 1>&2
else
if test "$fast_install" != no; then
link_command="$finalize_var$compile_command$finalize_rpath"
if test "$fast_install" = yes; then
relink_command=`$echo "X$compile_var$compile_command$compile_rpath" | $Xsed -e 's%@OUTPUT@%\$progdir/\$file%g'`
else
# fast_install is set to needless
relink_command=
fi
else
link_command="$compile_var$compile_command$compile_rpath"
relink_command="$finalize_var$finalize_command$finalize_rpath"
fi
fi
# Replace the output file specification.
link_command=`$echo "X$link_command" | $Xsed -e 's%@OUTPUT@%'"$output_objdir/$outputname"'%g'`
# Delete the old output files.
$run $rm $output $output_objdir/$outputname $output_objdir/lt-$outputname
$show "$link_command"
$run eval "$link_command" || exit $?
# Now create the wrapper script.
$show "creating $output"
# Quote the relink command for shipping.
if test -n "$relink_command"; then
# Preserve any variables that may affect compiler behavior
for var in $variables_saved_for_relink; do
if eval test -z \"\${$var+set}\"; then
relink_command="{ test -z \"\${$var+set}\" || unset $var || { $var=; export $var; }; }; $relink_command"
elif eval var_value=\$$var; test -z "$var_value"; then
relink_command="$var=; export $var; $relink_command"
else
var_value=`$echo "X$var_value" | $Xsed -e "$sed_quote_subst"`
relink_command="$var=\"$var_value\"; export $var; $relink_command"
fi
done
relink_command="cd `pwd`; $relink_command"
relink_command=`$echo "X$relink_command" | $Xsed -e "$sed_quote_subst"`
fi
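# The loop above snapshots every variable named in
# $variables_saved_for_relink (typically PATH and the shared/run-time
# library path variables) into the relink command itself, so that
# relinking at install time runs with the same environment; each value
# is passed through $sed_quote_subst to survive the extra level of
# shell quoting.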
# Quote $echo for shipping.
if test "X$echo" = "X$SHELL $0 --fallback-echo"; then
case $0 in
[\\/]* | [A-Za-z]:[\\/]*) qecho="$SHELL $0 --fallback-echo";;
*) qecho="$SHELL `pwd`/$0 --fallback-echo";;
esac
qecho=`$echo "X$qecho" | $Xsed -e "$sed_quote_subst"`
else
qecho=`$echo "X$echo" | $Xsed -e "$sed_quote_subst"`
fi
# Only actually do things if our run command is non-null.
if test -z "$run"; then
# win32 will think the script is a binary if it has
# a .exe suffix, so we strip it off here.
case $output in
*.exe) output=`echo $output|sed 's,.exe$,,'` ;;
esac
# Test for cygwin because mv fails without .exe extensions.
case $host in
*cygwin*) exeext=.exe ;;
*) exeext= ;;
esac
$rm $output
trap "$rm $output; exit 1" 1 2 15
$echo > $output "\
#! $SHELL
# $output - temporary wrapper script for $objdir/$outputname
# Generated by $PROGRAM - GNU $PACKAGE $VERSION$TIMESTAMP
#
# The $output program cannot be directly executed until all the libtool
# libraries that it depends on are installed.
#
# This wrapper script should never be moved out of the build directory.
# If it is, it will not operate correctly.
# Sed substitution that helps us do robust quoting. It backslashifies
# metacharacters that are still active within double-quoted strings.
Xsed='sed -e 1s/^X//'
sed_quote_subst='$sed_quote_subst'
# The HP-UX ksh and POSIX shell print the target directory to stdout
# if CDPATH is set.
if test \"\${CDPATH+set}\" = set; then CDPATH=:; export CDPATH; fi
relink_command=\"$relink_command\"
# This environment variable determines our operation mode.
if test \"\$libtool_install_magic\" = \"$magic\"; then
# install mode needs the following variable:
notinst_deplibs='$notinst_deplibs'
else
# When we are sourced in execute mode, \$file and \$echo are already set.
if test \"\$libtool_execute_magic\" != \"$magic\"; then
echo=\"$qecho\"
file=\"\$0\"
# Make sure echo works.
if test \"X\$1\" = X--no-reexec; then
# Discard the --no-reexec flag, and continue.
shift
elif test \"X\`(\$echo '\t') 2>/dev/null\`\" = 'X\t'; then
# Yippee, \$echo works!
:
else
# Restart under the correct shell, and then maybe \$echo will work.
exec $SHELL \"\$0\" --no-reexec \${1+\"\$@\"}
fi
fi\
"
$echo >> $output "\
# Find the directory that this script lives in.
thisdir=\`\$echo \"X\$file\" | \$Xsed -e 's%/[^/]*$%%'\`
test \"x\$thisdir\" = \"x\$file\" && thisdir=.
# Follow symbolic links until we get to the real thisdir.
file=\`ls -ld \"\$file\" | sed -n 's/.*-> //p'\`
while test -n \"\$file\"; do
destdir=\`\$echo \"X\$file\" | \$Xsed -e 's%/[^/]*\$%%'\`
# If there was a directory component, then change thisdir.
if test \"x\$destdir\" != \"x\$file\"; then
case \"\$destdir\" in
[\\\\/]* | [A-Za-z]:[\\\\/]*) thisdir=\"\$destdir\" ;;
*) thisdir=\"\$thisdir/\$destdir\" ;;
esac
fi
file=\`\$echo \"X\$file\" | \$Xsed -e 's%^.*/%%'\`
file=\`ls -ld \"\$thisdir/\$file\" | sed -n 's/.*-> //p'\`
done
# Try to get the absolute directory name.
absdir=\`cd \"\$thisdir\" && pwd\`
test -n \"\$absdir\" && thisdir=\"\$absdir\"
"
if test "$fast_install" = yes; then
echo >> $output "\
program=lt-'$outputname'$exeext
progdir=\"\$thisdir/$objdir\"
if test ! -f \"\$progdir/\$program\" || \\
{ file=\`ls -1dt \"\$progdir/\$program\" \"\$progdir/../\$program\" 2>/dev/null | sed 1q\`; \\
test \"X\$file\" != \"X\$progdir/\$program\"; }; then
file=\"\$\$-\$program\"
if test ! -d \"\$progdir\"; then
$mkdir \"\$progdir\"
else
$rm \"\$progdir/\$file\"
fi"
echo >> $output "\
# relink executable if necessary
if test -n \"\$relink_command\"; then
if (eval \$relink_command); then :
else
$rm \"\$progdir/\$file\"
exit 1
fi
fi
$mv \"\$progdir/\$file\" \"\$progdir/\$program\" 2>/dev/null ||
{ $rm \"\$progdir/\$program\";
$mv \"\$progdir/\$file\" \"\$progdir/\$program\"; }
$rm \"\$progdir/\$file\"
fi"
else
echo >> $output "\
program='$outputname'
progdir=\"\$thisdir/$objdir\"
"
fi
echo >> $output "\
if test -f \"\$progdir/\$program\"; then"
# Export our shlibpath_var if we have one.
if test "$shlibpath_overrides_runpath" = yes && test -n "$shlibpath_var" && test -n "$temp_rpath"; then
$echo >> $output "\
# Add our own library path to $shlibpath_var
$shlibpath_var=\"$temp_rpath\$$shlibpath_var\"
# Some systems cannot cope with colon-terminated $shlibpath_var
# The second colon is a workaround for a bug in BeOS R4 sed
$shlibpath_var=\`\$echo \"X\$$shlibpath_var\" | \$Xsed -e 's/::*\$//'\`
export $shlibpath_var
"
fi
# Fix up the DLL search path if we need to.
if test -n "$dllsearchpath"; then
$echo >> $output "\
# Add the dll search path components to the executable PATH
PATH=$dllsearchpath:\$PATH
"
fi
$echo >> $output "\
if test \"\$libtool_execute_magic\" != \"$magic\"; then
# Run the actual program with our arguments.
"
case $host in
# win32 systems need to use the prog path for dll
# lookup to work
*-*-cygwin* | *-*-pw32*)
$echo >> $output "\
exec \$progdir/\$program \${1+\"\$@\"}
"
;;
# Backslashes separate directories on plain windows
*-*-mingw | *-*-os2*)
$echo >> $output "\
exec \$progdir\\\\\$program \${1+\"\$@\"}
"
;;
*)
$echo >> $output "\
# Export the path to the program.
PATH=\"\$progdir:\$PATH\"
export PATH
exec \$program \${1+\"\$@\"}
"
;;
esac
$echo >> $output "\
\$echo \"\$0: cannot exec \$program \${1+\"\$@\"}\"
exit 1
fi
else
# The program doesn't exist.
\$echo \"\$0: error: \$progdir/\$program does not exist\" 1>&2
\$echo \"This script is just a wrapper for \$program.\" 1>&2
echo \"See the $PACKAGE documentation for more information.\" 1>&2
exit 1
fi
fi\
"
chmod +x $output
fi
exit 0
;;
esac
# See if we need to build an old-fashioned archive.
for oldlib in $oldlibs; do
if test "$build_libtool_libs" = convenience; then
oldobjs="$libobjs_save"
addlibs="$convenience"
build_libtool_libs=no
else
if test "$build_libtool_libs" = module; then
oldobjs="$libobjs_save"
build_libtool_libs=no
else
oldobjs="$objs$old_deplibs "`$echo "X$libobjs_save" | $SP2NL | $Xsed -e '/\.'${libext}'$/d' -e '/\.lib$/d' -e "$lo2o" | $NL2SP`
fi
addlibs="$old_convenience"
fi
if test -n "$addlibs"; then
gentop="$output_objdir/${outputname}x"
$show "${rm}r $gentop"
$run ${rm}r "$gentop"
$show "mkdir $gentop"
$run mkdir "$gentop"
status=$?
if test $status -ne 0 && test ! -d "$gentop"; then
exit $status
fi
generated="$generated $gentop"
# Add in members from convenience archives.
for xlib in $addlibs; do
# Extract the objects.
case $xlib in
[\\/]* | [A-Za-z]:[\\/]*) xabs="$xlib" ;;
*) xabs=`pwd`"/$xlib" ;;
esac
xlib=`$echo "X$xlib" | $Xsed -e 's%^.*/%%'`
xdir="$gentop/$xlib"
$show "${rm}r $xdir"
$run ${rm}r "$xdir"
$show "mkdir $xdir"
$run mkdir "$xdir"
status=$?
if test $status -ne 0 && test ! -d "$xdir"; then
exit $status
fi
$show "(cd $xdir && $AR x $xabs)"
$run eval "(cd \$xdir && $AR x \$xabs)" || exit $?
oldobjs="$oldobjs "`find $xdir -name \*.${objext} -print -o -name \*.lo -print | $NL2SP`
done
fi
# Do each command in the archive commands.
if test -n "$old_archive_from_new_cmds" && test "$build_libtool_libs" = yes; then
eval cmds=\"$old_archive_from_new_cmds\"
else
# Ensure that we have .o objects in place in case we decided
# not to build a shared library, and have fallen back to building
# static libs even though --disable-static was passed!
for oldobj in $oldobjs; do
if test ! -f $oldobj; then
xdir=`$echo "X$oldobj" | $Xsed -e 's%/[^/]*$%%'`
if test "X$xdir" = "X$oldobj"; then
xdir="."
else
xdir="$xdir"
fi
baseobj=`$echo "X$oldobj" | $Xsed -e 's%^.*/%%'`
obj=`$echo "X$baseobj" | $Xsed -e "$o2lo"`
$show "(cd $xdir && ${LN_S} $obj $baseobj)"
$run eval '(cd $xdir && ${LN_S} $obj $baseobj)' || exit $?
fi
done
eval cmds=\"$old_archive_cmds\"
fi
IFS="${IFS= }"; save_ifs="$IFS"; IFS='~'
for cmd in $cmds; do
IFS="$save_ifs"
$show "$cmd"
$run eval "$cmd" || exit $?
done
IFS="$save_ifs"
done
if test -n "$generated"; then
$show "${rm}r$generated"
$run ${rm}r$generated
fi
# Now create the libtool archive.
case $output in
*.la)
old_library=
test "$build_old_libs" = yes && old_library="$libname.$libext"
$show "creating $output"
# Preserve any variables that may affect compiler behavior
for var in $variables_saved_for_relink; do
if eval test -z \"\${$var+set}\"; then
relink_command="{ test -z \"\${$var+set}\" || unset $var || { $var=; export $var; }; }; $relink_command"
elif eval var_value=\$$var; test -z "$var_value"; then
relink_command="$var=; export $var; $relink_command"
else
var_value=`$echo "X$var_value" | $Xsed -e "$sed_quote_subst"`
relink_command="$var=\"$var_value\"; export $var; $relink_command"
fi
done
# Quote the link command for shipping.
relink_command="cd `pwd`; $SHELL $0 --mode=relink $libtool_args"
relink_command=`$echo "X$relink_command" | $Xsed -e "$sed_quote_subst"`
# Only create the output if not a dry run.
if test -z "$run"; then
for installed in no yes; do
if test "$installed" = yes; then
if test -z "$install_libdir"; then
break
fi
output="$output_objdir/$outputname"i
# Replace all uninstalled libtool libraries with the installed ones
newdependency_libs=
for deplib in $dependency_libs; do
case $deplib in
*.la)
name=`$echo "X$deplib" | $Xsed -e 's%^.*/%%'`
eval libdir=`sed -n -e 's/^libdir=\(.*\)$/\1/p' $deplib`
if test -z "$libdir"; then
$echo "$modename: \`$deplib' is not a valid libtool archive" 1>&2
exit 1
fi
newdependency_libs="$newdependency_libs $libdir/$name"
;;
*) newdependency_libs="$newdependency_libs $deplib" ;;
esac
done
dependency_libs="$newdependency_libs"
newdlfiles=
for lib in $dlfiles; do
name=`$echo "X$lib" | $Xsed -e 's%^.*/%%'`
eval libdir=`sed -n -e 's/^libdir=\(.*\)$/\1/p' $lib`
if test -z "$libdir"; then
$echo "$modename: \`$lib' is not a valid libtool archive" 1>&2
exit 1
fi
newdlfiles="$newdlfiles $libdir/$name"
done
dlfiles="$newdlfiles"
newdlprefiles=
for lib in $dlprefiles; do
name=`$echo "X$lib" | $Xsed -e 's%^.*/%%'`
eval libdir=`sed -n -e 's/^libdir=\(.*\)$/\1/p' $lib`
if test -z "$libdir"; then
$echo "$modename: \`$lib' is not a valid libtool archive" 1>&2
exit 1
fi
newdlprefiles="$newdlprefiles $libdir/$name"
done
dlprefiles="$newdlprefiles"
fi
$rm $output
# place dlname in correct position for cygwin
tdlname=$dlname
case $host,$output,$installed,$module,$dlname in
*cygwin*,*lai,yes,no,*.dll) tdlname=../bin/$dlname ;;
esac
$echo > $output "\
# $outputname - a libtool library file
# Generated by $PROGRAM - GNU $PACKAGE $VERSION$TIMESTAMP
#
# Please DO NOT delete this file!
# It is necessary for linking the library.
# The name that we can dlopen(3).
dlname='$tdlname'
# Names of this library.
library_names='$library_names'
# The name of the static archive.
old_library='$old_library'
# Libraries that this one depends upon.
dependency_libs='$dependency_libs'
# Version information for $libname.
current=$current
age=$age
revision=$revision
# Is this an already installed library?
installed=$installed
# Files to dlopen/dlpreopen
dlopen='$dlfiles'
dlpreopen='$dlprefiles'
# Directory that this library needs to be installed in:
libdir='$install_libdir'"
if test "$installed" = no && test $need_relink = yes; then
$echo >> $output "\
relink_command=\"$relink_command\""
fi
done
fi
# Do a symbolic link so that the libtool archive can be found in
# LD_LIBRARY_PATH before the program is installed.
$show "(cd $output_objdir && $rm $outputname && $LN_S ../$outputname $outputname)"
$run eval '(cd $output_objdir && $rm $outputname && $LN_S ../$outputname $outputname)' || exit $?
;;
esac
exit 0
;;
# libtool install mode
install)
modename="$modename: install"
# There may be an optional sh(1) argument at the beginning of
# install_prog (especially on Windows NT).
if test "$nonopt" = "$SHELL" || test "$nonopt" = /bin/sh ||
# Allow the use of GNU shtool's install command.
$echo "X$nonopt" | $Xsed | grep shtool > /dev/null; then
# Aesthetically quote it.
arg=`$echo "X$nonopt" | $Xsed -e "$sed_quote_subst"`
case $arg in
*[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*)
arg="\"$arg\""
;;
esac
install_prog="$arg "
arg="$1"
shift
else
install_prog=
arg="$nonopt"
fi
# The real first argument should be the name of the installation program.
# Aesthetically quote it.
arg=`$echo "X$arg" | $Xsed -e "$sed_quote_subst"`
case $arg in
*[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*)
arg="\"$arg\""
;;
esac
install_prog="$install_prog$arg"
# We need to accept at least all the BSD install flags.
dest=
files=
opts=
prev=
install_type=
isdir=no
stripme=
for arg
do
if test -n "$dest"; then
files="$files $dest"
dest="$arg"
continue
fi
case $arg in
-d) isdir=yes ;;
-f) prev="-f" ;;
-g) prev="-g" ;;
-m) prev="-m" ;;
-o) prev="-o" ;;
-s)
stripme=" -s"
continue
;;
-*) ;;
*)
# If the previous option needed an argument, then skip it.
if test -n "$prev"; then
prev=
else
dest="$arg"
continue
fi
;;
esac
# Aesthetically quote the argument.
arg=`$echo "X$arg" | $Xsed -e "$sed_quote_subst"`
case $arg in
*[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*)
arg="\"$arg\""
;;
esac
install_prog="$install_prog $arg"
done
if test -z "$install_prog"; then
$echo "$modename: you must specify an install program" 1>&2
$echo "$help" 1>&2
exit 1
fi
if test -n "$prev"; then
$echo "$modename: the \`$prev' option requires an argument" 1>&2
$echo "$help" 1>&2
exit 1
fi
if test -z "$files"; then
if test -z "$dest"; then
$echo "$modename: no file or destination specified" 1>&2
else
$echo "$modename: you must specify a destination" 1>&2
fi
$echo "$help" 1>&2
exit 1
fi
# Strip any trailing slash from the destination.
dest=`$echo "X$dest" | $Xsed -e 's%/$%%'`
# Check to see that the destination is a directory.
test -d "$dest" && isdir=yes
if test "$isdir" = yes; then
destdir="$dest"
destname=
else
destdir=`$echo "X$dest" | $Xsed -e 's%/[^/]*$%%'`
test "X$destdir" = "X$dest" && destdir=.
destname=`$echo "X$dest" | $Xsed -e 's%^.*/%%'`
# Not a directory, so check to see that there is only one file specified.
set dummy $files
if test $# -gt 2; then
$echo "$modename: \`$dest' is not a directory" 1>&2
$echo "$help" 1>&2
exit 1
fi
fi
case $destdir in
[\\/]* | [A-Za-z]:[\\/]*) ;;
*)
for file in $files; do
case $file in
*.lo) ;;
*)
$echo "$modename: \`$destdir' must be an absolute directory name" 1>&2
$echo "$help" 1>&2
exit 1
;;
esac
done
;;
esac
# This variable tells wrapper scripts just to set variables rather
# than running their programs.
libtool_install_magic="$magic"
staticlibs=
future_libdirs=
current_libdirs=
for file in $files; do
# Do each installation.
case $file in
*.$libext)
# Do the static libraries later.
staticlibs="$staticlibs $file"
;;
*.la)
# Check to see that this really is a libtool archive.
if (sed -e '2q' $file | egrep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then :
else
$echo "$modename: \`$file' is not a valid libtool archive" 1>&2
$echo "$help" 1>&2
exit 1
fi
library_names=
old_library=
relink_command=
# If there is no directory component, then add one.
case $file in
*/* | *\\*) . $file ;;
*) . ./$file ;;
esac
# Add the libdir to current_libdirs if it is the destination.
if test "X$destdir" = "X$libdir"; then
case "$current_libdirs " in
*" $libdir "*) ;;
*) current_libdirs="$current_libdirs $libdir" ;;
esac
else
# Note the libdir as a future libdir.
case "$future_libdirs " in
*" $libdir "*) ;;
*) future_libdirs="$future_libdirs $libdir" ;;
esac
fi
dir=`$echo "X$file" | $Xsed -e 's%/[^/]*$%%'`/
test "X$dir" = "X$file/" && dir=
dir="$dir$objdir"
if test -n "$relink_command"; then
$echo "$modename: warning: relinking \`$file'" 1>&2
$show "$relink_command"
if $run eval "$relink_command"; then :
else
$echo "$modename: error: relink \`$file' with the above command before installing it" 1>&2
continue
fi
fi
# See the names of the shared library.
set dummy $library_names
if test -n "$2"; then
realname="$2"
shift
shift
srcname="$realname"
test -n "$relink_command" && srcname="$realname"T
# Install the shared library and build the symlinks.
$show "$install_prog $dir/$srcname $destdir/$realname"
$run eval "$install_prog $dir/$srcname $destdir/$realname" || exit $?
if test -n "$stripme" && test -n "$striplib"; then
$show "$striplib $destdir/$realname"
$run eval "$striplib $destdir/$realname" || exit $?
fi
if test $# -gt 0; then
# Delete the old symlinks, and create new ones.
for linkname
do
if test "$linkname" != "$realname"; then
$show "(cd $destdir && $rm $linkname && $LN_S $realname $linkname)"
$run eval "(cd $destdir && $rm $linkname && $LN_S $realname $linkname)"
fi
done
fi
# Do each command in the postinstall commands.
lib="$destdir/$realname"
eval cmds=\"$postinstall_cmds\"
IFS="${IFS= }"; save_ifs="$IFS"; IFS='~'
for cmd in $cmds; do
IFS="$save_ifs"
$show "$cmd"
$run eval "$cmd" || exit $?
done
IFS="$save_ifs"
fi
# Install the pseudo-library for information purposes.
name=`$echo "X$file" | $Xsed -e 's%^.*/%%'`
instname="$dir/$name"i
$show "$install_prog $instname $destdir/$name"
$run eval "$install_prog $instname $destdir/$name" || exit $?
# Maybe install the static library, too.
test -n "$old_library" && staticlibs="$staticlibs $dir/$old_library"
;;
*.lo)
# Install (i.e. copy) a libtool object.
# Figure out destination file name, if it wasn't already specified.
if test -n "$destname"; then
destfile="$destdir/$destname"
else
destfile=`$echo "X$file" | $Xsed -e 's%^.*/%%'`
destfile="$destdir/$destfile"
fi
# Deduce the name of the destination old-style object file.
case $destfile in
*.lo)
staticdest=`$echo "X$destfile" | $Xsed -e "$lo2o"`
;;
*.$objext)
staticdest="$destfile"
destfile=
;;
*)
$echo "$modename: cannot copy a libtool object to \`$destfile'" 1>&2
$echo "$help" 1>&2
exit 1
;;
esac
# Install the libtool object if requested.
if test -n "$destfile"; then
$show "$install_prog $file $destfile"
$run eval "$install_prog $file $destfile" || exit $?
fi
# Install the old object if enabled.
if test "$build_old_libs" = yes; then
# Deduce the name of the old-style object file.
staticobj=`$echo "X$file" | $Xsed -e "$lo2o"`
$show "$install_prog $staticobj $staticdest"
$run eval "$install_prog \$staticobj \$staticdest" || exit $?
fi
exit 0
;;
*)
# Figure out destination file name, if it wasn't already specified.
if test -n "$destname"; then
destfile="$destdir/$destname"
else
destfile=`$echo "X$file" | $Xsed -e 's%^.*/%%'`
destfile="$destdir/$destfile"
fi
# Do a test to see if this is really a libtool program.
if (sed -e '4q' $file | egrep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then
notinst_deplibs=
relink_command=
# If there is no directory component, then add one.
case $file in
*/* | *\\*) . $file ;;
*) . ./$file ;;
esac
# Check the variables that should have been set.
if test -z "$notinst_deplibs"; then
$echo "$modename: invalid libtool wrapper script \`$file'" 1>&2
exit 1
fi
finalize=yes
for lib in $notinst_deplibs; do
# Check to see that each library is installed.
libdir=
if test -f "$lib"; then
# If there is no directory component, then add one.
case $lib in
*/* | *\\*) . $lib ;;
*) . ./$lib ;;
esac
fi
libfile="$libdir/"`$echo "X$lib" | $Xsed -e 's%^.*/%%g'` ### testsuite: skip nested quoting test
if test -n "$libdir" && test ! -f "$libfile"; then
$echo "$modename: warning: \`$lib' has not been installed in \`$libdir'" 1>&2
finalize=no
fi
done
relink_command=
# If there is no directory component, then add one.
case $file in
*/* | *\\*) . $file ;;
*) . ./$file ;;
esac
outputname=
if test "$fast_install" = no && test -n "$relink_command"; then
if test "$finalize" = yes && test -z "$run"; then
tmpdir="/tmp"
test -n "$TMPDIR" && tmpdir="$TMPDIR"
tmpdir="$tmpdir/libtool-$$"
if $mkdir -p "$tmpdir" && chmod 700 "$tmpdir"; then :
else
$echo "$modename: error: cannot create temporary directory \`$tmpdir'" 1>&2
continue
fi
file=`$echo "X$file" | $Xsed -e 's%^.*/%%'`
outputname="$tmpdir/$file"
# Replace the output file specification.
relink_command=`$echo "X$relink_command" | $Xsed -e 's%@OUTPUT@%'"$outputname"'%g'`
$show "$relink_command"
if $run eval "$relink_command"; then :
else
$echo "$modename: error: relink \`$file' with the above command before installing it" 1>&2
${rm}r "$tmpdir"
continue
fi
file="$outputname"
else
$echo "$modename: warning: cannot relink \`$file'" 1>&2
fi
else
# Install the binary that we compiled earlier.
file=`$echo "X$file" | $Xsed -e "s%\([^/]*\)$%$objdir/\1%"`
fi
fi
# remove .exe since cygwin /usr/bin/install will append another
# one anyway
case $install_prog,$host in
/usr/bin/install*,*cygwin*)
case $file:$destfile in
*.exe:*.exe)
# this is ok
;;
*.exe:*)
destfile=$destfile.exe
;;
*:*.exe)
destfile=`echo $destfile | sed -e 's,.exe$,,'`
;;
esac
;;
esac
$show "$install_prog$stripme $file $destfile"
$run eval "$install_prog\$stripme \$file \$destfile" || exit $?
test -n "$outputname" && ${rm}r "$tmpdir"
;;
esac
done
for file in $staticlibs; do
name=`$echo "X$file" | $Xsed -e 's%^.*/%%'`
# Set up the ranlib parameters.
oldlib="$destdir/$name"
$show "$install_prog $file $oldlib"
$run eval "$install_prog \$file \$oldlib" || exit $?
if test -n "$stripme" && test -n "$striplib"; then
$show "$old_striplib $oldlib"
$run eval "$old_striplib $oldlib" || exit $?
fi
# Do each command in the postinstall commands.
eval cmds=\"$old_postinstall_cmds\"
IFS="${IFS= }"; save_ifs="$IFS"; IFS='~'
for cmd in $cmds; do
IFS="$save_ifs"
$show "$cmd"
$run eval "$cmd" || exit $?
done
IFS="$save_ifs"
done
if test -n "$future_libdirs"; then
$echo "$modename: warning: remember to run \`$progname --finish$future_libdirs'" 1>&2
fi
if test -n "$current_libdirs"; then
# Maybe just do a dry run.
test -n "$run" && current_libdirs=" -n$current_libdirs"
exec $SHELL $0 --finish$current_libdirs
exit 1
fi
exit 0
;;
# libtool finish mode
finish)
modename="$modename: finish"
libdirs="$nonopt"
admincmds=
if test -n "$finish_cmds$finish_eval" && test -n "$libdirs"; then
for dir
do
libdirs="$libdirs $dir"
done
for libdir in $libdirs; do
if test -n "$finish_cmds"; then
# Do each command in the finish commands.
eval cmds=\"$finish_cmds\"
IFS="${IFS= }"; save_ifs="$IFS"; IFS='~'
for cmd in $cmds; do
IFS="$save_ifs"
$show "$cmd"
$run eval "$cmd" || admincmds="$admincmds
$cmd"
done
IFS="$save_ifs"
fi
if test -n "$finish_eval"; then
# Do the single finish_eval.
eval cmds=\"$finish_eval\"
$run eval "$cmds" || admincmds="$admincmds
$cmds"
fi
done
fi
# Exit here if they wanted silent mode.
test "$show" = ":" && exit 0
echo "----------------------------------------------------------------------"
echo "Libraries have been installed in:"
for libdir in $libdirs; do
echo " $libdir"
done
echo
echo "If you ever happen to want to link against installed libraries"
echo "in a given directory, LIBDIR, you must either use libtool, and"
echo "specify the full pathname of the library, or use the \`-LLIBDIR'"
echo "flag during linking and do at least one of the following:"
if test -n "$shlibpath_var"; then
echo " - add LIBDIR to the \`$shlibpath_var' environment variable"
echo " during execution"
fi
if test -n "$runpath_var"; then
echo " - add LIBDIR to the \`$runpath_var' environment variable"
echo " during linking"
fi
if test -n "$hardcode_libdir_flag_spec"; then
libdir=LIBDIR
eval flag=\"$hardcode_libdir_flag_spec\"
echo " - use the \`$flag' linker flag"
fi
if test -n "$admincmds"; then
echo " - have your system administrator run these commands:$admincmds"
fi
if test -f /etc/ld.so.conf; then
echo " - have your system administrator add LIBDIR to \`/etc/ld.so.conf'"
fi
echo
echo "See any operating system documentation about shared libraries for"
echo "more information, such as the ld(1) and ld.so(8) manual pages."
echo "----------------------------------------------------------------------"
exit 0
;;
# libtool execute mode
execute)
modename="$modename: execute"
# The first argument is the command name.
cmd="$nonopt"
if test -z "$cmd"; then
$echo "$modename: you must specify a COMMAND" 1>&2
$echo "$help"
exit 1
fi
# Handle -dlopen flags immediately.
for file in $execute_dlfiles; do
if test ! -f "$file"; then
$echo "$modename: \`$file' is not a file" 1>&2
$echo "$help" 1>&2
exit 1
fi
dir=
case $file in
*.la)
# Check to see that this really is a libtool archive.
if (sed -e '2q' $file | egrep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then :
else
$echo "$modename: \`$lib' is not a valid libtool archive" 1>&2
$echo "$help" 1>&2
exit 1
fi
# Read the libtool library.
dlname=
library_names=
# If there is no directory component, then add one.
case $file in
*/* | *\\*) . $file ;;
*) . ./$file ;;
esac
# Skip this library if it cannot be dlopened.
if test -z "$dlname"; then
# Warn if it was a shared library.
test -n "$library_names" && $echo "$modename: warning: \`$file' was not linked with \`-export-dynamic'"
continue
fi
dir=`$echo "X$file" | $Xsed -e 's%/[^/]*$%%'`
test "X$dir" = "X$file" && dir=.
if test -f "$dir/$objdir/$dlname"; then
dir="$dir/$objdir"
else
$echo "$modename: cannot find \`$dlname' in \`$dir' or \`$dir/$objdir'" 1>&2
exit 1
fi
;;
*.lo)
# Just add the directory containing the .lo file.
dir=`$echo "X$file" | $Xsed -e 's%/[^/]*$%%'`
test "X$dir" = "X$file" && dir=.
;;
*)
$echo "$modename: warning \`-dlopen' is ignored for non-libtool libraries and objects" 1>&2
continue
;;
esac
# Get the absolute pathname.
absdir=`cd "$dir" && pwd`
test -n "$absdir" && dir="$absdir"
# Now add the directory to shlibpath_var.
if eval "test -z \"\$$shlibpath_var\""; then
eval "$shlibpath_var=\"\$dir\""
else
eval "$shlibpath_var=\"\$dir:\$$shlibpath_var\""
fi
done
# This variable tells wrapper scripts just to set shlibpath_var
# rather than running their programs.
libtool_execute_magic="$magic"
# Check if any of the arguments is a wrapper script.
args=
for file
do
case $file in
-*) ;;
*)
# Do a test to see if this is really a libtool program.
if (sed -e '4q' $file | egrep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then
# If there is no directory component, then add one.
case $file in
*/* | *\\*) . $file ;;
*) . ./$file ;;
esac
# Transform arg to wrapped name.
file="$progdir/$program"
fi
;;
esac
# Quote arguments (to preserve shell metacharacters).
file=`$echo "X$file" | $Xsed -e "$sed_quote_subst"`
args="$args \"$file\""
done
if test -z "$run"; then
if test -n "$shlibpath_var"; then
# Export the shlibpath_var.
eval "export $shlibpath_var"
fi
# Restore saved environment variables
if test "${save_LC_ALL+set}" = set; then
LC_ALL="$save_LC_ALL"; export LC_ALL
fi
if test "${save_LANG+set}" = set; then
LANG="$save_LANG"; export LANG
fi
# Now actually exec the command.
eval "exec \$cmd$args"
$echo "$modename: cannot exec \$cmd$args"
exit 1
else
# Display what would be done.
if test -n "$shlibpath_var"; then
eval "\$echo \"\$shlibpath_var=\$$shlibpath_var\""
$echo "export $shlibpath_var"
fi
$echo "$cmd$args"
exit 0
fi
;;
# libtool clean and uninstall mode
clean | uninstall)
modename="$modename: $mode"
rm="$nonopt"
files=
rmforce=
exit_status=0
# This variable tells wrapper scripts just to set variables rather
# than running their programs.
libtool_install_magic="$magic"
for arg
do
case $arg in
-f) rm="$rm $arg"; rmforce=yes ;;
-*) rm="$rm $arg" ;;
*) files="$files $arg" ;;
esac
done
if test -z "$rm"; then
$echo "$modename: you must specify an RM program" 1>&2
$echo "$help" 1>&2
exit 1
fi
rmdirs=
for file in $files; do
dir=`$echo "X$file" | $Xsed -e 's%/[^/]*$%%'`
if test "X$dir" = "X$file"; then
dir=.
objdir="$objdir"
else
objdir="$dir/$objdir"
fi
name=`$echo "X$file" | $Xsed -e 's%^.*/%%'`
test $mode = uninstall && objdir="$dir"
# Remember objdir for removal later, being careful to avoid duplicates
if test $mode = clean; then
case " $rmdirs " in
*" $objdir "*) ;;
*) rmdirs="$rmdirs $objdir" ;;
esac
fi
# Don't error if the file doesn't exist and rm -f was used.
if (test -L "$file") >/dev/null 2>&1 \
|| (test -h "$file") >/dev/null 2>&1 \
|| test -f "$file"; then
:
elif test -d "$file"; then
exit_status=1
continue
elif test "$rmforce" = yes; then
continue
fi
rmfiles="$file"
case $name in
*.la)
# Possibly a libtool archive, so verify it.
if (sed -e '2q' $file | egrep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then
. $dir/$name
# Delete the libtool libraries and symlinks.
for n in $library_names; do
rmfiles="$rmfiles $objdir/$n"
done
test -n "$old_library" && rmfiles="$rmfiles $objdir/$old_library"
test $mode = clean && rmfiles="$rmfiles $objdir/$name $objdir/${name}i"
if test $mode = uninstall; then
if test -n "$library_names"; then
# Do each command in the postuninstall commands.
eval cmds=\"$postuninstall_cmds\"
IFS="${IFS= }"; save_ifs="$IFS"; IFS='~'
for cmd in $cmds; do
IFS="$save_ifs"
$show "$cmd"
$run eval "$cmd"
if test $? != 0 && test "$rmforce" != yes; then
exit_status=1
fi
done
IFS="$save_ifs"
fi
if test -n "$old_library"; then
# Do each command in the old_postuninstall commands.
eval cmds=\"$old_postuninstall_cmds\"
IFS="${IFS= }"; save_ifs="$IFS"; IFS='~'
for cmd in $cmds; do
IFS="$save_ifs"
$show "$cmd"
$run eval "$cmd"
if test $? != 0 && test "$rmforce" != yes; then
exit_status=1
fi
done
IFS="$save_ifs"
fi
# FIXME: should reinstall the best remaining shared library.
fi
fi
;;
*.lo)
if test "$build_old_libs" = yes; then
oldobj=`$echo "X$name" | $Xsed -e "$lo2o"`
rmfiles="$rmfiles $dir/$oldobj"
fi
;;
*)
# Do a test to see if this is a libtool program.
if test $mode = clean &&
(sed -e '4q' $file | egrep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then
relink_command=
. $dir/$file
rmfiles="$rmfiles $objdir/$name $objdir/${name}S.${objext}"
if test "$fast_install" = yes && test -n "$relink_command"; then
rmfiles="$rmfiles $objdir/lt-$name"
fi
fi
;;
esac
$show "$rm $rmfiles"
$run $rm $rmfiles || exit_status=1
done
# Try to remove the ${objdir}s in the directories where we deleted files
for dir in $rmdirs; do
if test -d "$dir"; then
$show "rmdir $dir"
$run rmdir $dir >/dev/null 2>&1
fi
done
exit $exit_status
;;
"")
$echo "$modename: you must specify a MODE" 1>&2
$echo "$generic_help" 1>&2
exit 1
;;
esac
$echo "$modename: invalid operation mode \`$mode'" 1>&2
$echo "$generic_help" 1>&2
exit 1
fi # test -z "$show_help"
# We need to display help for each of the modes.
case $mode in
"") $echo \
"Usage: $modename [OPTION]... [MODE-ARG]...
Provide generalized library-building support services.
--config show all configuration variables
--debug enable verbose shell tracing
-n, --dry-run display commands without modifying any files
--features display basic configuration information and exit
--finish same as \`--mode=finish'
--help display this help message and exit
--mode=MODE use operation mode MODE [default=inferred from MODE-ARGS]
--quiet same as \`--silent'
--silent don't print informational messages
--version print version information
MODE must be one of the following:
clean remove files from the build directory
compile compile a source file into a libtool object
execute automatically set library path, then run a program
finish complete the installation of libtool libraries
install install libraries or executables
link create a library or an executable
uninstall remove libraries from an installed directory
MODE-ARGS vary depending on the MODE. Try \`$modename --help --mode=MODE' for
a more detailed description of MODE."
exit 0
;;
clean)
$echo \
"Usage: $modename [OPTION]... --mode=clean RM [RM-OPTION]... FILE...
Remove files from the build directory.
RM is the name of the program to use to delete files associated with each FILE
(typically \`/bin/rm'). RM-OPTIONS are options (such as \`-f') to be passed
to RM.
If FILE is a libtool library, object or program, all the files associated
with it are deleted. Otherwise, only FILE itself is deleted using RM."
;;
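# Illustrative clean-mode invocation (assumes the script is installed as `libtool'; not part of the original help text):
#   libtool --mode=clean rm -f libfoo.la foo.lo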
compile)
$echo \
"Usage: $modename [OPTION]... --mode=compile COMPILE-COMMAND... SOURCEFILE
Compile a source file into a libtool library object.
This mode accepts the following additional options:
-o OUTPUT-FILE set the output file name to OUTPUT-FILE
-prefer-pic try to build PIC objects only
-prefer-non-pic try to build non-PIC objects only
-static always build a \`.o' file suitable for static linking
COMPILE-COMMAND is a command to be used in creating a \`standard' object file
from the given SOURCEFILE.
The output file name is determined by removing the directory component from
SOURCEFILE, then substituting the C source code suffix \`.c' with the
library object suffix, \`.lo'."
;;
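# Illustrative compile-mode invocation (assumes the script is installed as `libtool'; not part of the original help text):
#   libtool --mode=compile gcc -c foo.c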
execute)
$echo \
"Usage: $modename [OPTION]... --mode=execute COMMAND [ARGS]...
Automatically set library path, then run a program.
This mode accepts the following additional options:
-dlopen FILE add the directory containing FILE to the library path
This mode sets the library path environment variable according to \`-dlopen'
flags.
If any of the ARGS are libtool executable wrappers, then they are translated
into their corresponding uninstalled binary, and any of their required library
directories are added to the library path.
Then, COMMAND is executed, with ARGS as arguments."
;;
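# Illustrative execute-mode invocation (assumes the script is installed as `libtool'; `myprog' is a hypothetical wrapper):
#   libtool --mode=execute gdb ./myprog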
finish)
$echo \
"Usage: $modename [OPTION]... --mode=finish [LIBDIR]...
Complete the installation of libtool libraries.
Each LIBDIR is a directory that contains libtool libraries.
The commands that this mode executes may require superuser privileges. Use
the \`--dry-run' option if you just want to see what would be executed."
;;
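# Illustrative finish-mode invocation (assumes the script is installed as `libtool'):
#   libtool --mode=finish /usr/local/lib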
install)
$echo \
"Usage: $modename [OPTION]... --mode=install INSTALL-COMMAND...
Install executables or libraries.
INSTALL-COMMAND is the installation command. The first component should be
either the \`install' or \`cp' program.
The rest of the components are interpreted as arguments to that command (only
BSD-compatible install options are recognized)."
;;
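# Illustrative install-mode invocation (assumes the script is installed as `libtool'):
#   libtool --mode=install install -c libfoo.la /usr/local/lib/libfoo.la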
link)
$echo \
"Usage: $modename [OPTION]... --mode=link LINK-COMMAND...
Link object files or libraries together to form another library, or to
create an executable program.
LINK-COMMAND is a command using the C compiler that you would use to create
a program from several object files.
The following components of LINK-COMMAND are treated specially:
-all-static do not do any dynamic linking at all
-avoid-version do not add a version suffix if possible
-dlopen FILE \`-dlpreopen' FILE if it cannot be dlopened at runtime
-dlpreopen FILE link in FILE and add its symbols to lt_preloaded_symbols
-export-dynamic allow symbols from OUTPUT-FILE to be resolved with dlsym(3)
-export-symbols SYMFILE
try to export only the symbols listed in SYMFILE
-export-symbols-regex REGEX
try to export only the symbols matching REGEX
-LLIBDIR search LIBDIR for required installed libraries
-lNAME OUTPUT-FILE requires the installed library libNAME
-module build a library that can be dlopened
-no-fast-install disable the fast-install mode
-no-install link a not-installable executable
-no-undefined declare that a library does not refer to external symbols
-o OUTPUT-FILE create OUTPUT-FILE from the specified objects
-release RELEASE specify package release information
-rpath LIBDIR the created library will eventually be installed in LIBDIR
-R[ ]LIBDIR add LIBDIR to the runtime path of programs and libraries
-static do not do any dynamic linking of libtool libraries
-version-info CURRENT[:REVISION[:AGE]]
specify library version info [each variable defaults to 0]
All other options (arguments beginning with \`-') are ignored.
Every other argument is treated as a filename. Files ending in \`.la' are
treated as uninstalled libtool libraries, other files are standard or library
object files.
If the OUTPUT-FILE ends in \`.la', then a libtool library is created,
only library objects (\`.lo' files) may be specified, and \`-rpath' is
required, except when creating a convenience library.
If OUTPUT-FILE ends in \`.a' or \`.lib', then a standard library is created
using \`ar' and \`ranlib', or on Windows using \`lib'.
If OUTPUT-FILE ends in \`.lo' or \`.${objext}', then a reloadable object file
is created, otherwise an executable program is created."
;;
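# Illustrative link-mode invocation (assumes the script is installed as `libtool'):
#   libtool --mode=link gcc -o libfoo.la foo.lo -rpath /usr/local/lib -version-info 1:0:0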
uninstall)
$echo \
"Usage: $modename [OPTION]... --mode=uninstall RM [RM-OPTION]... FILE...
Remove libraries from an installation directory.
RM is the name of the program to use to delete files associated with each FILE
(typically \`/bin/rm'). RM-OPTIONS are options (such as \`-f') to be passed
to RM.
If FILE is a libtool library, all the files associated with it are deleted.
Otherwise, only FILE itself is deleted using RM."
;;
*)
$echo "$modename: invalid operation mode \`$mode'" 1>&2
$echo "$help" 1>&2
exit 1
;;
esac
echo
$echo "Try \`$modename --help' for more information about other modes."
exit 0
# Local Variables:
# mode:shell-script
# sh-indentation:2
# End:
|
aehlke/eb
|
zlib/ltmain.sh
|
Shell
|
gpl-2.0
| 138,312 |
#!/bin/bash
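# Stress loop: read every process's /proc/<pid>/cmdline forever, discarding
# output and errors; terminate with Ctrl-C or kill.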
while :
do
cat /proc/[0-9]*/cmdline > /dev/null 2>/dev/null
done
|
liaoqingwei/ltp
|
testscripts/adp_test.sh
|
Shell
|
gpl-2.0
| 79 |
#!/bin/sh
if test $# -lt 3; then
echo "Usage: $0 <tree> <tag> <dir>"
exit 1
fi
TREE=$1
TAG=$2
DIR=$3
set -e
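# Clone into a temporary directory and rename it into place only on success,
# so an interrupted clone never leaves a half-populated $DIR-remote behind.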
if test \! -d $DIR-remote; then
rm -rf $DIR-remote $DIR-remote.tmp
mkdir -p $DIR-remote.tmp; rmdir $DIR-remote.tmp
$GIT clone $TREE $DIR-remote.tmp
if test "$TAG" ; then
cd $DIR-remote.tmp
$GIT branch -D dummy >/dev/null 2>&1 ||:
$GIT checkout -b dummy $TAG
cd ..
fi
mv $DIR-remote.tmp $DIR-remote
fi
rm -f $DIR
ln -sf $DIR-remote $DIR
|
timtianyang/rt-xen4.6
|
scripts/git-checkout.sh
|
Shell
|
gpl-2.0
| 464 |
#!/usr/bin/env bash
function fg_activate_machine {
eval $(docker-machine env ForestGuardianWebSummit)
}
function fg_clean {
fg_activate_machine
docker-compose -f docker-compose-production.yml down
docker volume rm $(docker volume ls -f dangling=true -q)
}
function fg_start {
fg_activate_machine
cd containers/production
# --build-arg CACHE_DATE=$(date +%s) busts the layer cache, forcing docker to rebuild every step from that ARG onwards.
docker build --build-arg CACHE_DATE=$(date +%s) -t forestguardian/backend .
docker push forestguardian/backend
cd ../..
docker-compose -f docker-compose-production.yml up -d redis db
# waits for database to be initialized.
sleep 10
# setup database
RAILS_ENV=production docker-compose -f docker-compose-production.yml run web bundle exec rake db:create
RAILS_ENV=production docker-compose -f docker-compose-production.yml run web bundle exec rake db:schema:load
RAILS_ENV=production docker-compose -f docker-compose-production.yml run web bundle exec rake db:migrate
RAILS_ENV=production docker-compose -f docker-compose-production.yml run web bundle exec rake db:seed
docker-compose -f docker-compose-production.yml up -d sidekiq
# sync with NASA files.
RAILS_ENV=production docker-compose -f docker-compose-production.yml run web bundle exec rails runner 'SyncDailyDataJob.new.perform'
docker-compose -f docker-compose-production.yml up -d web
}
function fg_quick_reload {
tmp_start=$(date +%s) &&
fg_activate_machine &&
docker-compose -f docker-compose-production.yml exec web bundle exec bash -c 'git pull' &&
docker-compose -f docker-compose-production.yml exec sidekiq bundle exec bash -c 'git pull' &&
docker-compose -f docker-compose-production.yml restart sidekiq web &&
tmp_end=$(date +%s)
echo Quick Reload lasted $(( tmp_end - tmp_start )) seconds.
}
function fg_quick_precompile {
tmp_start=$(date +%s) &&
fg_activate_machine &&
docker-compose -f docker-compose-production.yml exec web bundle exec bash -c 'bundle exec rake assets:precompile' &&
docker-compose -f docker-compose-production.yml restart web &&
tmp_end=$(date +%s)
echo Quick Precompile lasted $(( tmp_end - tmp_start )) seconds.
}
function fg_quick_gem_install {
tmp_start=$(date +%s) &&
fg_activate_machine &&
docker-compose -f docker-compose-production.yml exec web bundle exec bash -c 'bundle install' &&
docker-compose -f docker-compose-production.yml exec sidekiq bundle exec bash -c 'bundle install' &&
tmp_end=$(date +%s)
echo Quick Gems Install lasted $(( tmp_end - tmp_start )) seconds.
}
function fg_reload {
fg_activate_machine
docker-compose -f docker-compose-production.yml stop web
docker-compose -f docker-compose-production.yml build web
docker-compose -f docker-compose-production.yml up -d web
}
function fg_ip {
fg_activate_machine
docker-machine ip forestguardian
}
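# Assumed usage (not stated in the original file): source this script, then
# call the functions directly, e.g.
#   source deploy-production.sh && fg_start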
|
ForestGuardian/ForestGuardianBackend
|
deploy-production.sh
|
Shell
|
mit
| 2,978 |
#!/bin/sh
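# Container entrypoint: run Composer's post-update scripts, then keep the
# container alive by running Apache in the foreground.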
composer run-script post-update-cmd
apache2ctl -DFOREGROUND
|
datatheke/datatheke
|
docker/build/run.sh
|
Shell
|
mit
| 71 |
#!/usr/bin/env bash
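# Run in a subshell so the caller's working directory is left unchanged.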
(cd consumer && npm start)
|
swarajban/node-express-jwt
|
run_consumer.sh
|
Shell
|
mit
| 47 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for CESA-2014:1870
#
# Security announcement date: 2014-11-18 18:33:30 UTC
# Script generation date: 2017-01-01 21:11:17 UTC
#
# Operating System: CentOS 6
# Architecture: x86_64
#
# Vulnerable packages fix on version:
# - libXfont.i686:1.4.5-4.el6_6
# - libXfont-devel.i686:1.4.5-4.el6_6
# - libXfont.x86_64:1.4.5-4.el6_6
# - libXfont-devel.x86_64:1.4.5-4.el6_6
#
# Last versions recommended by security team:
# - libXfont.i686:1.4.5-5.el6_7
# - libXfont-devel.i686:1.4.5-5.el6_7
# - libXfont.x86_64:1.4.5-5.el6_7
# - libXfont-devel.x86_64:1.4.5-5.el6_7
#
# CVE List:
# - CVE-2014-0209
# - CVE-2014-0210
# - CVE-2014-0211
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo yum install libXfont.i686-1.4.5 -y
sudo yum install libXfont-devel.i686-1.4.5 -y
sudo yum install libXfont.x86_64-1.4.5 -y
sudo yum install libXfont-devel.x86_64-1.4.5 -y
|
Cyberwatch/cbw-security-fixes
|
CentOS_6/x86_64/2014/CESA-2014:1870.sh
|
Shell
|
mit
| 1,013 |
#!/bin/bash
# Joseph Harriott Sun 13 Feb 2022
# Recursively find all *.md files in the current directory,
# convert those that haven't been done yet or have changed since last converted to pdf.
# Use LaTeX Chapter as the first level heading, and Subsubsection as the 5th
# (and preferably last) level heading. Apply some neater formatting.
# -------------------------------------------------------------------------------------
# If a non-zero first argument is given, all of the pdf's are re-done.
# If a second argument is given, ToC is switched off.
# m4ps0, m4ps1, mt are defined in $Bash/bashrc-wm
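# Illustrative invocations (file name assumed; the second argument's value is
# arbitrary, its mere presence disables the ToC):
#   bash m4ps.sh        # prompt first, convert only new or changed markdowns
#   bash m4ps.sh 0      # skip the confirmation prompt
#   bash m4ps.sh 1 x    # redo every pdf, without a table of contents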
# just in case this script was stopped previously
[ -s md4pdf.md ] && rm md4pdf.md
absm4p="$( dirname "${BASH_SOURCE[0]}" )/m4p.sh" # $MD4PDF/GNULinux/md4pdf.sh
if ( [ $1 ] && [ $1 = 0 ] ); then
sure='y' # first argument was 0, so we're sure
else
read -p "About to recursively create a load of PDFs from markdowns ${tpf5b} - are you in the right parent directory?${tpfn} " sure
fi
[ ! $sure ] || [ $sure != "y" ] && exit
MI=$MACHINE/jo
if [ -d $MI ]; then
log=$MI/m4ps.log
else
log=$HOME/m4ps.log
fi
[[ -f $log ]] && rm $log
shopt -s globstar
for mdfile in **/*.md; do
mdf=${mdfile%.*}
if [[ ! $mdf =~ " " ]]; then
# record progress
ls -l $mdf.md >> $log
[ -f $mdf.pdf ] && ls -l $mdf.pdf >> $log
if ( [ $1 ] && [ $1 != 0 ] ) || [ $mdf.pdf -ot $mdf.md ]; then
bash $absm4p $mdf $2 # the conversion
echo "Pandoc'd $mdf.md" >> $log
fi
else
echo "skipped '$mdfile'" # because space in name
fi
done
|
harriott/md4pdf
|
GNULinux/m4ps.sh
|
Shell
|
mit
| 1,624 |
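# Menu helper: prefer wofi, fall back to an fzf picker in kitty, and otherwise
# report the missing tools via mako (notify-send) or swaynag. $1 is the prompt.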
if hash wofi 2>/dev/null; then
wofi --dmenu --allow-images --insensitive --prompt "${1}"
elif hash kitty 2>/dev/null; then
fzf-kitty -i --reverse --prompt "'${1}: '"
elif hash mako 2>/dev/null; then
notify-send 'No application to display menu' 'Please, install wofi or kitty'
elif hash swaynag 2>/dev/null; then
swaynag -m 'No application to display menu. Please, install wofi or kitty'
fi
|
ahaasler/dotfiles
|
sway/config/scripts/menu.sh
|
Shell
|
mit
| 394 |
#!/bin/sh
set -xe
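# _last_apt_get_update [SECONDS]: succeeds (returns 0) when the apt package
# cache is missing or older than SECONDS (default 3600), i.e. when another
# `apt-get update' is due; returns 1 while the cache is still fresh.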
_last_apt_get_update() {
[ -z "${1}" ] && cache_seconds="3600" || cache_seconds="${1}"
cache_file="/var/cache/apt/pkgcache.bin"
if [ -f "${cache_file}" ]; then
last="$(stat -c %Y "${cache_file}")"
now="$(date +'%s')"
diff="$(($now - $last))"
if [ "${diff}" -lt "${cache_seconds}" ]; then
return 1
else
return 0
fi
else
return 0
fi
}
#enable google repository and download chrome developer console
if [ ! -f /etc/apt/sources.list.d/google-chrome.list ]; then
wget -q -O - https://dl-ssl.google.com/linux/linux_signing_key.pub | sudo apt-key add -
printf "%s\\n" "deb http://dl.google.com/linux/chrome/deb/ stable main" | sudo tee /etc/apt/sources.list.d/google-chrome.list
_require_apt_get_update="1"
fi
if [ ! -f /etc/apt/sources.list.d/npm.list ]; then
wget --quiet -O - https://deb.nodesource.com/gpgkey/nodesource.gpg.key | sudo apt-key add -
VERSION="node_7.x"
DISTRO="$(lsb_release -s -c)"
printf "%s\\n" "deb https://deb.nodesource.com/${VERSION} ${DISTRO} main" | sudo tee /etc/apt/sources.list.d/npm.list
_require_apt_get_update="1"
fi
if [ ! -f /etc/apt/sources.list.d/yarn.list ]; then
wget --quiet -O - https://dl.yarnpkg.com/debian/pubkey.gpg | sudo apt-key add -
printf "%s\\n" "deb https://dl.yarnpkg.com/debian/ stable main" | sudo tee /etc/apt/sources.list.d/yarn.list
_require_apt_get_update="1"
fi
if ! command -v "watchman" >/dev/null 2>&1; then
if ! command -v "add-apt-repository" >/dev/null 2>&1; then
sudo apt-get install software-properties-common
fi
sudo add-apt-repository ppa:mwhiteley/watchman-daily
_require_apt_get_update="1"
#git clone https://github.com/facebook/watchman.git
#cd watchman
#./autogen.sh
#./configure
#make
#sudo make install
fi
if ! command -v "java" >/dev/null 2>&1; then
if ! command -v "add-apt-repository" >/dev/null 2>&1; then
sudo apt-get install software-properties-common
fi
sudo add-apt-repository ppa:webupd8team/java -y
printf "%s\\n" 'oracle-java8-installer shared/accepted-oracle-license-v1-1 select true' | \
sudo /usr/bin/debconf-set-selections
_require_apt_get_update="1"
fi
if [ X"${_require_apt_get_update}" = X"1" ] || _last_apt_get_update 86400; then
sudo apt-get update
fi
dpkg -l | grep squid-deb-proxy-client >/dev/null 2>&1 || \
sudo apt-get install --no-install-recommends -y squid-deb-proxy-client
#install them everytime to ensure updates
sudo apt-get install --no-install-recommends -y \
ant \
autoconf \
automake \
build-essential \
expect \
git \
google-chrome-stable \
htop \
lib32stdc++6 \
lib32z1 \
libssl-dev \
libffi-dev \
oracle-java8-installer \
python-dev \
python-pip \
nodejs \
watchman \
yarn \
apache2
command -v "n" >/dev/null 2>&1 || sudo npm install -g n
sudo n stable
#sudo npm install -g yarn
sudo yarn self-update || :
|
noinarisak/my-starter-kit
|
vagrant/flask/provision/001-install-base-dependencies.sh
|
Shell
|
mit
| 3,310 |
#!/usr/bin/env bash
apt-get update
apt-get install -y rabbitmq-server haskell-platform
/usr/lib/rabbitmq/lib/rabbitmq_server-2.7.1/sbin/rabbitmq-plugins enable rabbitmq_management
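# Enables the web management UI and HTTP API (typically port 55672 on this 2.x series).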
service rabbitmq-server restart
cd /vagrant
cabal update
cabal install --only-dep
cabal configure
cabal build
|
puffnfresh/amqp-pathwatcher
|
bootstrap.sh
|
Shell
|
mit
| 293 |
# get emacs' tramp (ssh) mode to work
if [[ "$TERM" == "dumb" ]]
then
unsetopt zle
unsetopt prompt_cr
unsetopt prompt_subst
unfunction precmd
unfunction preexec
PS1='$ '
fi
|
jeroenrietveld/dotfiles
|
emacs/tramp_fix.zsh
|
Shell
|
mit
| 183 |