code (stringlengths 2–1.05M) | repo_name (stringlengths 5–110) | path (stringlengths 3–922) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64 2–1.05M)
---|---|---|---|---|---|
#!/bin/sh
if test "x$srcdir" = x ; then srcdir=`pwd`; fi
. ../test_common.sh
. ${srcdir}/d4test_common.sh
set -e
echo "test_raw.sh:"
# Compute the set of testfiles
cd ${srcdir}/daptestfiles
F=`ls -1d *.dap`
cd -
F=`echo $F | tr '\r\n' ' '`
F=`echo $F | sed -e s/.dap//g`
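# e.g. daptestfiles containing a.dap and b.dap yields F="a b"
# (newlines collapsed to spaces, .dap extension stripped)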
# Do cleanup on the baseline file
baseclean() {
if test $# != 2 ; then
echo "simplify: too few args"
else
rm -f $2
while read -r iline; do
oline=`echo $iline | tr "'" '"'`
echo "$oline" >> $2
done < $1
fi
}
# Do cleanup on the result file
resultclean() {
if test $# != 2 ; then
echo "simplify: too few args"
else
rm -f $2
while read -r iline; do
oline=`echo $iline | sed -e 's|^\(netcdf.*\)[.]nc\(.*\)$|\\1\\2|'`
echo "$oline" >> $2
done < $1
fi
}
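# e.g. baseclean rewrites 'atomic' as "atomic", and resultclean rewrites
#   netcdf test_atomic.nc {  ->  netcdf test_atomic {
# so baseline and result dumps can be diffed regardless of quote style and the .nc suffix.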
setresultdir results_test_raw
if test "x${RESET}" = x1 ; then rm -fr ${BASELINERAW}/*.dmp ; fi
for f in $F ; do
echo "testing: $f"
URL="[log][dap4]file://${DAPTESTFILES}/${f}"
if ! ${NCDUMP} "${URL}" > ${builddir}/results_test_raw/${f}.dmp; then
failure "${URL}"
fi
if test "x${TEST}" = x1 ; then
if ! diff -wBb ${BASELINERAW}/${f}.dmp ${builddir}/results_test_raw/${f}.dmp ; then
failure "diff ${f}.dmp"
fi
elif test "x${RESET}" = x1 ; then
echo "${f}:"
cp ${builddir}/results_test_raw/${f}.dmp ${BASELINERAW}/${f}.dmp
elif test "x${DIFF}" = x1 ; then
echo "hdrtest: ${f}"
baseclean
if ! diff -wBb ${BASELINERAW}/${f}.dmp ${BASELINE}/${f}.ncdump ; then
failure diff -wBb ${BASELINERAW}/${f}.dmp ${BASELINE}/${f}.ncdump
fi
fi
done
finish
|
Unidata/netcdf-c
|
dap4_test/test_raw.sh
|
Shell
|
bsd-3-clause
| 1,606 |
#!/bin/bash
set -x
set -e
# copy the latest version of setup to a standard name for other scripts
cd $WORKSPACE/ObjectModel.Tests/TestSetup
pwd
rm -f VersionOne.Setup-Ultimate-latest.exe
SETUP=`ls -1rt VersionOne.Setup-Ultimate-*.*.*.*.exe | tail -1`
cp $SETUP VersionOne.Setup-Ultimate-latest.exe
|
versionone/VersionOne.SDK.NET.ObjectModel
|
ObjectModel.Tests/TestSetup/copy_latest_setup_to_standard_name.sh
|
Shell
|
bsd-3-clause
| 305 |
#!/bin/bash
#
# University of Luxembourg
# Laboratory of Algorithmics, Cryptology and Security (LACS)
#
# FELICS - Fair Evaluation of Lightweight Cryptographic Systems
#
# Copyright (C) 2015 University of Luxembourg
#
# Written in 2015 by Daniel Dinu <[email protected]>
#
# This file is part of FELICS.
#
# FELICS is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# FELICS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
#
# Display help information
function display_help()
{
echo ""
echo "Call this script to extract the cipher code size"
echo " ./cipher_code_size.sh [{-h|--help}] [--version] [{-m|--mode}=[0|1]] [{-s|--scenario}=[0|1]] [{-a|--architecture}=[PC|AVR|MSP|ARM]] [{-t|--target}=[...]] [{-o|--output}=[...]] [{-b|build}=[0|1]] [{-co|--compiler_options}='...']"
echo " To call from a cipher build folder use:"
echo " ./../../../../scripts/cipher/cipher_code_size.sh [options]"
echo ""
echo " Options:"
echo " -h, --help"
echo " Display help information"
echo " --version"
echo " Display version information"
echo " -m, --mode"
echo " Specifies which output mode to use"
echo " 0 - raw table for given cipher"
echo " 1 - raw data for given cipher"
echo " Default: 0"
echo " -s, --scenario"
echo " Specifies which scenario is used"
echo " 0 - cipher scenario"
echo " 1 - scenario 1"
echo " Default: 0"
echo " -a, --architecture"
echo " Specifies which architecture is used"
echo " PC - binary files are build for PC"
echo " AVR - binary files are build for AVR device"
echo " MSP - binary file are build for MSP device"
echo " ARM - binary files are build for ARM device"
echo " Default: PC"
echo " -t, --target"
echo " Specifies which is the target path. The relative path is computed from the directory where script was called"
echo " Default: ."
echo " -o, --output"
echo " Specifies where to output the results. The relative path is computed from the directory where script was called"
echo " Default: /dev/tty"
echo " -b, --build"
echo " Specifies if script should build the source files"
echo " 0 - do not build source files"
echo " 1 - build source files"
echo " Default: 1"
echo " -co,--compiler_options"
echo " Specifies the compiler options"
echo " List of values: '-O3 --param max-unroll-times=5 --param max-unrolled-insns=100 ...'"
echo " Default: -O3"
echo ""
echo " Examples:"
echo " ./../../../../scripts/cipher/cipher_code_size.sh -m=0"
echo " ./../../../../scripts/cipher/cipher_code_size.sh --mode=1 --architecture=MSP"
echo " ./../../../../scripts/cipher/cipher_code_size.sh -o=results.txt"
echo " ./cipher_code_size.sh -t=./../../source/ciphers/CipherName_StateSizeInBits_KeySizeInBits_IVSizeInBits_v01/build"
echo ""
exit
}
|
GaloisInc/hacrypto
|
src/C/FELICS/stream_ciphers/scripts/help/cipher/cipher_code_size.sh
|
Shell
|
bsd-3-clause
| 3,314 |
#! /bin/bash
SONIC_BIN=$HOME/sonic/bin
# test for 1518 and 64~72 B packets
function Test {
mode=$1
name=$2
# echo $mode $extra $togrep $name
echo "$name"
for len in 1518;
# for len in 1518 72 71 70 69 68 67 66 65 64;
do
valgrind ./tester mode=$mode pkt_len=$len pkt_cnt=30000000 $extra
done
echo ""
}
make clean > /dev/null 2>&1
make tester SONIC_DDEBUG=0 SONIC_DEBUG=0 SONIC_FAST_CRC=0 > /dev/null 2>&1
Test 0 "..."
# FastCRC Performance
#Test 1 "FAST CRC"
#encoder
#Test 2 "ENCODER"
#decoder
#Test 3 "DECODER"
#Gen
#Test 10 "PKT_GEN"
#Recv
#Test 20 "PKT_RCV"
#Cap
#Test 23 "PKT_CAP"
|
hanw/sonic
|
driver/bin/memory_regression.sh
|
Shell
|
bsd-3-clause
| 632 |
#!/bin/bash
#
# General purpose MEPO model runs for ops-only (UnitCommit) model.
#
# Features:
# - Handles directory, model, and runcode/svn manipulation
# - Sets up full week for max run time of 168hr (1 week) with 160hr for solver
# - Still uses Nehalem nodes for fair time comparisons
#
# Notes:
# - All run options are assumed to be included in MEPO_ops_list.csv file
# - Use in non-planning context. Does not extract capacity info from planning
# result outputs.
#
# To actually submit the job use:
# qsub SCRIPT_NAME -t [specify items/lines from csv to run]
# Version History
# Ver Date Time Who What
# --- ---------- ----- -------------- ---------------------------------
# 1 2013-10-14 10:35 bpalmintier Adapted from whenUC_full_long.sh v1 and CpUc_co2_pol_run.sh v5
# 2 2013-10-15 13:35 bpalmintier Revert to nehalem nodes for fair comparisons
#========= Setup Job Queue Parameters ==========
# IMPORTANT: The lines beginning #PBS set various queuing parameters, they are not simple comments
#
# Specify node type. Options on svante include: amd64, nehalem, sandy
#PBS -l nodes=1:nehalem,mem=10gb
#
# Merges any error messages into output file
#PBS -j oe
#
# Select the queue based on maximum run times:
# short 2hr
# medium 8hr
# long 24hr
# xlong 48hr, extendable to 168hr using -l walltime= option below
#PBS -q xlong
# And up the run time to the maximum of a full week (168 hrs)
#PBS -l walltime=168:00:00
#
# Setup Array of runs. Format is
# -t RANGE%MAX
# where RANGE is any sequence of run numbers using #-# and/or #,#
# and MAX is the maximum number of simultaneous tasks
#PBS -t 2-4,13-17
# The corresponding array ID number is set in ${PBS_ARRAYID}
#--------------------
# Shared Setup
#--------------------
MODEL_DIR_BASE="${HOME}/projects/advpower/models"
#Establish our model directory
CONFIG_DIR="${MODEL_DIR_BASE}/config"
#Establish our model directory
MODEL_DIR="${MODEL_DIR_BASE}/ops"
GAMS_MODEL="UnitCommit"
#Setup output
# IMPORTANT: Include trailing directory separator
OUT_DIR="${HOME}/projects/advpower/results/gams/mepo/"
#Make sure output directory exists
mkdir -p ${OUT_DIR}
# Default GAMS OPT to:
# errmsg: enable in-line description of errors in list file
# lf & lo: store the solver log (normally printed to screen) in $OUT_DIR
# o: rename the list file and store in $OUT_DIR
# inputdir: Look for $include and $batinclude files in $WORK_DIR
# And Advanced Power Model OPT to:
# out_dir: specify directory for CSV output files
# out_prefix: add a unique run_id to all output files
#
# Plus additional user supplied OPT pasted into template
# Options shared by all runs across all files
COMMON_IO_OPT=" -errmsg=1 -lo=2 -inputdir=${MODEL_DIR} --out_dir=${OUT_DIR} "
ALL_RUN_OPT=" "
# Options common to the runs in this file
THIS_FILE_OPT=" ${ALL_RUN_OPT} "
# Note: 600000sec=160hrs
LONG_OPT=" --max_solve_time=600000 "
#--------------------
# Run Subtasks
#--------------------
# Here we use the sed utility to extract the given line from a configuration file
# source: calls an external script
# the back ticks ``: pass the result of the command inside to build up the command line
# sed: does the line extraction and search and replace
# -n Prevents printing lines unless requested
# # specifies which line in the file to use
# s/old/new/ does a regular expression substitution for this line, in this case removing commas
# g Make this search "global" to replace all occurrences on the line
# p Prints the result
# < Specifies the input config file to extract the line from
source ${CONFIG_DIR}/MEPO_run.sh `sed -n "${PBS_ARRAYID} s/[, ]/ /gp" < ${CONFIG_DIR}/MEPO_ops_list.csv`
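# For illustration, if line 3 of MEPO_ops_list.csv were (hypothetical contents):
#   UnitCommit,week_run,--co2_cost=30
# then PBS_ARRAYID=3 makes sed print "UnitCommit week_run --co2_cost=30",
# which MEPO_run.sh receives as its argument list.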
#Let caller know that everything seems fine
exit 0
|
bpalmintier/mepo
|
models/config/MEPO_ops_168hr.sh
|
Shell
|
bsd-3-clause
| 3,858 |
# Create the project "project" in the repository /var/svn
cd /var/svn
svnadmin create project
# Check out "project" into the current directory, renamed to new_project_name
# The svn repository path for "project" is svn://127.0.0.1/
# The assigned svn username and password are account and 123456
svn co svn://127.0.0.1/project new_project_name --username account --password 123456
# Add a file to version control
# The file to add is file_for_add
svn add file_for_add
# Add a directory to version control
# The directory to add is folder_for_add
svn add folder_for_add
# Remove the file file_for_delete from version control
# If the file has already been committed to the repository, this removes it from the directory
svn delete file_for_delete
# If the file has not been committed yet, use this instead; the file is not lost
svn revert file_for_delete
# Remove the directory folder_for_delete from version control
# If it has already been committed to the repository, this removes the directory from disk
svn delete folder_for_delete
# If it has not been committed yet, use this instead; the directory is not lost
# If folder_for_delete still contains other files or directories, add --depth infinity for the revert to succeed
svn revert folder_for_delete --depth infinity
# Commit changes to the repository "project"
# The assigned svn username and password are account and 123456
svn ci --username account --password 123456
# Update the current directory
# The assigned svn username and password are account and 123456
svn update ./ --username account --password 123456
# Restore the current folder after it was rm'd (it was not deleted from the repository)
svn update --force folder-deleted
# Show information about the current working directory
svn info
# Set svn:ignore patterns on the current directory "."
svn propset svn:ignore "*.o
*.so
*.tmp
bin
gen
out
" .
# svn account administration
vi conf/passwd   # add usernames and passwords here; modify or delete users here as well
vi conf/authz    # set permissions here; a directory needs its full path, e.g. [/A-project], [/B-project/B-subproject]
# Delete cached account credentials
rm ~/.subversion/auth/svn.simple/*
|
ijsonlin/life
|
linux/svn.sh
|
Shell
|
mit
| 2,020 |
#!/bin/bash
JARS=../../jars/lib/jars
CLASSPATH=.:\
$JARS/kbase/shock/shock-client-0.0.14.jar:\
$JARS/kbase/auth/kbase-auth-0.4.1.jar:\
$JARS/apache_commons/commons-logging-1.1.1.jar:\
$JARS/apache_commons/http/httpclient-4.3.1.jar:\
$JARS/apache_commons/http/httpcore-4.3.jar:\
$JARS/apache_commons/http/httpmime-4.3.1.jar:\
$JARS/jackson/jackson-annotations-2.2.3.jar:\
$JARS/jackson/jackson-core-2.2.3.jar:\
$JARS/jackson/jackson-databind-2.2.3.jar:\
$JARS/apache_commons/commons-io-2.4.jar
javac -cp $CLASSPATH us/kbase/workspace/performance/shockclient/SaveAndGetFromShock.java
java -cp $CLASSPATH us.kbase.workspace.performance.shockclient.SaveAndGetFromShock $@
|
MrCreosote/workspace_deluxe
|
performance/run_shock_perf_test.sh
|
Shell
|
mit
| 669 |
#!/bin/sh
if [ -z "$PROJECT_PATH" ]; then
echo "Environment PROJECT_PATH required"
exit 1
else
cd "$PROJECT_PATH" || exit
fi
if [ -z "$CONFIG_FILE" ]; then
echo "Config file not specified"
else
CONFIG_FILE="-- $CONFIG_FILE"
export CONFIG_FILE
fi
exec "$@"
|
researchlab/experiments
|
debug/docker/entrypoint.sh
|
Shell
|
mit
| 276 |
export DISABLE_SPRING=1
|
anhari/dotfiles-local
|
zsh/configs/rails.zsh
|
Shell
|
mit
| 24 |
#!/bin/bash
FN="TxDb.Rnorvegicus.UCSC.rn6.refGene_3.4.6.tar.gz"
URLS=(
"https://bioconductor.org/packages/3.9/data/annotation/src/contrib/TxDb.Rnorvegicus.UCSC.rn6.refGene_3.4.6.tar.gz"
"https://bioarchive.galaxyproject.org/TxDb.Rnorvegicus.UCSC.rn6.refGene_3.4.6.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-txdb.rnorvegicus.ucsc.rn6.refgene/bioconductor-txdb.rnorvegicus.ucsc.rn6.refgene_3.4.6_src_all.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-txdb.rnorvegicus.ucsc.rn6.refgene/bioconductor-txdb.rnorvegicus.ucsc.rn6.refgene_3.4.6_src_all.tar.gz"
)
MD5="ea525daa75bcf165eb24f6e93f4dbf6c"
# Use a staging area in the conda dir rather than temp dirs, both to avoid
# permission issues as well as to have things downloaded in a predictable
# manner.
STAGING=$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM
mkdir -p $STAGING
TARBALL=$STAGING/$FN
SUCCESS=0
for URL in "${URLS[@]}"; do
curl "$URL" > "$TARBALL"
[[ $? == 0 ]] || continue
# Platform-specific md5sum checks.
if [[ $(uname -s) == "Linux" ]]; then
if md5sum -c <<<"$MD5 $TARBALL"; then
SUCCESS=1
break
fi
elif [[ $(uname -s) == "Darwin" ]]; then
if [[ $(md5 $TARBALL | cut -f4 -d " ") == "$MD5" ]]; then
SUCCESS=1
break
fi
fi
done
if [[ $SUCCESS != 1 ]]; then
echo "ERROR: post-link.sh was unable to download any of the following URLs with the md5sum $MD5:"
printf '%s\n' "${URLS[@]}"
exit 1
fi
# Install and clean up
R CMD INSTALL --library=$PREFIX/lib/R/library $TARBALL
rm $TARBALL
rmdir $STAGING
|
jerowe/bioconda-recipes
|
recipes/bioconductor-txdb.rnorvegicus.ucsc.rn6.refgene/post-link.sh
|
Shell
|
mit
| 1,572 |
#!/bin/bash
name=$1
shift
../bin/minion -redump $name | ../bin/minion $* --
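# e.g. ./redumpstrap.sh puzzle.minion -findallsols   (hypothetical model file and flag)
# re-dumps puzzle.minion and pipes the result into a second minion run with the remaining flags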
|
LeslieW/minion
|
mini-scripts/redumpstrap.sh
|
Shell
|
gpl-2.0
| 77 |
PART_NAME=firmware
REQUIRE_IMAGE_METADATA=1
RAMFS_COPY_BIN='fw_printenv fw_setenv'
RAMFS_COPY_DATA='/etc/fw_env.config /var/lock/fw_printenv.lock'
platform_check_image() {
case "$(board_name)" in
asus,rt-ac58u)
CI_UBIPART="UBI_DEV"
local ubidev=$(nand_find_ubi $CI_UBIPART)
local asus_root=$(nand_find_volume $ubidev jffs2)
[ -n "$asus_root" ] || return 0
cat << EOF
jffs2 partition is still present.
There's probably no space left
to install the filesystem.
You need to delete the jffs2 partition first:
# ubirmvol /dev/ubi0 --name=jffs2
Once this is done. Retry.
EOF
return 1
;;
esac
return 0;
}
zyxel_do_upgrade() {
local tar_file="$1"
local board_dir=$(tar tf $tar_file | grep -m 1 '^sysupgrade-.*/$')
board_dir=${board_dir%/}
tar Oxf $tar_file ${board_dir}/kernel | mtd write - kernel
if [ "$SAVE_CONFIG" -eq 1 ]; then
tar Oxf $tar_file ${board_dir}/root | mtd -j "$CONF_TAR" write - rootfs
else
tar Oxf $tar_file ${board_dir}/root | mtd write - rootfs
fi
}
platform_do_upgrade() {
case "$(board_name)" in
8dev,jalapeno |\
alfa-network,ap120c-ac |\
avm,fritzbox-7530 |\
avm,fritzrepeater-3000 |\
qxwlan,e2600ac-c2)
nand_do_upgrade "$ARGV"
;;
asus,map-ac2200)
CI_KERNPART="linux"
nand_do_upgrade "$1"
;;
asus,rt-ac58u)
CI_UBIPART="UBI_DEV"
CI_KERNPART="linux"
nand_do_upgrade "$1"
;;
linksys,ea6350v3 |\
linksys,ea8300)
platform_do_upgrade_linksys "$ARGV"
;;
meraki,mr33)
CI_KERNPART="part.safe"
nand_do_upgrade "$1"
;;
openmesh,a42 |\
openmesh,a62)
PART_NAME="inactive"
platform_do_upgrade_openmesh "$ARGV"
;;
zyxel,nbg6617)
zyxel_do_upgrade "$1"
;;
*)
default_do_upgrade "$ARGV"
;;
esac
}
platform_nand_pre_upgrade() {
case "$(board_name)" in
alfa-network,ap120c-ac)
part="$(awk -F 'ubi.mtd=' '{printf $2}' /proc/cmdline | sed -e 's/ .*$//')"
if [ "$part" = "rootfs1" ]; then
fw_setenv active 2 || exit 1
CI_UBIPART="rootfs2"
else
fw_setenv active 1 || exit 1
CI_UBIPART="rootfs1"
fi
;;
asus,rt-ac58u)
CI_UBIPART="UBI_DEV"
CI_KERNPART="linux"
;;
meraki,mr33)
CI_KERNPART="part.safe"
;;
esac
}
|
jcadduono/lede
|
target/linux/ipq40xx/base-files/lib/upgrade/platform.sh
|
Shell
|
gpl-2.0
| 2,135 |
#!/bin/bash
#
# Verify that subvolume sync waits until the subvolume is cleaned
source $TOP/tests/common
check_prereq mkfs.btrfs
setup_root_helper
run_check truncate -s 2G $IMAGE
run_check $TOP/mkfs.btrfs -f $IMAGE
run_check $SUDO_HELPER mount $IMAGE $TEST_MNT
run_check $SUDO_HELPER chmod a+rw $TEST_MNT
cd $TEST_MNT
for i in `seq 5`; do
run_check dd if=/dev/zero of=file$i bs=1M count=10
done
for sn in `seq 4`;do
run_check $SUDO_HELPER $TOP/btrfs subvolume snapshot . snap$sn
for i in `seq 10`; do
run_check dd if=/dev/zero of=snap$sn/file$i bs=1M count=10
done
done
run_check $SUDO_HELPER $TOP/btrfs subvolume list .
run_check $SUDO_HELPER $TOP/btrfs subvolume list -d .
idtodel=`run_check_stdout $SUDO_HELPER $TOP/btrfs inspect-internal rootid snap3`
# delete, sync after some time
run_check $SUDO_HELPER $TOP/btrfs subvolume delete -c snap3
{ sleep 5; run_check $TOP/btrfs filesystem sync $TEST_MNT; } &
run_check $SUDO_HELPER $TOP/btrfs subvolume sync . $idtodel
if run_check_stdout $SUDO_HELPER $TOP/btrfs subvolume list -d . |
grep -q "ID $idtodel.*DELETED"; then
_fail "sync did not wait for the subvolume cleanup"
fi
run_check $TOP/btrfs filesystem sync $TEST_MNT
run_check $SUDO_HELPER $TOP/btrfs subvolume list -d .
wait
cd ..
run_check $SUDO_HELPER umount $TEST_MNT
|
ralt/btrfs-progs
|
tests/misc-tests/009-subvolume-sync-must-wait/test.sh
|
Shell
|
gpl-2.0
| 1,304 |
#!/bin/sh
# genscripts.sh - generate the ld-emulation-target specific files
#
# Usage: genscripts.sh srcdir libdir exec_prefix \
# host target target_alias emulation_libpath \
# native_lib_dirs this_emulation tool_dir
#
# Sample usage:
# genscripts.sh /djm/ld-devo/devo/ld /usr/local/lib /usr/local \
# sparc-sun-sunos4.1.3 sparc-sun-sunos4.1.3 sparc-sun-sunos4.1.3 sun4 \
# "" sun3 sparc-sun-sunos4.1.3
# produces sun3.x sun3.xbn sun3.xn sun3.xr sun3.xu em_sun3.c
srcdir=$1
libdir=$2
exec_prefix=$3
host=$4
target=$5
target_alias=$6
EMULATION_LIBPATH=$7
NATIVE_LIB_DIRS=$8
EMULATION_NAME=$9
shift 9
# Can't use ${1:-$target_alias} here due to an Ultrix shell bug.
if [ "x$1" = "x" ] ; then
tool_lib=${exec_prefix}/${target_alias}/lib
else
tool_lib=${exec_prefix}/$1/lib
fi
# Include the emulation-specific parameters:
. ${srcdir}/emulparams/${EMULATION_NAME}.sh
if test -d ldscripts; then
true
else
mkdir ldscripts
fi
# Set the library search path, for libraries named by -lfoo.
# If LIB_PATH is defined (e.g., by Makefile) and non-empty, it is used.
# Otherwise, the default is set here.
#
# The format is the usual list of colon-separated directories.
# To force a logically empty LIB_PATH, do LIB_PATH=":".
if [ "x${LIB_PATH}" = "x" ] ; then
if [ "x${host}" = "x${target}" ] ; then
case " $EMULATION_LIBPATH " in
*" ${EMULATION_NAME} "*)
# Native, and default or emulation requesting LIB_PATH.
LIB_PATH=${libdir}
for lib in ${NATIVE_LIB_DIRS}; do
case :${LIB_PATH}: in
*:${lib}:*) ;;
*) LIB_PATH=${LIB_PATH}:${lib} ;;
esac
done
esac
fi
fi
# Always search $(tooldir)/lib, aka /usr/local/TARGET/lib.
LIB_PATH=${tool_lib}:${LIB_PATH}
LIB_SEARCH_DIRS=`echo ${LIB_PATH} | sed -e 's/:/ /g' -e 's/\([^ ][^ ]*\)/SEARCH_DIR(\\"\1\\");/g'`
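# e.g. LIB_PATH=/usr/lib:/usr/local/lib expands to
#   SEARCH_DIR("/usr/lib"); SEARCH_DIR("/usr/local/lib");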
# Generate 5 or 6 script files from a master script template in
# ${srcdir}/scripttempl/${SCRIPT_NAME}.sh. Which one of the 5 or 6
# script files is actually used depends on command line options given
# to ld. (SCRIPT_NAME was set in the emulparams_file.)
#
# A .x script file is the default script.
# A .xr script is for linking without relocation (-r flag).
# A .xu script is like .xr, but *do* create constructors (-Ur flag).
# A .xn script is for linking with -n flag (mix text and data on same page).
# A .xbn script is for linking with -N flag (mix text and data on same page).
# A .xs script is for generating a shared library with the --shared
# flag; it is only generated if $GENERATE_SHLIB_SCRIPT is set by the
# emulation parameters.
# A .xc script is for linking with -z combreloc; it is only generated if
# $GENERATE_COMBRELOC_SCRIPT is set by the emulation parameters or
# $SCRIPT_NAME is "elf".
# A .xsc script is for linking with --shared -z combreloc; it is generated
# if $GENERATE_COMBRELOC_SCRIPT is set by the emulation parameters or
# $SCRIPT_NAME is "elf" and $GENERATE_SHLIB_SCRIPT is set by the emulation
# parameters too.
if [ "x$SCRIPT_NAME" = "xelf" ]; then
GENERATE_COMBRELOC_SCRIPT=yes
fi
SEGMENT_SIZE=${SEGMENT_SIZE-${TARGET_PAGE_SIZE}}
# Determine DATA_ALIGNMENT for the 5 variants, using
# values specified in the emulparams/<emulation>.sh file or default.
DATA_ALIGNMENT_="${DATA_ALIGNMENT_-${DATA_ALIGNMENT-ALIGN(${SEGMENT_SIZE})}}"
DATA_ALIGNMENT_n="${DATA_ALIGNMENT_n-${DATA_ALIGNMENT_}}"
DATA_ALIGNMENT_N="${DATA_ALIGNMENT_N-${DATA_ALIGNMENT-.}}"
DATA_ALIGNMENT_r="${DATA_ALIGNMENT_r-${DATA_ALIGNMENT-}}"
DATA_ALIGNMENT_u="${DATA_ALIGNMENT_u-${DATA_ALIGNMENT_r}}"
LD_FLAG=r
DATA_ALIGNMENT=${DATA_ALIGNMENT_r}
DEFAULT_DATA_ALIGNMENT="ALIGN(${SEGMENT_SIZE})"
( echo "/* Script for ld -r: link without relocation */"
. ${srcdir}/emulparams/${EMULATION_NAME}.sh
. ${srcdir}/scripttempl/${SCRIPT_NAME}.sc
) | sed -e '/^ *$/d;s/[ ]*$//' > ldscripts/${EMULATION_NAME}.xr
LD_FLAG=u
DATA_ALIGNMENT=${DATA_ALIGNMENT_u}
CONSTRUCTING=" "
( echo "/* Script for ld -Ur: link w/out relocation, do create constructors */"
. ${srcdir}/emulparams/${EMULATION_NAME}.sh
. ${srcdir}/scripttempl/${SCRIPT_NAME}.sc
) | sed -e '/^ *$/d;s/[ ]*$//' > ldscripts/${EMULATION_NAME}.xu
LD_FLAG=
DATA_ALIGNMENT=${DATA_ALIGNMENT_}
RELOCATING=" "
( echo "/* Default linker script, for normal executables */"
. ${srcdir}/emulparams/${EMULATION_NAME}.sh
. ${srcdir}/scripttempl/${SCRIPT_NAME}.sc
) | sed -e '/^ *$/d;s/[ ]*$//' > ldscripts/${EMULATION_NAME}.x
LD_FLAG=n
DATA_ALIGNMENT=${DATA_ALIGNMENT_n}
TEXT_START_ADDR=${NONPAGED_TEXT_START_ADDR-${TEXT_START_ADDR}}
( echo "/* Script for -n: mix text and data on same page */"
. ${srcdir}/emulparams/${EMULATION_NAME}.sh
. ${srcdir}/scripttempl/${SCRIPT_NAME}.sc
) | sed -e '/^ *$/d;s/[ ]*$//' > ldscripts/${EMULATION_NAME}.xn
LD_FLAG=N
DATA_ALIGNMENT=${DATA_ALIGNMENT_N}
( echo "/* Script for -N: mix text and data on same page; don't align data */"
. ${srcdir}/emulparams/${EMULATION_NAME}.sh
. ${srcdir}/scripttempl/${SCRIPT_NAME}.sc
) | sed -e '/^ *$/d;s/[ ]*$//' > ldscripts/${EMULATION_NAME}.xbn
if test -n "$GENERATE_COMBRELOC_SCRIPT"; then
DATA_ALIGNMENT=${DATA_ALIGNMENT_c-${DATA_ALIGNMENT_}}
LD_FLAG=c
COMBRELOC=ldscripts/${EMULATION_NAME}.xc.tmp
( echo "/* Script for -z combreloc: combine and sort reloc sections */"
. ${srcdir}/emulparams/${EMULATION_NAME}.sh
. ${srcdir}/scripttempl/${SCRIPT_NAME}.sc
) | sed -e '/^ *$/d;s/[ ]*$//' > ldscripts/${EMULATION_NAME}.xc
rm -f ${COMBRELOC}
COMBRELOC=
fi
if test -n "$GENERATE_SHLIB_SCRIPT"; then
LD_FLAG=shared
DATA_ALIGNMENT=${DATA_ALIGNMENT_s-${DATA_ALIGNMENT_}}
CREATE_SHLIB=" "
# Note that TEXT_START_ADDR is set to NONPAGED_TEXT_START_ADDR.
(
echo "/* Script for ld --shared: link shared library */"
. ${srcdir}/emulparams/${EMULATION_NAME}.sh
. ${srcdir}/scripttempl/${SCRIPT_NAME}.sc
) | sed -e '/^ *$/d;s/[ ]*$//' > ldscripts/${EMULATION_NAME}.xs
if test -n "$GENERATE_COMBRELOC_SCRIPT"; then
LD_FLAG=cshared
DATA_ALIGNMENT=${DATA_ALIGNMENT_sc-${DATA_ALIGNMENT}}
COMBRELOC=ldscripts/${EMULATION_NAME}.xc.tmp
( echo "/* Script for --shared -z combreloc: shared library, combine & sort relocs */"
. ${srcdir}/emulparams/${EMULATION_NAME}.sh
. ${srcdir}/scripttempl/${SCRIPT_NAME}.sc
) | sed -e '/^ *$/d;s/[ ]*$//' > ldscripts/${EMULATION_NAME}.xsc
rm -f ${COMBRELOC}
COMBRELOC=
fi
fi
case " $EMULATION_LIBPATH " in
*" ${EMULATION_NAME} "*) COMPILE_IN=true;;
esac
# Generate e${EMULATION_NAME}.c.
. ${srcdir}/emultempl/${TEMPLATE_NAME-generic}.em
|
nslu2/Build-binutils-2.13.2
|
ld/genscripts.sh
|
Shell
|
gpl-2.0
| 6,527 |
#!/bin/bash
TESTDIR="./cpc/test/lib/plumed/mdruns"
./cpcc rm plumed_test_mdruns
./cpcc start plumed_test_mdruns
./cpcc import plumed
./cpcc instance plumed::mdruns mdruns
./cpcc transact
./cpcc set-file mdruns:in.tpr[0] $TESTDIR/topol.tpr
./cpcc set-file mdruns:in.tpr[1] $TESTDIR/topol.tpr
./cpcc set-file mdruns:in.tpr[2] $TESTDIR/topol.tpr
./cpcc set-file mdruns:in.plumed[0] $TESTDIR/plumed.dat
./cpcc set-file mdruns:in.plumed[1] $TESTDIR/plumed.dat
./cpcc set-file mdruns:in.plumed[2] $TESTDIR/plumed.dat
./cpcc commit
./cpcc activate mdruns
|
soellman/copernicus
|
test/lib/plumed/mdruns/run-test.sh
|
Shell
|
gpl-2.0
| 551 |
#!/bin/bash
./runtests.py --text --host sunburn --plugin ../transferplugin/transfer --plugin-xenapi ../../api.hg/scripts/examples/python/XenAPIPlugin.py --vm-template ../../../output/hyde-transfervm/transfervm.xva -v -v $@
|
xenserver/transfervm
|
transfertests/runtestsonsunburn.sh
|
Shell
|
gpl-2.0
| 223 |
alias fastfc='cd $WM_PROJECT_USER_DIR/FastFC'
alias fclib='cd $WM_PROJECT_USER_DIR/FastFC/libSrc'
alias fcsol='cd $WM_PROJECT_USER_DIR/FastFC/appSrc/solvers'
|
srifilter/FastFC
|
config/aliases.sh
|
Shell
|
gpl-3.0
| 158 |
#!/bin/sh -x
DATE=$(date "+%Y%m%d")
BASE=/tmp/traceset
OUT=lttng-traceset-$DATE/
cd $BASE
time tar -C $BASE -cjf lttng-traceset-$DATE.tar.bz2 $OUT/
cd -
|
mogeb/workload-kit
|
maintainer/2-zip.sh
|
Shell
|
gpl-3.0
| 155 |
#!/bin/bash
set -e
pegasus_lite_version_major="4"
pegasus_lite_version_minor="7"
pegasus_lite_version_patch="0"
pegasus_lite_enforce_strict_wp_check="true"
pegasus_lite_version_allow_wp_auto_download="true"
. pegasus-lite-common.sh
pegasus_lite_init
# cleanup in case of failures
trap pegasus_lite_signal_int INT
trap pegasus_lite_signal_term TERM
trap pegasus_lite_exit EXIT
echo -e "\n################################ Setting up workdir ################################" 1>&2
# work dir
export pegasus_lite_work_dir=$PWD
pegasus_lite_setup_work_dir
echo -e "\n###################### figuring out the worker package to use ######################" 1>&2
# figure out the worker package to use
pegasus_lite_worker_package
echo -e "\n##################### setting the xbit for executables staged #####################" 1>&2
# set the xbit for any executables staged
/bin/chmod +x example_workflow-calculateratio_0-1.0
echo -e "\n############################# executing the user tasks #############################" 1>&2
# execute the tasks
set +e
pegasus-kickstart -n example_workflow::calculateratio_0:1.0 -N ID0000007 -R condorpool -L example_workflow -T 2016-12-08T14:57:42+00:00 ./example_workflow-calculateratio_0-1.0
job_ec=$?
set -e
|
elainenaomi/sciwonc-dataflow-examples
|
dissertation2017/Experiment 1B/logs/w-09-A/20161208T145743+0000/00/00/calculateratio_0_ID0000007.sh
|
Shell
|
gpl-3.0
| 1,252 |
#!/bin/sh
echo "Generating build side..."
python genbuild.py > 016M_build.tbl
echo "Generating probe side..."
python genprobe.py > 256M_probe.tbl
|
siddharthsmenon/CS764-SortMerge
|
sort-merge/floki/test/datagen/generate.sh
|
Shell
|
gpl-3.0
| 147 |
#!/bin/bash
# @ account_no = my_cineca_computing_account
# @ shell = /bin/bash
# @ job_type = serial
# @ job_name = building_piccante.$(jobid)
# @ output = building_piccante.out.$(jobid)
# @ error = building_piccante.err.$(jobid)
# @ wall_clock_limit = 0:30:00
# @ class = serial
# @ notification = always
# @ notify_user = myemail@address
# @ queue
module purge
module load profile/advanced
module load bgq-xl
module load boost/1.51.0--bgq-xl--1.0
cd /path/to/piccante/
make fermi-perf
|
ALaDyn/piccante
|
script/build/make.fermi.perf.sh
|
Shell
|
gpl-3.0
| 493 |
#! /usr/bin/env sh
python register.py
python setup.py register -r pypitest
rm README.txt
python setup.py sdist upload -r pypitest
|
umn-earth-surface/IceFlow
|
topypi-test.sh
|
Shell
|
gpl-3.0
| 131 |
#!/bin/bash
#L_PATH=/usr/share/lua/5.1
#L_CPATH=/usr/lib/lua/5.1
L_PATH=
L_CPATH=
if [[ $L_PATH == "" ]]; then
echo "L_PATH not set, set it (LUA_PATH)."
exit
fi
if [[ $L_CPATH == "" ]]; then
echo "L_CPATH not set, set it (LUA_CPATH)."
exit
fi
echo "This script will install Splay Lua modules and Lua C modules."
echo
echo "These are only Lua modules of the Splay package, for the installation"
echo "of the other modules (that can already be installed in your system), see"
echo "INSTALL."
echo
echo "You need to have already compiled splayd. If not see INSTALL."
echo
echo "Are you ready ? (y/n)"
read ready
if [[ $ready != "y" ]]; then
exit
fi
echo "Lua libraries will go in $L_PATH."
echo "Lua C libraries will go in $L_CPATH."
echo "Is this correct ? (y/n)"
read correct
if [[ $correct != "y" ]]; then
echo "Aborting installation, edit this file to fix good values."
exit
fi
echo
echo "Installing Splay Lua libraries."
mkdir -p $L_PATH
mkdir -p $L_CPATH
cp modules/json.lua $L_PATH/
mkdir -p $L_PATH/splay
cp modules/splay/*.lua $L_PATH/splay
rm $L_PATH/splay/splay.lua
cp modules/*.lua $L_PATH/
mkdir -p $L_CPATH/splay
cp splay.so $L_CPATH/splay_core.so
cp luacrypto/crypto.so $L_CPATH/crypto.so
cp misc.so $L_CPATH/splay/misc_core.so
cp data_bits.so $L_CPATH/splay/data_bits_core.so
echo
echo
lua install_check.lua
|
splay-project/splay
|
tools/scripts/planetlab_utils/splay_pl_pack/splayd_planetlab/splayd/install.sh
|
Shell
|
gpl-3.0
| 1,334 |
#!/bin/bash
# Test for disk-only queue mode (with fsync for queue files)
# This test checks if queue files can be correctly written
# and read back, but it does not test the transition from
# memory to disk mode for DA queues.
# added 2009-06-09 by Rgerhards
# This file is part of the rsyslog project, released under GPLv3
# uncomment for debugging support:
echo \[diskqueue-fsync.sh\]: testing queue disk-only mode, fsync case
uname
if [ `uname` = "SunOS" ] ; then
echo "This test currently does not work on all flavors of Solaris."
exit 77
fi
. $srcdir/diag.sh init
. $srcdir/diag.sh startup diskqueue-fsync.conf
# 1000 messages should be enough - the disk fsync test is very slow!
. $srcdir/diag.sh injectmsg 0 1000
. $srcdir/diag.sh shutdown-when-empty # shut down rsyslogd when done processing messages
. $srcdir/diag.sh wait-shutdown
. $srcdir/diag.sh seq-check 0 999
. $srcdir/diag.sh exit
|
madedotcom/rsyslog
|
tests/diskqueue-fsync.sh
|
Shell
|
gpl-3.0
| 907 |
#!/bin/bash
# Integration tests for findpkg
set -e
GIT_BASE_URL="https://review.rdoproject.org/r"
WORKSPACE="${WORKSPACE:-/tmp}"
function test_rdopkg_findpkg(){
PKG_NAME=$(rdopkg findpkg $1 -l ${WORKSPACE}/rdoinfo | awk '/^name/ {print $2}')
if [ "$2" != "$PKG_NAME" ]; then
echo "$0 FAILED EXPECTED: $@ (GOT: $PKG_NAME)"
return 1
fi
echo -n .
return 0
}
RDOINFO_REL_PATH=$(realpath --relative-to="$PWD" ${WORKSPACE}/rdoinfo)
function test_rdopkg_findpkg_relpath(){
PKG_NAME=$(rdopkg findpkg $1 -l ${RDOINFO_REL_PATH}| awk '/^name/ {print $2}')
if [ "$2" != "$PKG_NAME" ]; then
echo "$0 FAILED EXPECTED: $@ (GOT: $PKG_NAME)"
return 1
fi
echo -n .
return 0
}
if [ -e /usr/bin/zuul-cloner ]; then
zuul-cloner --workspace $WORKSPACE $GIT_BASE_URL rdoinfo
else
# We're outside the gate, just do a regular git clone
pushd ${WORKSPACE}
# rm -rf first for idempotency
rm -rf rdoinfo
git clone "${GIT_BASE_URL}/rdoinfo" rdoinfo
popd
fi
echo -n "testing findpkg"
test_rdopkg_findpkg glance openstack-glance
test_rdopkg_findpkg glance-distgit openstack-glance
test_rdopkg_findpkg openstack-glance openstack-glance
test_rdopkg_findpkg puppet-glance puppet-glance
test_rdopkg_findpkg puppet/puppet-glance puppet-glance
echo -n "with relative path"
test_rdopkg_findpkg_relpath glanceclient python-glanceclient
test_rdopkg_findpkg_relpath openstack/glanceclient-distgit python-glanceclient
test_rdopkg_findpkg_relpath python-glanceclient python-glanceclient
echo 'OK'
|
redhat-openstack/rdopkg
|
tests/integration/test_findpkg.sh
|
Shell
|
apache-2.0
| 1,679 |
#!/bin/bash
LOG=/var/log/tomcat7/catalina.out
until [ -e $LOG ] ; do
sleep 2
done
started=`grep "INFO: Server startup in" "$LOG"`
while [ -z "$started" ] ; do
echo "Waiting for Tomcat to finish startup."
sleep 5
started=`grep "INFO: Server startup in" "$LOG"`
done
|
oanc/docker-brandeis
|
waitforstart.sh
|
Shell
|
apache-2.0
| 279 |
#!/bin/bash
# Copyright 2014 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -----------------------------------------------------------------------------
# Cross-compiles the kubectl client binaries and compresses them into platform specific tarballs.
#
# Assumes GOPATH has already been set up.
# Assumes all source is in the GOPATH.
# Builds into `../_output`.
#
# Use `make client-cross` to handle the GOPATH.
# TODO: move gopath env setup into this file, using lib scripts in k8s master
set -o errexit
set -o nounset
set -o pipefail
echo "Using GOPATH: ${GOPATH}"
KUBE_ROOT=$(cd $(dirname "${BASH_SOURCE}")/.. && pwd)
source "${KUBE_ROOT}/hack/lib/version.sh"
readonly KUBE_GO_PACKAGE=github.com/GoogleCloudPlatform/kubernetes
readonly LOCAL_OUTPUT_ROOT="${KUBE_ROOT}/_output"
readonly LOCAL_OUTPUT_BINPATH="${LOCAL_OUTPUT_ROOT}/bin"
readonly RELEASE_DIR="${LOCAL_OUTPUT_ROOT}/release-tars"
readonly KUBE_CLIENT_PLATFORMS=(
linux/amd64
darwin/amd64
)
# The set of client targets that we are building for all platforms
readonly KUBE_CLIENT_TARGETS=(
cmd/kubectl
)
readonly KUBE_CLIENT_BINARIES=("${KUBE_CLIENT_TARGETS[@]##*/}")
kube::version::get_version_vars
echo "Building version ${KUBE_GIT_VERSION}"
echo "Building client binaries"
for platform in "${KUBE_CLIENT_PLATFORMS[@]}"; do
# export env for go build
GOOS=${platform%/*}
GOARCH=${platform##*/}
# Fetch the version.
version_ldflags=$(kube::version::ldflags)
for target in "${KUBE_CLIENT_TARGETS[@]}"; do
binary=${target##*/}
dest_dir="${LOCAL_OUTPUT_BINPATH}/${GOOS}/${GOARCH}"
env GOPATH=${GOPATH} GOOS=${GOOS} GOARCH=${GOARCH} CGO_ENABLED=0 \
go build \
-ldflags "-extldflags '-static' ${version_ldflags}" \
-o "${dest_dir}/${binary}" \
"${KUBE_GO_PACKAGE}/${target}"
done
done
echo "Compressing client binaries"
mkdir -p "${RELEASE_DIR}"
for platform in "${KUBE_CLIENT_PLATFORMS[@]}"; do
GOOS=${platform%/*}
GOARCH=${platform##*/}
for binary in "${KUBE_CLIENT_BINARIES[@]}"; do
cd "${LOCAL_OUTPUT_BINPATH}/${GOOS}/${GOARCH}"
dest="${RELEASE_DIR}/${binary}-${KUBE_GIT_VERSION}-${GOOS}-${GOARCH}.tgz"
tar -cvz -f "${dest}" "${binary}"
echo "Created: ${dest}"
done
done
|
w4ngyi/kubernetes-mesos
|
hack/build-client-cross.sh
|
Shell
|
apache-2.0
| 2,783 |
# ========================================================================
# Special Deployment Parameters needed for the SchemaSpy instance.
# ------------------------------------------------------------------------
# The results need to be encoded as OpenShift template
# parameters for use with oc process.
#
# The generated config map is used to update the Caddy configuration
# ========================================================================
CONFIG_MAP_NAME=caddy-conf
SOURCE_FILE=$( dirname "$0" )/templates/schema-spy/Caddyfile
OUTPUT_FORMAT=json
OUTPUT_FILE=caddy-configmap_DeploymentConfig.json
generateConfigMap() {
_config_map_name=${1}
_source_file=${2}
_output_format=${3}
_output_file=${4}
if [ -z "${_config_map_name}" ] || [ -z "${_source_file}" ] || [ -z "${_output_format}" ] || [ -z "${_output_file}" ]; then
echo -e \\n"generateConfigMap; Missing parameter!"\\n
exit 1
fi
oc create configmap ${_config_map_name} --from-file ${_source_file} --dry-run -o ${_output_format} > ${_output_file}
}
generateUsername() {
# Generate a random username and Base64 encode the result ...
_userName=USER_$( cat /dev/urandom | LC_CTYPE=C tr -dc 'a-zA-Z0-9' | fold -w 4 | head -n 1 )
_userName=$(echo -n "${_userName}"|base64)
echo ${_userName}
}
generatePassword() {
# Generate a random password and Base64 encode the result ...
_password=$( cat /dev/urandom | LC_CTYPE=C tr -dc 'a-zA-Z0-9_' | fold -w 16 | head -n 1 )
_password=$(echo -n "${_password}"|base64)
echo ${_password}
}
generateConfigMap "${CONFIG_MAP_NAME}" "${SOURCE_FILE}" "${OUTPUT_FORMAT}" "${OUTPUT_FILE}"
_userName=$(generateUsername)
_password=$(generatePassword)
SPECIALDEPLOYPARMS="-p SCHEMASPY_USER=${_userName} -p SCHEMASPY_PASSWORD=${_password}"
echo ${SPECIALDEPLOYPARMS}
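# The echoed string is meant to be appended to an oc invocation, e.g. (hypothetical template file):
#   oc process -f schema-spy-deploy.json ${SPECIALDEPLOYPARMS} | oc create -f -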
|
WadeBarnes/TheOrgBook
|
tob-api/openshift/schema-spy-deploy.overrides.sh
|
Shell
|
apache-2.0
| 1,811 |
#!/bin/sh
# Make a new OS X Terminal tab with the current working directory.
if [ $# -ne 1 ]; then
PATHDIR=`pwd`
else
PATHDIR=$1
fi
/usr/bin/osascript <<EOF
activate application "Terminal"
tell application "System Events"
keystroke "t" using {command down}
end tell
tell application "Terminal"
repeat with win in windows
try
if get frontmost of win is true then
do script "cd $PATHDIR; clear" in (selected tab of win)
end if
end try
end repeat
end tell
EOF
clear
python -m SimpleHTTPServer 8001
|
frangucc/up
|
run_python_server_for_ionic.sh
|
Shell
|
apache-2.0
| 573 |
#!/bin/bash
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
set -e
set -x
source tensorflow/tools/ci_build/release/common.sh
install_ubuntu_16_pip_deps pip3.5
# Update bazel
update_bazel_linux
# Run configure.
export TF_NEED_GCP=1
export TF_NEED_HDFS=1
export TF_NEED_S3=1
export TF_NEED_CUDA=1
export TF_CUDA_VERSION=10.1
export TF_CUDNN_VERSION=7
export TF_NEED_TENSORRT=1
export TENSORRT_INSTALL_PATH=/usr/local/tensorrt
export CC_OPT_FLAGS='-mavx'
export PYTHON_BIN_PATH=$(which python3.5)
export TF2_BEHAVIOR=1
export PROJECT_NAME="tensorflow_gpu"
export LD_LIBRARY_PATH="/usr/local/cuda:/usr/local/cuda/lib64:/usr/local/cuda/extras/CUPTI/lib64:$TENSORRT_INSTALL_PATH/lib"
export TF_CUDA_COMPUTE_CAPABILITIES=3.5,3.7,5.2,6.0,6.1,7.0
yes "" | "$PYTHON_BIN_PATH" configure.py
# Get the default test targets for bazel.
source tensorflow/tools/ci_build/build_scripts/PRESUBMIT_BUILD_TARGETS.sh
tag_filters="gpu,requires-gpu,-no_gpu,-no_oss,-oss_serial,-no_oss_py35"
bazel test --config=cuda --config=opt \
--crosstool_top=//third_party/toolchains/preconfig/ubuntu16.04/gcc7_manylinux2010-nvcc-cuda10.1:toolchain \
--linkopt=-lrt \
--action_env=TF2_BEHAVIOR="${TF2_BEHAVIOR}" \
--test_lang_filters=py \
--test_tag_filters=${tag_filters} \
--build_tag_filters=${tag_filters} \
--test_timeout="300,450,1200,3600" --local_test_jobs=4 \
--test_output=errors --verbose_failures=true --keep_going \
--run_under=//tensorflow/tools/ci_build/gpu_build:parallel_gpu_execute \
-- ${DEFAULT_BAZEL_TARGETS} -//tensorflow/lite/...
|
adit-chandra/tensorflow
|
tensorflow/tools/ci_build/release/ubuntu_16/gpu_py35_full/nonpip.sh
|
Shell
|
apache-2.0
| 2,185 |
#!/bin/bash
# Assumes that spark-submit is in your PATH
# and the MNIST dataset has been downloaded using the script in the data directory
export NUM_ALCHEMIST_RANKS=3
export TMPDIR=/tmp
filepath=`pwd`/data/mnist.t
format=LIBSVM
numFeatures=10000
gamma=.001
numClass=10
whereRFM=ALCHEMIST
spark-submit --verbose\
--master local[*] \
--driver-memory 2G\
--class alchemist.test.regression.AlchemistRFMClassification\
test/target/scala-2.11/alchemist-tests-assembly-0.0.2.jar $filepath $format $numFeatures $gamma $numClass $whereRFM
exit
|
jey/alchemist
|
run-cgkrr-test-mac.sh
|
Shell
|
apache-2.0
| 548 |
#!/bin/bash
set -e
export LC_ALL=C
source cdx/cdx.sh
source variables_entrypoint.sh
source common_functions.sh
source debug.sh
###########################
# CONFIGURATION GENERATOR #
###########################
# Load in the bootstrapping routines
# based on the data store
case "$KV_TYPE" in
etcd)
# TAG: kv_type_etcd
source /config.kv.etcd.sh
;;
k8s|kubernetes)
# TAG: kv_type_k8s
source /config.k8s.sh
;;
*)
source /config.static.sh
;;
esac
###############
# CEPH_DAEMON #
###############
# Normalize DAEMON to lowercase
CEPH_DAEMON=$(to_lowercase "${CEPH_DAEMON}")
create_mandatory_directories >/dev/null
# If we are given a valid first argument, set the
# CEPH_DAEMON variable from it
case "$CEPH_DAEMON" in
populate_kvstore)
# TAG: populate_kvstore
source populate_kv.sh
populate_kv
;;
mon)
# TAG: mon
source start_mon.sh
start_mon
;;
osd)
# TAG: osd
source start_osd.sh
start_osd
;;
osd_directory)
# TAG: osd_directory
source start_osd.sh
OSD_TYPE="directory"
start_osd
;;
osd_directory_single)
# TAG: osd_directory_single
source start_osd.sh
OSD_TYPE="directory_single"
start_osd
;;
osd_ceph_disk)
# TAG: osd_ceph_disk
source start_osd.sh
OSD_TYPE="disk"
start_osd
;;
osd_ceph_disk_prepare)
# TAG: osd_ceph_disk_prepare
source start_osd.sh
OSD_TYPE="prepare"
start_osd
;;
osd_ceph_disk_activate)
# TAG: osd_ceph_disk_activate
source start_osd.sh
OSD_TYPE="activate"
start_osd
;;
osd_ceph_activate_journal)
# TAG: osd_ceph_activate_journal
source start_osd.sh
OSD_TYPE="activate_journal"
start_osd
;;
mds)
# TAG: mds
source start_mds.sh
start_mds
;;
rgw)
# TAG: rgw
source start_rgw.sh
start_rgw
;;
rgw_user)
# TAG: rgw_user
source start_rgw.sh
create_rgw_user
;;
restapi)
# TAG: restapi
source start_restapi.sh
start_restapi
;;
rbd_mirror)
# TAG: rbd_mirror
source start_rbd_mirror.sh
start_rbd_mirror
;;
nfs)
# TAG: nfs
source start_nfs.sh
start_nfs
;;
zap_device)
# TAG: zap_device
source zap_device.sh
zap_device
;;
mon_health)
# TAG: mon_health
source watch_mon_health.sh
watch_mon_health
;;
mgr)
# TAG: mgr
source start_mgr.sh
start_mgr
;;
disk_introspection)
# TAG: disk_introspection
if [[ "$KV_TYPE" =~ k8s|kubernetes ]]; then
source disk_introspection.sh
else
log "You can not use the disk introspection method outside a Kubernetes environment"
log "Make sure KV_TYPE equals either k8s or kubernetes"
fi
;;
demo)
# TAG: demo
source demo.sh
;;
disk_list)
# TAG: disk_list
source disk_list.sh
start_disk_list
;;
cdx*|ceph-api|admin)
shift
cdx_entrypoint "$CEPH_DAEMON" $@
;;
*)
invalid_ceph_daemon
;;
esac
exit 0
|
cdxvirt/ceph-docker
|
ceph-releases/luminous/ubuntu/16.04/daemon/cdx/entrypoint.sh
|
Shell
|
apache-2.0
| 3,017 |
#!/bin/bash
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
# If we have any arguments at all, this is a push and not just setup.
is_push=$@
function ensure-install-dir() {
INSTALL_DIR="/var/cache/kubernetes-install"
mkdir -p ${INSTALL_DIR}
cd ${INSTALL_DIR}
}
function set-broken-motd() {
echo -e '\nBroken (or in progress) GCE Kubernetes node setup! Suggested first step:\n tail /var/log/startupscript.log\n' > /etc/motd
}
function set-good-motd() {
echo -e '\n=== GCE Kubernetes node setup complete ===\n' > /etc/motd
}
function curl-metadata() {
curl --fail --silent -H 'Metadata-Flavor: Google' "http://metadata/computeMetadata/v1/instance/attributes/${1}"
}
function set-kube-env() {
local kube_env_yaml="${INSTALL_DIR}/kube_env.yaml"
until curl-metadata kube-env > "${kube_env_yaml}"; do
echo 'Waiting for kube-env...'
sleep 3
done
# kube-env has all the environment variables we care about, in a flat yaml format
eval $(python -c '''
import pipes,sys,yaml
for k,v in yaml.load(sys.stdin).iteritems():
print "readonly {var}={value}".format(var = k, value = pipes.quote(str(v)))
''' < "${kube_env_yaml}")
# We bake the KUBELET_TOKEN in separately to avoid auth information
# having to be re-communicated on kube-push. (Otherwise the client
# has to keep the bearer token around to handle generating a valid
# kube-env.)
if [[ -z "${KUBELET_TOKEN:-}" ]]; then
until KUBELET_TOKEN=$(curl-metadata kube-token); do
echo 'Waiting for metadata KUBELET_TOKEN...'
sleep 3
done
fi
# Infer master status from presence in node pool
if [[ $(hostname) = ${NODE_INSTANCE_PREFIX}* ]]; then
KUBERNETES_MASTER="false"
else
KUBERNETES_MASTER="true"
fi
if [[ "${KUBERNETES_MASTER}" != "true" ]] && [[ -z "${MINION_IP_RANGE:-}" ]]; then
# This block of code should go away once the master can allocate CIDRs
until MINION_IP_RANGE=$(curl-metadata node-ip-range); do
echo 'Waiting for metadata MINION_IP_RANGE...'
sleep 3
done
fi
}
function remove-docker-artifacts() {
echo "== Deleting docker0 =="
# Forcibly install bridge-utils (options borrowed from Salt logs).
until apt-get -q -y -o DPkg::Options::=--force-confold -o DPkg::Options::=--force-confdef install bridge-utils; do
echo "== install of bridge-utils failed, retrying =="
sleep 5
done
# Remove docker artifacts on minion nodes, if present
iptables -t nat -F || true
ifconfig docker0 down || true
brctl delbr docker0 || true
echo "== Finished deleting docker0 =="
}
# Retry a download until we get it.
#
# $1 is the URL to download
download-or-bust() {
local -r url="$1"
local -r file="${url##*/}"
rm -f "$file"
until [[ -e "${1##*/}" ]]; do
echo "Downloading file ($1)"
curl --ipv4 -Lo "$file" --connect-timeout 20 --retry 6 --retry-delay 10 "$1"
done
}
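# Example usage (hypothetical URL):
#   download-or-bust "https://example.org/kube/server.tar.gz"
# retries until server.tar.gz exists in the current working directory.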
# Install salt from GCS. See README.md for instructions on how to update these
# debs.
install-salt() {
echo "== Refreshing package database =="
until apt-get update; do
echo "== apt-get update failed, retrying =="
sleep 5
done
mkdir -p /var/cache/salt-install
cd /var/cache/salt-install
DEBS=(
libzmq3_3.2.3+dfsg-1~bpo70~dst+1_amd64.deb
python-zmq_13.1.0-1~bpo70~dst+1_amd64.deb
salt-common_2014.1.13+ds-1~bpo70+1_all.deb
salt-minion_2014.1.13+ds-1~bpo70+1_all.deb
)
URL_BASE="https://storage.googleapis.com/kubernetes-release/salt"
for deb in "${DEBS[@]}"; do
download-or-bust "${URL_BASE}/${deb}"
done
# Based on
# https://major.io/2014/06/26/install-debian-packages-without-starting-daemons/
# We do this to prevent Salt from starting the salt-minion
# daemon. The other packages don't have relevant daemons. (If you
# add a package that needs a daemon started, add it to a different
# list.)
cat > /usr/sbin/policy-rc.d <<EOF
#!/bin/sh
echo "Salt shall not start." >&2
exit 101
EOF
chmod 0755 /usr/sbin/policy-rc.d
for deb in "${DEBS[@]}"; do
echo "== Installing ${deb}, ignore dependency complaints (will fix later) =="
dpkg --force-depends -i "${deb}"
done
# This will install any of the unmet dependencies from above.
echo "== Installing unmet dependencies =="
until apt-get install -f -y; do
echo "== apt-get install failed, retrying =="
sleep 5
done
rm /usr/sbin/policy-rc.d
# Log a timestamp
echo "== Finished installing Salt =="
}
# Ensure salt-minion never runs
stop-salt-minion() {
# This ensures it on next reboot
echo manual > /etc/init/salt-minion.override
if service salt-minion status >/dev/null; then
echo "salt-minion started in defiance of runlevel policy, aborting startup." >&2
return 1
fi
}
# Mounts a persistent disk (formatting if needed) to store the persistent data
# on the master -- etcd's data, a few settings, and security certs/keys/tokens.
#
# This function can be reused to mount an existing PD because all of its
# operations modifying the disk are idempotent -- safe_format_and_mount only
# formats an unformatted disk, and mkdir -p will leave a directory be if it
# already exists.
mount-master-pd() {
# TODO(zmerlynn): GKE is still lagging in master-pd creation
if [[ ! -e /dev/disk/by-id/google-master-pd ]]; then
return
fi
device_info=$(ls -l /dev/disk/by-id/google-master-pd)
relative_path=${device_info##* }
device_path="/dev/disk/by-id/${relative_path}"
# Format and mount the disk, create directories on it for all of the master's
# persistent data, and link them to where they're used.
mkdir -p /mnt/master-pd
/usr/share/google/safe_format_and_mount -m "mkfs.ext4 -F" "${device_path}" /mnt/master-pd
# Contains all the data stored in etcd
mkdir -m 700 -p /mnt/master-pd/var/etcd
# Contains the dynamically generated apiserver auth certs and keys
mkdir -p /mnt/master-pd/srv/kubernetes
# Contains the cluster's initial config parameters and auth tokens
mkdir -p /mnt/master-pd/srv/salt-overlay
ln -s /mnt/master-pd/var/etcd /var/etcd
ln -s /mnt/master-pd/srv/kubernetes /srv/kubernetes
ln -s /mnt/master-pd/srv/salt-overlay /srv/salt-overlay
# This is a bit of a hack to get around the fact that salt has to run after the
# PD and mounted directory are already set up. We can't give ownership of the
# directory to etcd until the etcd user and group exist, but they don't exist
# until salt runs if we don't create them here. We could alternatively make the
# permissions on the directory more permissive, but this seems less bad.
useradd -s /sbin/nologin -d /var/etcd etcd
chown etcd /mnt/master-pd/var/etcd
chgrp etcd /mnt/master-pd/var/etcd
}
# Create the overlay files for the salt tree. We create these in a separate
# place so that we can blow away the rest of the salt configs on a kube-push and
# re-apply these.
function create-salt-pillar() {
# Always overwrite the cluster-params.sls (even on a push, we have
# these variables)
mkdir -p /srv/salt-overlay/pillar
cat <<EOF >/srv/salt-overlay/pillar/cluster-params.sls
instance_prefix: '$(echo "$INSTANCE_PREFIX" | sed -e "s/'/''/g")'
node_instance_prefix: '$(echo "$NODE_INSTANCE_PREFIX" | sed -e "s/'/''/g")'
portal_net: '$(echo "$PORTAL_NET" | sed -e "s/'/''/g")'
enable_cluster_monitoring: '$(echo "$ENABLE_CLUSTER_MONITORING" | sed -e "s/'/''/g")'
enable_node_monitoring: '$(echo "$ENABLE_NODE_MONITORING" | sed -e "s/'/''/g")'
enable_cluster_logging: '$(echo "$ENABLE_CLUSTER_LOGGING" | sed -e "s/'/''/g")'
enable_node_logging: '$(echo "$ENABLE_NODE_LOGGING" | sed -e "s/'/''/g")'
logging_destination: '$(echo "$LOGGING_DESTINATION" | sed -e "s/'/''/g")'
elasticsearch_replicas: '$(echo "$ELASTICSEARCH_LOGGING_REPLICAS" | sed -e "s/'/''/g")'
enable_cluster_dns: '$(echo "$ENABLE_CLUSTER_DNS" | sed -e "s/'/''/g")'
dns_replicas: '$(echo "$DNS_REPLICAS" | sed -e "s/'/''/g")'
dns_server: '$(echo "$DNS_SERVER_IP" | sed -e "s/'/''/g")'
dns_domain: '$(echo "$DNS_DOMAIN" | sed -e "s/'/''/g")'
admission_control: '$(echo "$ADMISSION_CONTROL" | sed -e "s/'/''/g")'
EOF
}
# This should only happen on cluster initialization
function create-salt-auth() {
mkdir -p /srv/salt-overlay/salt/nginx
echo "${MASTER_HTPASSWD}" > /srv/salt-overlay/salt/nginx/htpasswd
mkdir -p /srv/salt-overlay/salt/kube-apiserver
known_tokens_file="/srv/salt-overlay/salt/kube-apiserver/known_tokens.csv"
(umask 077;
echo "${KUBELET_TOKEN},kubelet,kubelet" > "${known_tokens_file}")
mkdir -p /srv/salt-overlay/salt/kubelet
kubelet_auth_file="/srv/salt-overlay/salt/kubelet/kubernetes_auth"
(umask 077;
echo "{\"BearerToken\": \"${KUBELET_TOKEN}\", \"Insecure\": true }" > "${kubelet_auth_file}")
}
function download-release() {
echo "Downloading binary release tar ($SERVER_BINARY_TAR_URL)"
download-or-bust "$SERVER_BINARY_TAR_URL"
echo "Downloading Salt tar ($SALT_TAR_URL)"
download-or-bust "$SALT_TAR_URL"
echo "Unpacking Salt tree"
rm -rf kubernetes
tar xzf "${SALT_TAR_URL##*/}"
echo "Running release install script"
sudo kubernetes/saltbase/install.sh "${SERVER_BINARY_TAR_URL##*/}"
}
function fix-apt-sources() {
sed -i -e "\|^deb.*http://http.debian.net/debian| s/^/#/" /etc/apt/sources.list
sed -i -e "\|^deb.*http://ftp.debian.org/debian| s/^/#/" /etc/apt/sources.list.d/backports.list
}
function salt-run-local() {
cat <<EOF >/etc/salt/minion.d/local.conf
file_client: local
file_roots:
base:
- /srv/salt
EOF
}
function salt-debug-log() {
cat <<EOF >/etc/salt/minion.d/log-level-debug.conf
log_level: debug
log_level_logfile: debug
EOF
}
function salt-master-role() {
cat <<EOF >/etc/salt/minion.d/grains.conf
grains:
roles:
- kubernetes-master
cbr-cidr: ${MASTER_IP_RANGE}
cloud: gce
EOF
}
function salt-node-role() {
cat <<EOF >/etc/salt/minion.d/grains.conf
grains:
roles:
- kubernetes-pool
cbr-cidr: '$(echo "$MINION_IP_RANGE" | sed -e "s/'/''/g")'
cloud: gce
EOF
}
function salt-docker-opts() {
DOCKER_OPTS=""
if [[ -n "${EXTRA_DOCKER_OPTS-}" ]]; then
DOCKER_OPTS="${EXTRA_DOCKER_OPTS}"
fi
# Decide whether to enable the cache
if [[ "${ENABLE_DOCKER_REGISTRY_CACHE}" == "true" ]]; then
REGION=$(echo "${ZONE}" | cut -f 1,2 -d -)
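# e.g. ZONE=us-central1-b yields REGION=us-central1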
echo "Enable docker registry cache at region: " $REGION
DOCKER_OPTS="${DOCKER_OPTS} --registry-mirror='https://${REGION}.docker-cache.clustermaster.net'"
fi
if [[ -n "{DOCKER_OPTS}" ]]; then
cat <<EOF >>/etc/salt/minion.d/grains.conf
docker_opts: '$(echo "$DOCKER_OPTS" | sed -e "s/'/''/g")'
EOF
fi
}
function salt-set-apiserver() {
local kube_master_fqdn
until kube_master_fqdn=$(getent hosts ${KUBERNETES_MASTER_NAME} | awk '{ print $2 }'); do
echo "Waiting for DNS resolution of ${KUBERNETES_MASTER_NAME}..."
sleep 3
done
cat <<EOF >>/etc/salt/minion.d/grains.conf
api_servers: '${kube_master_fqdn}'
apiservers: '${kube_master_fqdn}'
EOF
}
function configure-salt() {
fix-apt-sources
mkdir -p /etc/salt/minion.d
salt-run-local
if [[ "${KUBERNETES_MASTER}" == "true" ]]; then
salt-master-role
else
salt-node-role
salt-docker-opts
salt-set-apiserver
fi
install-salt
stop-salt-minion
}
function run-salt() {
echo "== Calling Salt =="
salt-call --local state.highstate || true
}
####################################################################################
if [[ -z "${is_push}" ]]; then
echo "== kube-up node config starting =="
set-broken-motd
ensure-install-dir
set-kube-env
[[ "${KUBERNETES_MASTER}" == "true" ]] && mount-master-pd
create-salt-pillar
create-salt-auth
download-release
configure-salt
remove-docker-artifacts
run-salt
set-good-motd
echo "== kube-up node config done =="
else
echo "== kube-push node config starting =="
ensure-install-dir
set-kube-env
create-salt-pillar
download-release
run-salt
echo "== kube-push node config done =="
fi
|
zhanglianx111/kubernetes
|
cluster/gce/configure-vm.sh
|
Shell
|
apache-2.0
| 12,498 |
#!/bin/bash
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Used to train festvox voices.
#
# Example usage -
# ./festival_utils/build_festvox_voice.sh ~/Desktop/audio/si_lk/ si ~/si_lk_2/
set -x
set -o errexit
set -o pipefail
set -o nounset
# Whether to run festvox TTS training.
TRAIN=true
# Path to custom txt.done.data
CUSTOM_TXT_DONE_DATA_PATH=""
while getopts "tc:" opt; do
case ${opt} in
c) # Path to custom txt.done.data
CUSTOM_TXT_DONE_DATA_PATH="${OPTARG}"
echo "Using custom txt.done.data - ${CUSTOM_TXT_DONE_DATA_PATH}"
;;
t ) # Option to disable training.
TRAIN=false
echo "Disable festvox training"
;;
esac
done
shift $((OPTIND-1))
if [[ $# -ne 3 ]]; then
echo "Usage: ./festival_utils/build_festvox_voice.sh <path to wavs> <lang> <voice_dir>"
exit 1
fi
BASEDIR=$(dirname "$0")
PATH_TO_WAVS=$1
LANG=$2
VOICE_DIR=$3
TXT_DONE_DATA_PATH=""
if [[ "${CUSTOM_TXT_DONE_DATA_PATH}" == "" ]]; then
TXT_DONE_DATA_PATH="${LANG}/festvox/txt.done.data"
else
TXT_DONE_DATA_PATH="${CUSTOM_TXT_DONE_DATA_PATH}"
fi
# Check required env variables.
echo "${FESTVOXDIR?Set env variable FESTVOXDIR}"
# Set up the Festvox Clustergen build:
CWD=${PWD}
mkdir -p "${VOICE_DIR}"
cd "${VOICE_DIR}"
"${FESTVOXDIR}/src/clustergen/setup_cg" goog "${LANG}" unison
cd "${CWD}"
# Symlink wavs
rm -rf "${VOICE_DIR}/wav"
ln -sf "${PATH_TO_WAVS}" "${VOICE_DIR}/wav"
# Copy prompts
cp "${TXT_DONE_DATA_PATH}" "${VOICE_DIR}/etc/txt.done.data"
# Copy festvox lexicon file.
cp "${LANG}/festvox/lexicon.scm" "${VOICE_DIR}/festvox/lexicon.scm"
# Setup the phonology.
PHONOLOGY="${LANG}/festvox/ipa_phonology.json"
if [ ! -f "${PHONOLOGY}" ];
then
PHONOLOGY="${LANG}/festvox/phonology.json"
fi
# Generate various festvox files (wagon description files for (mcep,f0,dur), festvox phoneset etc).
python "${BASEDIR}/apply_phonology.py" "${PHONOLOGY}" "${VOICE_DIR}"
cd "${VOICE_DIR}"
if [[ ${TRAIN} == true ]]; then
# Run the Festvox Clustergen build. This will take a couple of hours to complete.
# Total running time depends heavily on the number of CPU cores available.
echo "Training festvox ${LANG} voice"
time bin/build_cg_voice
fi
|
googlei18n/language-resources
|
festival_utils/build_festvox_voice.sh
|
Shell
|
apache-2.0
| 2,744 |
#! /bin/bash
. ./config.sh
start_suite "Various launch-proxy configurations"
# Booting it over unix socket listens on unix socket
run_on $HOST1 COVERAGE=$COVERAGE weave launch-proxy
assert_raises "run_on $HOST1 sudo docker -H unix:///var/run/weave/weave.sock ps"
assert_raises "proxy docker_on $HOST1 ps" 1
weave_on $HOST1 stop-proxy
# Booting it over tcp listens on tcp
weave_on $HOST1 launch-proxy
assert_raises "run_on $HOST1 sudo docker -H unix:///var/run/weave/weave.sock ps" 1
assert_raises "proxy docker_on $HOST1 ps"
weave_on $HOST1 stop-proxy
# Booting it over tcp (no prefix) listens on tcp
DOCKER_HOST=tcp://$HOST1:$DOCKER_PORT $WEAVE launch-proxy
assert_raises "run_on $HOST1 sudo docker -H unix:///var/run/weave/weave.sock ps" 1
assert_raises "proxy docker_on $HOST1 ps"
weave_on $HOST1 stop-proxy
# Booting it with -H outside /var/run/weave, still works
socket="$($SSH $HOST1 mktemp -d)/weave.sock"
weave_on $HOST1 launch-proxy -H unix://$socket
assert_raises "run_on $HOST1 sudo docker -H unix:///$socket ps" 0
weave_on $HOST1 stop-proxy
# Booting it against non-standard docker unix sock
run_on $HOST1 "DOCKER_HOST=unix:///var/run/alt-docker.sock COVERAGE=$COVERAGE weave launch-proxy -H tcp://0.0.0.0:12375"
assert_raises "proxy docker_on $HOST1 ps"
weave_on $HOST1 stop-proxy
# Booting it over TLS errors out
assert_raises "DOCKER_CLIENT_ARGS='--tls' weave_on $HOST1 launch-proxy" 1
assert_raises "DOCKER_CERT_PATH='./tls' DOCKER_TLS_VERIFY=1 weave_on $HOST1 launch-proxy" 1
# Booting it with a specific -H overrides defaults
weave_on $HOST1 launch-proxy -H tcp://0.0.0.0:12345
assert_raises "run_on $HOST1 sudo docker -H tcp://$HOST1:12345 ps"
assert_raises "proxy docker_on $HOST1 ps" 1
weave_on $HOST1 stop-proxy
end_suite
|
rade/weave
|
test/690_proxy_config_test.sh
|
Shell
|
apache-2.0
| 1,750 |
#!/bin/bash
# Install conda
# http://conda.pydata.org/docs/travis.html#the-travis-yml-file
wget http://repo.continuum.io/miniconda/Miniconda3-latest-MacOSX-x86_64.sh -O miniconda.sh
bash miniconda.sh -b -p $HOME/miniconda
export PATH="$HOME/miniconda/bin:$PATH"
# Install Python dependencies
source "$( dirname "${BASH_SOURCE[0]}" )"/setup_dependencies_common.sh
|
dannygoldstein/sncosmo
|
.continuous-integration/travis/setup_environment_osx.sh
|
Shell
|
bsd-3-clause
| 365 |
#!/bin/bash
# Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# abort on error
set -e
# Load common constants and variables.
. "$(dirname "$0")/common.sh"
main() {
if [[ $# -ne 1 ]]; then
echo "Usage $0 <image>"
exit 1
fi
local image="$1"
local loopdev rootfs
if [[ -d "${image}" ]]; then
rootfs="${image}"
else
rootfs=$(make_temp_dir)
loopdev=$(loopback_partscan "${image}")
mount_loop_image_partition_ro "${loopdev}" 3 "${rootfs}"
fi
if ! no_chronos_password "${rootfs}"; then
die "chronos password is set! Shouldn't be for release builds."
fi
}
main "$@"
|
coreboot/vboot
|
scripts/image_signing/ensure_no_password.sh
|
Shell
|
bsd-3-clause
| 730 |
#!/bin/bash
apt-get update
apt-get upgrade -y
apt-get install apache2 php5 php5-sqlite php5-curl php5-mcrypt -y
rm /var/www/html/index.html
a2enmod rewrite
php5enmod mcrypt
cp apache2.conf /etc/apache2/apache2.conf
cp php.ini /etc/php5/apache2/php.ini
cp 000-default.conf /etc/apache2/sites-available/000-default.conf
chown www-data:www-data /var/www
# TODO: add the HTTPS version here, plus all the SSL certificate generation etc.
service apache2 start
|
EcrituresNumeriques/Lightium
|
conf/install.sh
|
Shell
|
mit
| 429 |
#!/bin/bash
# © Copyright IBM Corporation 2015.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Eclipse Public License v1.0
# which accompanies this distribution, and is available at
# http://www.eclipse.org/legal/epl-v10.html
if [ "$LICENSE" = "accept" ]; then
exit 0
elif [ "$LICENSE" = "view" ]; then
case "$LANG" in
zh_TW*) LICENSE_FILE=Chinese_TW.txt ;;
zh*) LICENSE_FILE=Chinese.txt ;;
cs*) LICENSE_FILE=Czech.txt ;;
en*) LICENSE_FILE=English.txt ;;
fr*) LICENSE_FILE=French.txt ;;
de*) LICENSE_FILE=German.txt ;;
el*) LICENSE_FILE=Greek.txt ;;
id*) LICENSE_FILE=Indonesian.txt ;;
it*) LICENSE_FILE=Italian.txt ;;
ja*) LICENSE_FILE=Japanese.txt ;;
ko*) LICENSE_FILE=Korean.txt ;;
lt*) LICENSE_FILE=Lithuanian.txt ;;
pl*) LICENSE_FILE=Polish.txt ;;
pt*) LICENSE_FILE=Portuguese.txt ;;
ru*) LICENSE_FILE=Russian.txt ;;
sl*) LICENSE_FILE=Slovenian.txt ;;
es*) LICENSE_FILE=Spanish.txt ;;
tr*) LICENSE_FILE=Turkish.txt ;;
*) LICENSE_FILE=English.txt ;;
esac
cat /opt/ibm/iib-10.0.0.7/license/$LICENSE_FILE
exit 1
else
echo -e "Set environment variable LICENSE=accept to indicate acceptance of license terms and conditions.\n\nLicense agreements and information can be viewed by running this image with the environment variable LICENSE=view. You can also set the LANG environment variable to view the license in a different language."
exit 1
fi
|
JBarbes/IBM-MQ-Container-JBA02
|
iib-license-check.sh
|
Shell
|
epl-1.0
| 1,459 |
#!/bin/sh
# Copyright (C) 1999-2005 ImageMagick Studio LLC
#
# This program is covered by multiple licenses, which are described in
# LICENSE. You should have received a copy of LICENSE with this
# package; otherwise see http://www.imagemagick.org/script/license.php.
#
# Test for '${CONVERT}' utility.
#
set -e # Exit on any error
. ${srcdir}/utilities/tests/common.sh
${CONVERT} ${MODEL_MIFF} -implode 0.5 -label Implode Implode_out.miff
|
atmark-techno/atmark-dist
|
user/imagemagick/utilities/tests/implode.sh
|
Shell
|
gpl-2.0
| 443 |
#!/bin/bash
KERNEL_VERSION=2.6.29-ts4700-00
INSTALL_MOD_PATH=/home/`whoami`/src/ts-4700/dist/linux-2.6.29-ts4700-00/modules-install
INITRD_PATH=${INSTALL_MOD_PATH}/initrd-modules
TRIM_PATH=${INITRD_PATH}/lib/modules/${KERNEL_VERSION}/kernel
rm -rf ${INSTALL_MOD_PATH}
if [ ! -d ${INITRD_PATH} ]; then mkdir -p ${INITRD_PATH}; fi
# Copy all modules.
INSTALL_MOD_PATH=${INSTALL_MOD_PATH} make modules_install
cp -r ${INSTALL_MOD_PATH}/lib ${INITRD_PATH}
# Remove all modules but those we need in the 'initrd'.
rm -rf ${TRIM_PATH}/{crypto,fs,lib,net}
rm -rf ${TRIM_PATH}/drivers/{block,i2c,input,misc,spi}
# Bundle up the reduced set necessary for the 'initrd'.
sudo chown -R root: ${INITRD_PATH}/lib
tar czf ${INITRD_PATH}/modules.tar.gz -C ${INITRD_PATH} lib
sudo chown -R `whoami`: ${INITRD_PATH}/lib
# Bundle up the full set of modules, for the Debian image.
sudo chown -R root: ${INSTALL_MOD_PATH}/lib
tar czf ${INSTALL_MOD_PATH}/modules-${KERNEL_VERSION}.tgz -C ${INSTALL_MOD_PATH} lib
sudo chown -R `whoami`: ${INSTALL_MOD_PATH}/lib
|
coherentsolutions/linux-2.6.29-ts4700
|
build-module-bundles.sh
|
Shell
|
gpl-2.0
| 1,047 |
#!/bin/bash
#
# Copyright (c) 2014 - 2020 Eaton
#
# This file is part of the Eaton sponsored 42ity project.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#! \file git_details.sh
# \brief Print the details of the current repository
# \author Jim Klimov <[email protected]>
# \details Print the details of the current repository, if any,
# and of build-host and current build-timestamp, to the
# stdout as a shell-includable markup.
#
# Establish standard environment
LANG=C
LC_ALL=C
TZ=UTC
export LANG LC_ALL TZ
### We need to convert a potentially multi-line value like "git status" into
### a single-line token for C macros or Makefiles; our JSON.sh can do that.
[ -z "$JSONSH" -o ! -x "$JSONSH" ] && JSONSH="`dirname $0`/JSON.sh"
[ -z "$JSONSH" -o ! -x "$JSONSH" ] && JSONSH="/usr/share/fty/scripts/JSON.sh"
[ -z "$JSONSH" -o ! -x "$JSONSH" ] && JSONSH="/usr/share/bios/scripts/JSON.sh"
[ ! -x "$JSONSH" ] && \
echo "GIT_DETAILS-FATAL: FAILED to use JSON.sh from '$JSONSH'" >&2 && exit 3
[ -z "$GIT" -o ! -x "$GIT" ] && GIT="$(which git 2>/dev/null | head -1)"
if [ -n "$GIT" -a -x "$GIT" ] && $GIT --help >/dev/null 2>&1; then :
else
echo "GIT_DETAILS-WARN: FAILED to execute 'git' program (tried '$GIT')" >&2
GIT="git"
fi
[ -z "$DATE" -o ! -x "$DATE" ] && DATE="$(which date 2>/dev/null | head -1)"
[ -z "$DATE" -o ! -x "$DATE" ] && DATE="$(which gdate 2>/dev/null | head -1)"
[ -n "$DATE" -a -x "$DATE" ] || DATE=date
PACKAGE_BUILD_TSTAMP_ISO8601=""
reportVar() {
# Argument is the name of the variable to report in "original"
# and "escaped" form
V="$1"
VE="${V}_ESCAPED"
if [ -z "$V" ]; then
echo "$V=\"\";"
echo "$VE=\"\";"
else
eval echo -E $V=\\\"\"'$'$V\"\\\""\\;"
eval $VE=\"$(eval echo -E \"'$'\{$V\}\" | $JSONSH -Q)\"
eval echo -E $VE=\\\"'$'$VE\\\""\\;"
fi
unset $VE
unset VE
unset V
return 0
}
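# Illustration (hypothetical value): after FOO=$'a\nb', 'reportVar FOO' emits
# a raw multi-line assignment FOO="a<newline>b"; plus the single-line
# FOO_ESCAPED="a\nb"; token that is safe for C macros or Makefiles.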
reportBuildTimestamp() {
echo "GIT_DETAILS-INFO: Recording the build timestamp..." >&2
# Packaging metadata: Current timestamp at the build host
# (as of compilation)
# May be passed by caller like the obs-service_git_nas.sh script
# to use some unified value across a mass build, if needed
if [ -z "$PACKAGE_BUILD_TSTAMP" ] ; then
PACKAGE_BUILD_TSTAMP="`TZ=UTC $DATE -u '+%s'`" || \
PACKAGE_BUILD_TSTAMP="`TZ=UTC $DATE '+%s'`" || \
return 1
fi
[ -n "$PACKAGE_BUILD_TSTAMP" ] && [ -z "$PACKAGE_BUILD_TSTAMP_ISO8601" ] && \
PACKAGE_BUILD_TSTAMP_ISO8601="`TZ=UTC $DATE -u -d "@${PACKAGE_BUILD_TSTAMP}" '+%Y%m%dT%H%M%SZ'`"
for PV in \
PACKAGE_BUILD_TSTAMP \
PACKAGE_BUILD_TSTAMP_ISO8601 \
; do
reportVar "$PV"
done
return 0
}
reportBuildHost() {
echo "GIT_DETAILS-INFO: Getting buildhost attributes..." >&2
# Packaging metadata: Full 'uname -a' of the building host
PACKAGE_BUILD_HOST_UNAME="`uname -a`"
# Packaging metadata: Hostname of the building host
PACKAGE_BUILD_HOST_NAME="`uname -n`"
# Packaging metadata: OS/kernel of the building host
PACKAGE_BUILD_HOST_OS="`uname -s -r -v`"
for PV in \
PACKAGE_BUILD_HOST_UNAME \
PACKAGE_BUILD_HOST_NAME \
PACKAGE_BUILD_HOST_OS \
; do
reportVar "$PV"
done
return 0
}
reportGitInfo() {
# Caller can 'export GIT_DETAILS_BLANK=yes' to just generate an empty set
GIT_ERRORED=no
GITRES=0
PACKAGE_GIT_ORIGIN=""
PACKAGE_GIT_BRANCH=""
PACKAGE_GIT_TSTAMP=""
PACKAGE_GIT_TSTAMP_ISO8601=""
PACKAGE_GIT_HASH_S=""
PACKAGE_GIT_HASH_L=""
PACKAGE_GIT_STATUS=""
PACKAGE_GIT_TAGGED=""
if [ -z "$GIT" ]; then
GIT_ERRORED=yes
GITRES=1
else
# Packaging metadata: URL of the Git origin repository
# (parent of the build workspace)
PACKAGE_GIT_ORIGIN="$($GIT config --get remote.origin.url)"
if [ $? != 0 ]; then
echo "GIT_DETAILS-INFO: SKIPPED: can not get Git metadata in '`pwd`'" >&2
PACKAGE_GIT_ORIGIN=""
GIT_ERRORED=yes
GITRES=2
fi
fi
if [ "$GIT_ERRORED" = no ]; then
echo "GIT_DETAILS-INFO: Getting Git workspace attributes..." >&2
# Packaging metadata: Git branch in the build workspace repository
PACKAGE_GIT_BRANCH="$($GIT rev-parse --abbrev-ref HEAD)"
# Packaging metadata: Git timestamp of the commit used for the build
PACKAGE_GIT_TSTAMP="$($GIT log -n 1 --format='%ct')" && \
PACKAGE_GIT_TSTAMP_ISO8601="`TZ=UTC $DATE -u -d "@${PACKAGE_GIT_TSTAMP}" '+%Y%m%dT%H%M%SZ'`"
# Packaging metadata: Git short-hash of the commit used for the build
PACKAGE_GIT_HASH_S="$($GIT log -n 1 --format='%h')"
# Packaging metadata: Git long-hash of the commit used for the build
PACKAGE_GIT_HASH_L="$($GIT rev-parse --verify HEAD)"
# Packaging metadata: short list of possible differences against the
# committed repository state
PACKAGE_GIT_STATUS="$($GIT status -s)"
_B=''
_B_RES=-1
if [ "$PACKAGE_GIT_BRANCH" = "HEAD" ]; then
echo "GIT_DETAILS-INFO: This workspace is a 'detached HEAD'," \
"trying to detect the real source branch name..." >&2
if [ -n "$BRANCH" -a -n "$BUILDMACHINE" ]; then
echo "GIT_DETAILS-INFO: envvars set by Jenkins worker are detected;" \
"will rely on them (using '$BRANCH')" >&2
_B="$BRANCH"
[ -n "$BRANCH" -a x"$BRANCH" != xHEAD ]
_B_RES=$?
fi
[ $_B_RES != 0 -o -z "$_B" ] && \
if [ -n "$BRANCH_NAME" ]; then
echo "GIT_DETAILS-INFO: envvars set by Jenkins are detected;" \
"will rely on them (using '$BRANCH_NAME')" >&2
_B="$BRANCH_NAME"
[ -n "$BRANCH_NAME" -a x"$BRANCH_NAME" != xHEAD ]
_B_RES=$?
fi
_B_FETCH_HEAD=""
[ $_B_RES != 0 -o -z "$_B" ] && \
if [ -d ".git" -a -f ".git/FETCH_HEAD" -a\
-n "$PACKAGE_GIT_HASH_L" ]; then
echo "GIT_DETAILS-INFO: Looking for PACKAGE_GIT_BRANCH in .git/FETCH_HEAD..." >&2
_B="`grep "$PACKAGE_GIT_HASH_L" .git/FETCH_HEAD | grep -w branch | sed 's,^[^ ]* *branch '"'"'\(.*\)'"'"' of .*$,\1,'`" && [ -n "$_B" ]
_B_RES=$?
if [ $_B_RES = 0 ] && [ "`echo "$_B" | wc -l`" -gt 1 ] ; then
# Note: pedantically, this rule can also be hit if a branch
# is branched and no commits are added to either one - both
# HEADs are same commit id then... and then we have little
# reason to choose or reject either one. Maybe `|head -1` ?
# We fall-back to this in the end of this test suite.
echo "GIT_DETAILS-WARN: Looking for PACKAGE_GIT_BRANCH in .git/FETCH_HEAD returned more than one line (octopus, shoo!) :" >&2
echo "$_B" >&2
_B_FETCH_HEAD="$_B"
_B=""
_B_RES=1
fi
fi
[ $_B_RES != 0 -o -z "$_B" ] && \
if [ -n "$PACKAGE_GIT_HASH_S" ]; then
echo "GIT_DETAILS-INFO: Looking for PACKAGE_GIT_BRANCH in 'git branch' info..." >&2
_B="`git branch -a -v | grep -w "$PACKAGE_GIT_HASH_S" | egrep -v "^\* (\(no branch\) $PACKAGE_GIT_HASH_S|detached from $PACKAGE_GIT_HASH_S|HEAD detached at $PACKAGE_GIT_HASH_S)" | awk '{print $1}' | sed 's,^remotes/,,'`"
_B_RES=$?
fi
[ $_B_RES != 0 -o -z "$_B" -o "$_B" = '*' ] && \
if [ -s ".git_details" -a -r ".git_details" ]; then
echo "GIT_DETAILS-INFO: Looking for PACKAGE_GIT_BRANCH" \
"in older .git_details..." >&2
_B="`source .git_details && echo "$PACKAGE_GIT_BRANCH"`"
_B_RES=$?
fi
[ $_B_RES != 0 -o -z "$_B" ] && \
if [ -n "$_B_FETCH_HEAD" ]; then \
echo "GIT_DETAILS-INFO: Fall back to the first hit from .git/FETCH_HEAD as the PACKAGE_GIT_BRANCH..." >&2
_B="`echo "$_B_FETCH_HEAD" | head -1`"
_B_RES=$?
fi
[ $_B_RES = 0 -a -n "$_B" ] && \
echo "GIT_DETAILS-INFO: This workspace is a 'detached HEAD'," \
"but its commit-id matches the head of known branch '$_B'" >&2 && \
PACKAGE_GIT_BRANCH="$_B"
unset _B_FETCH_HEAD
fi
unset _B _B_RES
if [ "$PACKAGE_GIT_BRANCH" = "HEAD" ]; then
echo "GIT_DETAILS-WARN: This workspace is a 'detached HEAD', and" \
"we could not reliably detect any predecessor branch" >&2
fi
### Ported from bios-infra::obs-service_git_nas.sh
PACKAGE_GIT_TAGGED="$($GIT describe --tags 2>/dev/null)"
### TODO: is this still needed? The pattern ported from git_nas
### is absent nowadays... maybe it was even never implemented...
### Kill the "v" or "t" from version or tag
#PACKAGE_GIT_TAGGED="${PACKAGE_GIT_TAGGED/-[tv]/-}"
#PACKAGE_GIT_TAGGED="${PACKAGE_GIT_TAGGED//-/\~}"
fi
if [ "$GIT_ERRORED" = no -o x"$GIT_DETAILS_BLANK" = xyes ]; then
for PV in \
PACKAGE_GIT_ORIGIN PACKAGE_GIT_BRANCH PACKAGE_GIT_TSTAMP \
PACKAGE_GIT_HASH_S PACKAGE_GIT_HASH_L PACKAGE_GIT_STATUS \
PACKAGE_GIT_TAGGED PACKAGE_GIT_TSTAMP_ISO8601 \
; do
reportVar "$PV"
done
return 0
else
return $GITRES
fi
}
case "$1" in
build-host)
reportBuildHost ;;
build-timestamp)
reportBuildTimestamp ;;
build-source)
reportGitInfo ;;
*)
reportBuildHost
reportBuildTimestamp
# NOTE: This must be the last action - it returns the possible error
# exit-codes in Git metadata detection, if the caller cares about that
reportGitInfo
exit
;;
esac
|
jimklimov/fty-core
|
tools/git_details.sh
|
Shell
|
gpl-2.0
| 11,003 |
FANCYTTY=0
|
johnraff/cb-netinstall
|
sysfiles2/etc/lsb-base-logging.sh
|
Shell
|
gpl-2.0
| 11 |
#!/bin/sh
#
# Copyright (c) 2013 The Linux Foundation. All rights reserved.
# Copyright (C) 2011 OpenWrt.org
#
IPQ806X_BOARD_NAME=
IPQ806X_MODEL=
ipq806x_board_detect() {
local machine
local name
machine=$(awk 'BEGIN{FS="[ \t]+:[ \t]"} /Hardware/ {print $2}' /proc/cpuinfo)
case "$machine" in
*"DB149 reference board")
name="db149"
;;
*"AP148 reference board")
name="ap148"
;;
*"AP145 reference board")
name="ap145"
;;
*"AP145-1XX reference board")
name="ap145_1xx"
;;
*"DB149-1XX reference board")
name="db149_1xx"
;;
*"DB149-2XX reference board")
name="db149_2xx"
;;
*"AP148-1XX reference board")
name="ap148_1xx"
;;
esac
[ -z "$name" ] && name="unknown"
[ -z "$IPQ806X_BOARD_NAME" ] && IPQ806X_BOARD_NAME="$name"
[ -z "$IPQ806X_MODEL" ] && IPQ806X_MODEL="$machine"
[ -e "/tmp/sysinfo/" ] || mkdir -p "/tmp/sysinfo/"
echo "$IPQ806X_BOARD_NAME" > /tmp/sysinfo/board_name
echo "$IPQ806X_MODEL" > /tmp/sysinfo/model
}
ipq806x_board_name() {
local name
[ -f /tmp/sysinfo/board_name ] && name=$(cat /tmp/sysinfo/board_name)
[ -z "$name" ] && name="unknown"
echo "$name"
}
|
wjrsonic/openwrt
|
target/linux/ipq806x/base-files/lib/ipq806x.sh
|
Shell
|
gpl-2.0
| 1,130 |
#! /bin/bash
#
# A clang-format lint script. Usage:
# ./lint.sh
# Runs clang-format on the current git stage
# ./lint.sh revision..range
# Runs clang-format on the specified range of commits
set -eo pipefail
[ -z "${CLANG_FORMAT}" ] && CLANG_FORMAT="clang-format"
if [ -z "$(command -v ${CLANG_FORMAT})" ]; then
echo >&2 "clang format binary \"${CLANG_FORMAT}\" not found"
exit 1;
fi;
echo "Clang-format version:"
${CLANG_FORMAT} --version
# Default to 'cached', or the revision passed as an argument
GIT_REVISION=${1:---cached}
MODIFIED_FILES=$(git diff --name-only --diff-filter=ACMRTUXB ${GIT_REVISION})
RET=0
for f in ${MODIFIED_FILES}; do
# Skip any non-C++ files
if ! echo "${f}" | egrep -q "[.](cpp|h)$"; then
continue;
fi
OUTPUT=$(${CLANG_FORMAT} ${f} | (diff -u "${f}" - || true))
if [ -n "${OUTPUT}" ]; then
echo "ERROR: File \"${f}\" doesn't match expected format, diff:"
echo
echo "${OUTPUT}"
RET=1;
fi;
done;
exit ${RET};
|
JonnyH/OpenApoc
|
tools/lint.sh
|
Shell
|
gpl-3.0
| 979 |
#!/bin/bash
cd $1
./runMavenProfile pysdk
|
zstackorg/zstack-utility
|
zstackbuild/scripts/build_zstack_pysdk.sh
|
Shell
|
apache-2.0
| 42 |
#!/usr/bin/env bash
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Call this to dump all master and node logs into the folder specified in $1
# (defaults to _artifacts). Only works if the provider supports SSH.
set -o errexit
set -o nounset
set -o pipefail
readonly local_report_dir="${1:-_artifacts}"
report_dir=""
readonly gcs_artifacts_dir="${2:-}"
readonly logexporter_namespace="${3:-logexporter}"
# To make it easier to extend log-dump for custom deployments, check
# for a function named log_dump_custom_get_instances. If it's
# defined, we assume the function can be called with one argument, the
# role, which is either "master" or "node".
echo 'Checking for custom logdump instances, if any'
if [[ $(type -t log_dump_custom_get_instances) == "function" ]]; then
readonly use_custom_instance_list=yes
else
readonly use_custom_instance_list=
fi
readonly master_ssh_supported_providers="gce aws"
readonly node_ssh_supported_providers="gce gke aws"
readonly gcloud_supported_providers="gce gke"
readonly master_logfiles="kube-apiserver.log kube-apiserver-audit.log kube-scheduler.log kube-controller-manager.log etcd.log etcd-events.log glbc.log cluster-autoscaler.log kube-addon-manager.log konnectivity-server.log fluentd.log kubelet.cov"
readonly node_logfiles="kube-proxy.log containers/konnectivity-agent-*.log fluentd.log node-problem-detector.log kubelet.cov"
readonly node_systemd_services="node-problem-detector"
readonly hollow_node_logfiles="kubelet-hollow-node-*.log kubeproxy-hollow-node-*.log npd-hollow-node-*.log"
readonly aws_logfiles="cloud-init-output.log"
readonly gce_logfiles="startupscript.log"
readonly kern_logfile="kern.log"
readonly initd_logfiles="docker/log"
readonly supervisord_logfiles="kubelet.log supervisor/supervisord.log supervisor/kubelet-stdout.log supervisor/kubelet-stderr.log supervisor/docker-stdout.log supervisor/docker-stderr.log"
readonly systemd_services="kubelet kubelet-monitor kube-container-runtime-monitor ${LOG_DUMP_SYSTEMD_SERVICES:-docker}"
readonly extra_log_files="${LOG_DUMP_EXTRA_FILES:-}"
readonly extra_systemd_services="${LOG_DUMP_SAVE_SERVICES:-}"
readonly dump_systemd_journal="${LOG_DUMP_SYSTEMD_JOURNAL:-false}"
# Root directory for Kubernetes files on Windows nodes.
WINDOWS_K8S_DIR="C:\\etc\\kubernetes"
# Directory where Kubernetes log files will be stored on Windows nodes.
export WINDOWS_LOGS_DIR="${WINDOWS_K8S_DIR}\\logs"
# Log files found in WINDOWS_LOGS_DIR on Windows nodes:
readonly windows_node_logfiles="kubelet.log kube-proxy.log docker.log docker_images.log csi-proxy.log"
# Log files found in other directories on Windows nodes:
readonly windows_node_otherfiles="C:\\Windows\\MEMORY.dmp"
# Limit the number of concurrent node connections so that we don't run out of
# file descriptors for large clusters.
readonly max_dump_processes=25
# Indicator variable whether we experienced a significant failure during
# logexporter creation or execution.
logexporter_failed=0
# Percentage of nodes that must be logexported successfully (otherwise the
# process will exit with a non-zero exit code).
readonly log_dump_expected_success_percentage="${LOG_DUMP_EXPECTED_SUCCESS_PERCENTAGE:-0}"
# Example: kube::util::trap_add 'echo "in trap DEBUG"' DEBUG
# See: http://stackoverflow.com/questions/3338030/multiple-bash-traps-for-the-same-signal
kube::util::trap_add() {
local trap_add_cmd
trap_add_cmd=$1
shift
for trap_add_name in "$@"; do
local existing_cmd
local new_cmd
# Grab the currently defined trap commands for this trap
existing_cmd=$(trap -p "${trap_add_name}" | awk -F"'" '{print $2}')
if [[ -z "${existing_cmd}" ]]; then
new_cmd="${trap_add_cmd}"
else
new_cmd="${trap_add_cmd};${existing_cmd}"
fi
# Assign the test. Disable the shellcheck warning telling that trap
# commands should be single quoted to avoid evaluating them at this
# point instead evaluating them at run time. The logic of adding new
# commands to a single trap requires them to be evaluated right away.
# shellcheck disable=SC2064
trap "${new_cmd}" "${trap_add_name}"
done
}
# Opposite of kube::util::ensure-temp-dir()
kube::util::cleanup-temp-dir() {
rm -rf "${KUBE_TEMP}"
}
# Create a temp dir that'll be deleted at the end of this bash session.
#
# Vars set:
# KUBE_TEMP
kube::util::ensure-temp-dir() {
if [[ -z ${KUBE_TEMP-} ]]; then
KUBE_TEMP=$(mktemp -d 2>/dev/null || mktemp -d -t kubernetes.XXXXXX)
kube::util::trap_add kube::util::cleanup-temp-dir EXIT
fi
}
# Use the gcloud defaults to find the project. If it is already set in the
# environment then go with that.
#
# Vars set:
# PROJECT
# NETWORK_PROJECT
# PROJECT_REPORTED
function detect-project() {
if [[ -z "${PROJECT-}" ]]; then
PROJECT=$(gcloud config list project --format 'value(core.project)')
fi
NETWORK_PROJECT=${NETWORK_PROJECT:-${PROJECT}}
if [[ -z "${PROJECT-}" ]]; then
echo "Could not detect Google Cloud Platform project. Set the default project using " >&2
echo "'gcloud config set project <PROJECT>'" >&2
exit 1
fi
if [[ -z "${PROJECT_REPORTED-}" ]]; then
echo "Project: ${PROJECT}" >&2
echo "Network Project: ${NETWORK_PROJECT}" >&2
echo "Zone: ${ZONE}" >&2
PROJECT_REPORTED=true
fi
}
# Detect Linux and Windows nodes in the cluster.
#
# If a custom get-instances function has been set, this function will use it
# to set the NODE_NAMES array.
#
# Otherwise this function will attempt to detect the nodes based on the GCP
# instance group information. If Windows nodes are present they will be detected
# separately. The following arrays will be set:
# NODE_NAMES
# INSTANCE_GROUPS
# WINDOWS_NODE_NAMES
# WINDOWS_INSTANCE_GROUPS
function detect-node-names() {
NODE_NAMES=()
INSTANCE_GROUPS=()
WINDOWS_INSTANCE_GROUPS=()
WINDOWS_NODE_NAMES=()
if [[ -n "${use_custom_instance_list}" ]]; then
echo 'Detecting node names using log_dump_custom_get_instances() function'
while IFS='' read -r line; do NODE_NAMES+=("$line"); done < <(log_dump_custom_get_instances node)
echo "NODE_NAMES=${NODE_NAMES[*]:-}" >&2
return
fi
if ! [[ "${gcloud_supported_providers}" =~ ${KUBERNETES_PROVIDER} ]]; then
echo "gcloud not supported for ${KUBERNETES_PROVIDER}, can't detect node names"
return
fi
# These prefixes must not be prefixes of each other, so that they can be used to
# detect mutually exclusive sets of nodes.
local -r NODE_INSTANCE_PREFIX=${NODE_INSTANCE_PREFIX:-"${INSTANCE_PREFIX}-minion"}
local -r WINDOWS_NODE_INSTANCE_PREFIX=${WINDOWS_NODE_INSTANCE_PREFIX:-"${INSTANCE_PREFIX}-windows-node"}
detect-project
echo 'Detecting nodes in the cluster'
INSTANCE_GROUPS+=($(gcloud compute instance-groups managed list \
--project "${PROJECT}" \
--filter "name ~ '${NODE_INSTANCE_PREFIX}-.+' AND zone:(${ZONE})" \
--format='value(name)' || true))
WINDOWS_INSTANCE_GROUPS+=($(gcloud compute instance-groups managed list \
--project "${PROJECT}" \
--filter "name ~ '${WINDOWS_NODE_INSTANCE_PREFIX}-.+' AND zone:(${ZONE})" \
--format='value(name)' || true))
if [[ -n "${INSTANCE_GROUPS[@]:-}" ]]; then
for group in "${INSTANCE_GROUPS[@]}"; do
NODE_NAMES+=($(gcloud compute instance-groups managed list-instances \
"${group}" --zone "${ZONE}" --project "${PROJECT}" \
--format='value(instance)'))
done
fi
# Add heapster node name to the list too (if it exists).
if [[ -n "${HEAPSTER_MACHINE_TYPE:-}" ]]; then
NODE_NAMES+=("${NODE_INSTANCE_PREFIX}-heapster")
fi
if [[ -n "${WINDOWS_INSTANCE_GROUPS[@]:-}" ]]; then
for group in "${WINDOWS_INSTANCE_GROUPS[@]}"; do
WINDOWS_NODE_NAMES+=($(gcloud compute instance-groups managed \
list-instances "${group}" --zone "${ZONE}" --project "${PROJECT}" \
--format='value(instance)'))
done
fi
echo "INSTANCE_GROUPS=${INSTANCE_GROUPS[*]:-}" >&2
echo "NODE_NAMES=${NODE_NAMES[*]:-}" >&2
echo "WINDOWS_INSTANCE_GROUPS=${WINDOWS_INSTANCE_GROUPS[*]:-}" >&2
echo "WINDOWS_NODE_NAMES=${WINDOWS_NODE_NAMES[*]:-}" >&2
}
# Detect the IP for the master
#
# Assumed vars:
# MASTER_NAME
# ZONE
# REGION
# Vars set:
# KUBE_MASTER
# KUBE_MASTER_IP
function detect-master() {
detect-project
KUBE_MASTER=${MASTER_NAME}
echo "Trying to find master named '${MASTER_NAME}'" >&2
if [[ -z "${KUBE_MASTER_IP-}" ]]; then
local master_address_name="${MASTER_NAME}-ip"
echo "Looking for address '${master_address_name}'" >&2
if ! KUBE_MASTER_IP=$(gcloud compute addresses describe "${master_address_name}" \
--project "${PROJECT}" --region "${REGION}" -q --format='value(address)') || \
[[ -z "${KUBE_MASTER_IP-}" ]]; then
echo "Could not detect Kubernetes master node. Make sure you've launched a cluster with 'kube-up.sh'" >&2
exit 1
fi
fi
if [[ -z "${KUBE_MASTER_INTERNAL_IP-}" ]] && [[ ${GCE_PRIVATE_CLUSTER:-} == "true" ]]; then
local master_address_name="${MASTER_NAME}-internal-ip"
echo "Looking for address '${master_address_name}'" >&2
if ! KUBE_MASTER_INTERNAL_IP=$(gcloud compute addresses describe "${master_address_name}" \
--project "${PROJECT}" --region "${REGION}" -q --format='value(address)') || \
[[ -z "${KUBE_MASTER_INTERNAL_IP-}" ]]; then
echo "Could not detect Kubernetes master node. Make sure you've launched a cluster with 'kube-up.sh'" >&2
exit 1
fi
fi
echo "Using master: $KUBE_MASTER (external IP: $KUBE_MASTER_IP; internal IP: ${KUBE_MASTER_INTERNAL_IP:-(not set)})" >&2
}
# SSH to a node by name ($1) and run a command ($2).
function setup() {
if [[ -z "${use_custom_instance_list}" ]]; then
echo "Using gce provider, skipping check for LOG_DUMP_SSH_KEY and LOG_DUMP_SSH_USER"
ZONE="${KUBE_GCE_ZONE:-us-central1-b}"
REGION="${ZONE%-*}"
INSTANCE_PREFIX="${KUBE_GCE_INSTANCE_PREFIX:-kubernetes}"
CLUSTER_NAME="${CLUSTER_NAME:-${INSTANCE_PREFIX}}"
MASTER_NAME="${INSTANCE_PREFIX}-master"
GCE_PRIVATE_CLUSTER="${KUBE_GCE_PRIVATE_CLUSTER:-false}"
detect-project 2>&1
elif [[ "${KUBERNETES_PROVIDER}" == "gke" ]]; then
NUM_NODES=${NUM_NODES:-3}
echo "Using 'use_custom_instance_list' with gke, skipping check for LOG_DUMP_SSH_KEY and LOG_DUMP_SSH_USER"
elif [[ -z "${LOG_DUMP_SSH_KEY:-}" ]]; then
echo 'LOG_DUMP_SSH_KEY not set, but required when using log_dump_custom_get_instances'
exit 1
elif [[ -z "${LOG_DUMP_SSH_USER:-}" ]]; then
echo 'LOG_DUMP_SSH_USER not set, but required when using log_dump_custom_get_instances'
exit 1
fi
}
function log-dump-ssh() {
local host="$1"
local cmd="$2"
if [[ "${gcloud_supported_providers}" =~ ${KUBERNETES_PROVIDER} ]]; then
for (( i=0; i<5; i++)); do
if gcloud compute ssh --ssh-flag="-o LogLevel=quiet" --ssh-flag="-o ConnectTimeout=30" --project "${PROJECT}" --zone="${ZONE}" "${host}" --command "echo test > /dev/null"; then
break
fi
sleep 5
done
# Then actually try the command.
gcloud compute ssh --ssh-flag="-o LogLevel=quiet" --ssh-flag="-o ConnectTimeout=30" --project "${PROJECT}" --zone="${ZONE}" "${host}" --command "${cmd}"
return
fi
ssh -oLogLevel=quiet -oConnectTimeout=30 -oStrictHostKeyChecking=no -i "${LOG_DUMP_SSH_KEY}" "${LOG_DUMP_SSH_USER}@${host}" "${cmd}"
}
# Copy all files /var/log/{$3}.log on node $1 into local dir $2.
# $3 should be a string array of file names.
# This function shouldn't ever trigger errexit; note that it does not suppress stderr.
function copy-logs-from-node() {
local -r node="${1}"
local -r dir="${2}"
shift
shift
local files=("$@")
# Append "*"
# The * at the end is needed to also copy rotated logs (which happens
# in large clusters and long runs).
files=( "${files[@]/%/*}" )
# Prepend "/var/log/"
files=( "${files[@]/#/\/var\/log\/}" )
# Comma delimit (even the singleton, or scp does the wrong thing), surround by braces.
local -r scp_files="{$(printf "%s," "${files[@]}")}"
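# For example (hypothetical input), files=(kubelet.log kern.log) yields
# scp_files='{/var/log/kubelet.log*,/var/log/kern.log*,}' -- the trailing
# comma left by printf is harmless to scp.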
if [[ "${gcloud_supported_providers}" =~ ${KUBERNETES_PROVIDER} ]]; then
# get-serial-port-output lets you ask for ports 1-4, but currently (11/21/2016) only port 1 contains useful information
gcloud compute instances get-serial-port-output --project "${PROJECT}" --zone "${ZONE}" --port 1 "${node}" > "${dir}/serial-1.log" || true
gcloud compute scp --recurse --project "${PROJECT}" --zone "${ZONE}" "${node}:${scp_files}" "${dir}" > /dev/null || true
elif [[ "${KUBERNETES_PROVIDER}" == "aws" ]]; then
local ip
ip=$(get_ssh_hostname "${node}")
scp -oLogLevel=quiet -oConnectTimeout=30 -oStrictHostKeyChecking=no -i "${AWS_SSH_KEY}" "${SSH_USER}@${ip}:${scp_files}" "${dir}" > /dev/null || true
elif [[ -n "${use_custom_instance_list}" ]]; then
scp -oLogLevel=quiet -oConnectTimeout=30 -oStrictHostKeyChecking=no -i "${LOG_DUMP_SSH_KEY}" "${LOG_DUMP_SSH_USER}@${node}:${scp_files}" "${dir}" > /dev/null || true
else
echo "Unknown cloud-provider '${KUBERNETES_PROVIDER}' and use_custom_instance_list is unset too - skipping logdump for '${node}'"
fi
}
# Save logs for node $1 into directory $2. Pass in any non-common files in $3.
# Pass in any non-common systemd services in $4.
# $3 and $4 should be a space-separated list of files.
# Set $5 to true to indicate it is on master. Default to false.
# This function shouldn't ever trigger errexit
function save-logs() {
local -r node_name="${1}"
local -r dir="${2}"
local files=()
IFS=' ' read -r -a files <<< "$3"
local opt_systemd_services="${4:-""}"
local on_master="${5:-"false"}"
local extra=()
IFS=' ' read -r -a extra <<< "$extra_log_files"
files+=("${extra[@]}")
if [[ -n "${use_custom_instance_list}" ]]; then
if [[ -n "${LOG_DUMP_SAVE_LOGS:-}" ]]; then
local dump=()
IFS=' ' read -r -a dump <<< "${LOG_DUMP_SAVE_LOGS:-}"
files+=("${dump[@]}")
fi
else
local providerlogs=()
case "${KUBERNETES_PROVIDER}" in
gce|gke)
IFS=' ' read -r -a providerlogs <<< "${gce_logfiles}"
;;
aws)
IFS=' ' read -r -a providerlogs <<< "${aws_logfiles}"
;;
esac
files+=("${providerlogs[@]}")
fi
local services
read -r -a services <<< "${systemd_services} ${opt_systemd_services} ${extra_systemd_services}"
if log-dump-ssh "${node_name}" "command -v journalctl" &> /dev/null; then
if [[ "${on_master}" == "true" ]]; then
log-dump-ssh "${node_name}" "sudo journalctl --output=short-precise -u kube-master-installation.service" > "${dir}/kube-master-installation.log" || true
log-dump-ssh "${node_name}" "sudo journalctl --output=short-precise -u kube-master-configuration.service" > "${dir}/kube-master-configuration.log" || true
else
log-dump-ssh "${node_name}" "sudo journalctl --output=short-precise -u kube-node-installation.service" > "${dir}/kube-node-installation.log" || true
log-dump-ssh "${node_name}" "sudo journalctl --output=short-precise -u kube-node-configuration.service" > "${dir}/kube-node-configuration.log" || true
fi
log-dump-ssh "${node_name}" "sudo journalctl --output=short-precise -k" > "${dir}/kern.log" || true
for svc in "${services[@]}"; do
log-dump-ssh "${node_name}" "sudo journalctl --output=cat -u ${svc}.service" > "${dir}/${svc}.log" || true
done
if [[ "$dump_systemd_journal" == "true" ]]; then
log-dump-ssh "${node_name}" "sudo journalctl --output=short-precise" > "${dir}/systemd.log" || true
fi
else
local tmpfiles=()
for f in "${kern_logfile}" "${initd_logfiles}" "${supervisord_logfiles}"; do
IFS=' ' read -r -a tmpfiles <<< "$f"
files+=("${tmpfiles[@]}")
done
fi
# Try dumping coverage profiles, if it looks like coverage is enabled in the first place.
if log-dump-ssh "${node_name}" "stat /var/log/kubelet.cov" &> /dev/null; then
if log-dump-ssh "${node_name}" "command -v docker" &> /dev/null; then
if [[ "${on_master}" == "true" ]]; then
run-in-docker-container "${node_name}" "kube-apiserver" "cat /tmp/k8s-kube-apiserver.cov" > "${dir}/kube-apiserver.cov" || true
run-in-docker-container "${node_name}" "kube-scheduler" "cat /tmp/k8s-kube-scheduler.cov" > "${dir}/kube-scheduler.cov" || true
run-in-docker-container "${node_name}" "kube-controller-manager" "cat /tmp/k8s-kube-controller-manager.cov" > "${dir}/kube-controller-manager.cov" || true
else
run-in-docker-container "${node_name}" "kube-proxy" "cat /tmp/k8s-kube-proxy.cov" > "${dir}/kube-proxy.cov" || true
fi
else
echo 'Coverage profiles seem to exist, but cannot be retrieved from inside containers.'
fi
fi
echo 'Changing logfiles to be world-readable for download'
log-dump-ssh "${node_name}" "sudo chmod -R a+r /var/log" || true
echo "Copying '${files[*]}' from ${node_name}"
copy-logs-from-node "${node_name}" "${dir}" "${files[@]}"
}
# Saves a copy of the Windows Docker event log to ${WINDOWS_LOGS_DIR}\docker.log
# on node $1.
function export-windows-docker-event-log() {
local -r node="${1}"
local -r powershell_cmd="powershell.exe -Command \"\$logs=\$(Get-EventLog -LogName Application -Source Docker | Format-Table -Property TimeGenerated, EntryType, Message -Wrap); \$logs | Out-File -FilePath '${WINDOWS_LOGS_DIR}\\docker.log'\""
# Retry up to 3 times to allow ssh keys to be properly propagated and
# stored.
for retry in {1..3}; do
if gcloud compute ssh --project "${PROJECT}" --zone "${ZONE}" "${node}" \
--command "$powershell_cmd"; then
break
else
sleep 10
fi
done
}
# Saves prepulled Windows Docker images list to ${WINDOWS_LOGS_DIR}\docker_images.log
# on node $1.
function export-windows-docker-images-list() {
local -r node="${1}"
local -r powershell_cmd="powershell.exe -Command \"\$logs=\$(docker image list); \$logs | Out-File -FilePath '${WINDOWS_LOGS_DIR}\\docker_images.log'\""
# Retry up to 3 times to allow ssh keys to be properly propagated and
# stored.
for retry in {1..3}; do
if gcloud compute ssh --project "${PROJECT}" --zone "${ZONE}" "${node}" \
--command "$powershell_cmd"; then
break
else
sleep 10
fi
done
}
# Saves log files from diagnostics tool.(https://github.com/GoogleCloudPlatform/compute-image-tools/tree/master/cli_tools/diagnostics)
function save-windows-logs-via-diagnostics-tool() {
local node="${1}"
local dest_dir="${2}"
gcloud compute instances add-metadata "${node}" --metadata enable-diagnostics=true --project="${PROJECT}" --zone="${ZONE}"
local logs_archive_in_gcs
logs_archive_in_gcs=$(gcloud alpha compute diagnose export-logs "${node}" "--zone=${ZONE}" "--project=${PROJECT}" | tail -n 1)
local temp_local_path="${node}.zip"
for retry in {1..20}; do
if gsutil mv "${logs_archive_in_gcs}" "${temp_local_path}" > /dev/null 2>&1; then
echo "Downloaded diagnostics log from ${logs_archive_in_gcs}"
break
else
sleep 10
fi
done
if [[ -f "${temp_local_path}" ]]; then
unzip "${temp_local_path}" -d "${dest_dir}" > /dev/null
rm -f "${temp_local_path}"
fi
}
# Saves log files from SSH
function save-windows-logs-via-ssh() {
local node="${1}"
local dest_dir="${2}"
export-windows-docker-event-log "${node}"
export-windows-docker-images-list "${node}"
local remote_files=()
for file in "${windows_node_logfiles[@]}"; do
remote_files+=( "${WINDOWS_LOGS_DIR}\\${file}" )
done
remote_files+=( "${windows_node_otherfiles[@]}" )
# TODO(pjh, yujuhong): handle rotated logs and copying multiple files at the
# same time.
for remote_file in "${remote_files[@]}"; do
# Retry up to 3 times to allow ssh keys to be properly propagated and
# stored.
for retry in {1..3}; do
if gcloud compute scp --recurse --project "${PROJECT}" \
--zone "${ZONE}" "${node}:${remote_file}" "${dest_dir}" \
> /dev/null; then
break
else
sleep 10
fi
done
done
}
# Save log files and serial console output from Windows node $1 into local
# directory $2.
# This function shouldn't ever trigger errexit.
function save-logs-windows() {
local -r node="${1}"
local -r dest_dir="${2}"
if [[ ! "${gcloud_supported_providers}" =~ ${KUBERNETES_PROVIDER} ]]; then
echo "Not saving logs for ${node}, Windows log dumping requires gcloud support"
return
fi
if [[ "${KUBERNETES_PROVIDER}" == "gke" ]]; then
save-windows-logs-via-diagnostics-tool "${node}" "${dest_dir}"
else
save-windows-logs-via-ssh "${node}" "${dest_dir}"
fi
# Serial port 1 contains the Windows console output.
gcloud compute instances get-serial-port-output --project "${PROJECT}" \
--zone "${ZONE}" --port 1 "${node}" > "${dest_dir}/serial-1.log" || true
}
# Execute a command in container $2 on node $1.
# Uses docker because the container may not ordinarily permit direct execution.
function run-in-docker-container() {
local node_name="$1"
local container="$2"
shift 2
log-dump-ssh "${node_name}" "docker exec \"\$(docker ps -f label=io.kubernetes.container.name=${container} --format \"{{.ID}}\")\" $*"
}
function dump_masters() {
local master_names=()
if [[ -n "${use_custom_instance_list}" ]]; then
while IFS='' read -r line; do master_names+=("$line"); done < <(log_dump_custom_get_instances master)
elif [[ ! "${master_ssh_supported_providers}" =~ ${KUBERNETES_PROVIDER} ]]; then
echo "Master SSH not supported for ${KUBERNETES_PROVIDER}"
return
elif [[ -n "${KUBEMARK_MASTER_NAME:-}" ]]; then
master_names=( "${KUBEMARK_MASTER_NAME}" )
else
if ! (detect-master); then
echo 'Master not detected. Is the cluster up?'
return
fi
master_names=( "${MASTER_NAME}" )
fi
if [[ "${#master_names[@]}" == 0 ]]; then
echo 'No masters found?'
return
fi
proc=${max_dump_processes}
for master_name in "${master_names[@]}"; do
master_dir="${report_dir}/${master_name}"
mkdir -p "${master_dir}"
save-logs "${master_name}" "${master_dir}" "${master_logfiles}" "" "true" &
# We don't want to run more than ${max_dump_processes} at a time, so
# wait once we hit that many nodes. This isn't ideal, since one might
# take much longer than the others, but it should help.
proc=$((proc - 1))
if [[ proc -eq 0 ]]; then
proc=${max_dump_processes}
wait
fi
done
# Wait for any remaining processes.
if [[ proc -gt 0 && proc -lt ${max_dump_processes} ]]; then
wait
fi
}
# Dumps logs from nodes in the cluster. Linux nodes to dump logs from can be
# specified via $1 or $use_custom_instance_list. If not specified then the nodes
# to dump logs for will be detected using detect-node-names(); if Windows nodes
# are present then they will be detected and their logs will be dumped too.
function dump_nodes() {
local node_names=()
local windows_node_names=()
if [[ -n "${1:-}" ]]; then
echo 'Dumping logs for nodes provided as args to dump_nodes() function'
node_names=( "$@" )
else
echo 'Detecting nodes in the cluster'
detect-node-names &> /dev/null
if [[ -n "${NODE_NAMES:-}" ]]; then
node_names=( "${NODE_NAMES[@]}" )
fi
if [[ -n "${WINDOWS_NODE_NAMES:-}" ]]; then
windows_node_names=( "${WINDOWS_NODE_NAMES[@]}" )
fi
fi
if [[ "${#node_names[@]}" == 0 && "${#windows_node_names[@]}" == 0 ]]; then
echo 'No nodes found!'
return
fi
node_logfiles_all="${node_logfiles}"
if [[ "${ENABLE_HOLLOW_NODE_LOGS:-}" == "true" ]]; then
node_logfiles_all="${node_logfiles_all} ${hollow_node_logfiles}"
fi
linux_nodes_selected_for_logs=()
if [[ -n "${LOGDUMP_ONLY_N_RANDOM_NODES:-}" ]]; then
# We randomly choose 'LOGDUMP_ONLY_N_RANDOM_NODES' many nodes for fetching logs.
for index in $(shuf -i 0-$(( ${#node_names[*]} - 1 )) -n "${LOGDUMP_ONLY_N_RANDOM_NODES}")
do
linux_nodes_selected_for_logs+=("${node_names[$index]}")
done
else
linux_nodes_selected_for_logs=( "${node_names[@]}" )
fi
all_selected_nodes=( "${linux_nodes_selected_for_logs[@]}" )
all_selected_nodes+=( "${windows_node_names[@]}" )
proc=${max_dump_processes}
start="$(date +%s)"
# log_dump_ssh_timeout is the maximal number of seconds the log dumping over
# SSH operation can take. Please note that the logic enforcing the timeout
# is only a best effort. The actual time of the operation may be longer
# due to waiting for all the child processes below.
log_dump_ssh_timeout_seconds="${LOG_DUMP_SSH_TIMEOUT_SECONDS:-}"
for i in "${!all_selected_nodes[@]}"; do
node_name="${all_selected_nodes[$i]}"
node_dir="${report_dir}/${node_name}"
mkdir -p "${node_dir}"
if [[ "${i}" -lt "${#linux_nodes_selected_for_logs[@]}" ]]; then
# Save logs in the background. This speeds up things when there are
# many nodes.
save-logs "${node_name}" "${node_dir}" "${node_logfiles_all}" "${node_systemd_services}" &
else
save-logs-windows "${node_name}" "${node_dir}" &
fi
# We don't want to run more than ${max_dump_processes} at a time, so
# wait once we hit that many nodes. This isn't ideal, since one might
# take much longer than the others, but it should help.
proc=$((proc - 1))
if [[ proc -eq 0 ]]; then
proc=${max_dump_processes}
wait
now="$(date +%s)"
if [[ -n "${log_dump_ssh_timeout_seconds}" && $((now - start)) -gt ${log_dump_ssh_timeout_seconds} ]]; then
echo "WARNING: Hit timeout after ${log_dump_ssh_timeout_seconds} seconds, finishing log dumping over SSH shortly"
break
fi
fi
done
# Wait for any remaining processes.
if [[ proc -gt 0 && proc -lt ${max_dump_processes} ]]; then
wait
fi
}
# Collect names of nodes which didn't run logexporter successfully.
# This function examines NODE_NAMES but not WINDOWS_NODE_NAMES since logexporter
# does not run on Windows nodes.
#
# Note: This step is O(#nodes^2) as we check if each node is present in the list of succeeded nodes.
# Making it linear would add code complexity without much benefit (as it just takes ~1s for 5k nodes).
# Assumes:
# NODE_NAMES
# Sets:
# NON_LOGEXPORTED_NODES
function find_non_logexported_nodes() {
local file="${gcs_artifacts_dir}/logexported-nodes-registry"
echo "Listing marker files ($file) for successful nodes..."
succeeded_nodes=$(gsutil ls "${file}") || return 1
echo 'Successfully listed marker files for successful nodes'
NON_LOGEXPORTED_NODES=()
for node in "${NODE_NAMES[@]}"; do
if [[ ! "${succeeded_nodes}" =~ ${node} ]]; then
NON_LOGEXPORTED_NODES+=("${node}")
fi
done
}
# This function examines NODE_NAMES but not WINDOWS_NODE_NAMES since logexporter
# does not run on Windows nodes.
function dump_nodes_with_logexporter() {
detect-node-names &> /dev/null
if [[ -z "${NODE_NAMES:-}" ]]; then
echo 'No nodes found!'
return
fi
# Obtain parameters required by logexporter.
local -r service_account_credentials="$(base64 "${GOOGLE_APPLICATION_CREDENTIALS}" | tr -d '\n')"
local -r cloud_provider="${KUBERNETES_PROVIDER}"
local -r enable_hollow_node_logs="${ENABLE_HOLLOW_NODE_LOGS:-false}"
local -r logexport_sleep_seconds="$(( 90 + NUM_NODES / 3 ))"
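# e.g. (hypothetical) NUM_NODES=300 gives 90 + 100 = 190 seconds of waiting.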
if [[ -z "${ZONE_NODE_SELECTOR_DISABLED:-}" ]]; then
local -r node_selector="${ZONE_NODE_SELECTOR_LABEL:-topology.kubernetes.io/zone}: ${ZONE}"
fi
# Fill in the parameters in the logexporter daemonset template.
local -r tmp="${KUBE_TEMP}/logexporter"
local -r manifest_yaml="${tmp}/logexporter-daemonset.yaml"
mkdir -p "${tmp}"
local -r cwd=$(dirname "${BASH_SOURCE[0]}")
cp "${cwd}/logexporter-daemonset.yaml" "${manifest_yaml}"
sed -i'' -e "s@{{.NodeSelector}}@${node_selector:-}@g" "${manifest_yaml}"
sed -i'' -e "s@{{.LogexporterNamespace}}@${logexporter_namespace}@g" "${manifest_yaml}"
sed -i'' -e "s@{{.ServiceAccountCredentials}}@${service_account_credentials}@g" "${manifest_yaml}"
sed -i'' -e "s@{{.CloudProvider}}@${cloud_provider}@g" "${manifest_yaml}"
sed -i'' -e "s@{{.GCSPath}}@${gcs_artifacts_dir}@g" "${manifest_yaml}"
sed -i'' -e "s@{{.EnableHollowNodeLogs}}@${enable_hollow_node_logs}@g" "${manifest_yaml}"
sed -i'' -e "s@{{.DumpSystemdJournal}}@${dump_systemd_journal}@g" "${manifest_yaml}"
sed -i'' -e "s@{{.ExtraLogFiles}}@${extra_log_files}@g" "${manifest_yaml}"
sed -i'' -e "s@{{.ExtraSystemdServices}}@${extra_systemd_services}@g" "${manifest_yaml}"
# Create the logexporter namespace, service-account secret and the logexporter daemonset within that namespace.
if ! kubectl create -f "${manifest_yaml}"; then
echo 'Failed to create logexporter daemonset... falling back to logdump through SSH'
kubectl delete namespace "${logexporter_namespace}" || true
dump_nodes "${NODE_NAMES[@]}"
logexporter_failed=1
return
fi
# Periodically fetch list of already logexported nodes to verify
# if we aren't already done.
start="$(date +%s)"
while true; do
now="$(date +%s)"
if [[ $((now - start)) -gt ${logexport_sleep_seconds} ]]; then
echo 'Timed out waiting for all nodes to be logexported.'
break
fi
if find_non_logexported_nodes; then
if [[ -z "${NON_LOGEXPORTED_NODES:-}" ]]; then
break
fi
fi
sleep 15
done
# Store logs from logexporter pods to allow debugging log exporting process
# itself.
proc=${max_dump_processes}
kubectl get pods -n "${logexporter_namespace}" -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.spec.nodeName}{"\n"}{end}' | (while read -r pod node; do
echo "Fetching logs from ${pod} running on ${node}"
mkdir -p "${report_dir}/${node}"
kubectl logs -n "${logexporter_namespace}" "${pod}" > "${report_dir}/${node}/${pod}.log" &
# We don't want to run more than ${max_dump_processes} at a time, so
# wait once we hit that many nodes. This isn't ideal, since one might
# take much longer than the others, but it should help.
proc=$((proc - 1))
if [[ proc -eq 0 ]]; then
proc=${max_dump_processes}
wait
fi
# Wait for any remaining processes.
done; wait)
# List registry of marker files (of nodes whose logexporter succeeded) from GCS.
for retry in {1..10}; do
if find_non_logexported_nodes; then
break
else
echo "Attempt ${retry} failed to list marker files for successful nodes"
if [[ "${retry}" == 10 ]]; then
echo 'Final attempt to list marker files failed... falling back to logdump through SSH'
kubectl delete namespace "${logexporter_namespace}" || true
dump_nodes "${NODE_NAMES[@]}"
logexporter_failed=1
return
fi
sleep 2
fi
done
failed_nodes=()
# The following if is needed because expanding an unset or empty array
# with a ':-' default can yield a single empty string, which would
# otherwise be treated as one (empty) node name.
if [[ -n "${NON_LOGEXPORTED_NODES:-}" ]]; then
for node in "${NON_LOGEXPORTED_NODES[@]:-}"; do
echo "Logexporter didn't succeed on node ${node}. Queuing it for logdump through SSH."
failed_nodes+=("${node}")
done
fi
# If less than a certain ratio of the nodes got logexported, report an error.
if [[ $(((${#NODE_NAMES[@]} - ${#failed_nodes[@]}) * 100)) -lt $((${#NODE_NAMES[@]} * log_dump_expected_success_percentage )) ]]; then
logexporter_failed=1
fi
# Delete the logexporter resources and dump logs for the failed nodes (if any) through SSH.
kubectl get pods --namespace "${logexporter_namespace}" || true
kubectl delete namespace "${logexporter_namespace}" || true
if [[ "${#failed_nodes[@]}" != 0 ]]; then
echo -e "Dumping logs through SSH for the following nodes:\n${failed_nodes[*]}"
dump_nodes "${failed_nodes[@]}"
fi
}
# Writes node information that's available through the gcloud and kubectl API
# surfaces to a nodes/ subdirectory of $report_dir.
function dump_node_info() {
nodes_dir="${report_dir}/nodes"
mkdir -p "${nodes_dir}"
detect-node-names
if [[ -n "${NODE_NAMES:-}" ]]; then
printf "%s\n" "${NODE_NAMES[@]}" > "${nodes_dir}/node_names.txt"
fi
if [[ -n "${WINDOWS_NODE_NAMES:-}" ]]; then
printf "%s\n" "${WINDOWS_NODE_NAMES[@]}" > "${nodes_dir}/windows_node_names.txt"
fi
kubectl get nodes -o yaml > "${nodes_dir}/kubectl_get_nodes.yaml" || true
}
function detect_node_failures() {
if ! [[ "${gcloud_supported_providers}" =~ ${KUBERNETES_PROVIDER} ]]; then
return
fi
detect-node-names
if [[ "${KUBERNETES_PROVIDER}" == "gce" ]]; then
local all_instance_groups=("${INSTANCE_GROUPS[@]}" "${WINDOWS_INSTANCE_GROUPS[@]}")
else
local all_instance_groups=("${INSTANCE_GROUPS[@]}")
fi
if [ -z "${all_instance_groups:-}" ]; then
return
fi
for group in "${all_instance_groups[@]}"; do
local creation_timestamp
creation_timestamp=$(gcloud compute instance-groups managed describe \
"${group}" \
--project "${PROJECT}" \
--zone "${ZONE}" \
--format='value(creationTimestamp)')
echo "Failures for ${group} (if any):"
gcloud logging read --order=asc \
--format='table(timestamp,jsonPayload.resource.name,jsonPayload.event_subtype)' \
--project "${PROJECT}" \
"resource.type=\"gce_instance\"
logName=\"projects/${PROJECT}/logs/compute.googleapis.com%2Factivity_log\"
(jsonPayload.event_subtype=\"compute.instances.hostError\" OR jsonPayload.event_subtype=\"compute.instances.automaticRestart\")
jsonPayload.resource.name:\"${group}\"
timestamp >= \"${creation_timestamp}\""
done
}
function dump_logs() {
# Copy master logs to artifacts dir locally (through SSH).
echo "Dumping logs from master locally to '${report_dir}'"
dump_masters
if [[ "${DUMP_ONLY_MASTER_LOGS:-}" == "true" ]]; then
echo 'Skipping dumping of node logs'
return
fi
# Copy logs from nodes to GCS directly or to artifacts dir locally (through SSH).
if [[ -n "${gcs_artifacts_dir}" ]]; then
echo "Dumping logs from nodes to GCS directly at '${gcs_artifacts_dir}' using logexporter"
dump_nodes_with_logexporter
else
echo "Dumping logs from nodes locally to '${report_dir}'"
dump_nodes
fi
}
# When ${DUMP_TO_GCS_ONLY} != true:
# * only logs exported by logexporter will be uploaded to
# ${gcs_artifacts_dir}
# * other logs (master logs, nodes where logexporter failed) will be
# fetched locally to ${report_dir}.
# When ${DUMP_TO_GCS_ONLY} == true, all logs will be uploaded directly to
# ${gcs_artifacts_dir}.
function main() {
setup
kube::util::ensure-temp-dir
if [[ "${DUMP_TO_GCS_ONLY:-}" == "true" ]] && [[ -n "${gcs_artifacts_dir}" ]]; then
report_dir="${KUBE_TEMP}/logs"
mkdir -p "${report_dir}"
echo "${gcs_artifacts_dir}" > "${local_report_dir}/master-and-node-logs.link.txt"
echo "Dumping logs temporarily to '${report_dir}'. Will upload to '${gcs_artifacts_dir}' later."
else
report_dir="${local_report_dir}"
fi
dump_logs
dump_node_info
if [[ "${DUMP_TO_GCS_ONLY:-}" == "true" ]] && [[ -n "${gcs_artifacts_dir}" ]]; then
if [[ "$(ls -A ${report_dir})" ]]; then
echo "Uploading '${report_dir}' to '${gcs_artifacts_dir}'"
if gsutil ls "${gcs_artifacts_dir}" > /dev/null; then
# If "${gcs_artifacts_dir}" exists, the simple call:
# `gsutil cp -r /tmp/dir/logs ${gcs_artifacts_dir}` will
# create subdirectory 'logs' in ${gcs_artifacts_dir}
#
# If "${gcs_artifacts_dir}" exists, we want to merge its content
# with local logs. To do that we do the following trick:
# * Let's say that ${gcs_artifacts_dir} == 'gs://a/b/c'.
# * We rename 'logs' to 'c'
# * Call `gsutil cp -r /tmp/dir/c gs://a/b/`
#
# Similar pattern is used in bootstrap.py#L409-L416.
# It is a known issue that gsutil cp behavior is that complex.
# For more information on this, see:
# https://cloud.google.com/storage/docs/gsutil/commands/cp#how-names-are-constructed
remote_dir=$(dirname "${gcs_artifacts_dir}")
remote_basename=$(basename "${gcs_artifacts_dir}")
mv "${report_dir}" "${KUBE_TEMP}/${remote_basename}"
gsutil -m cp -r -c -z log,txt,xml "${KUBE_TEMP}/${remote_basename}" "${remote_dir}"
rm -rf "${KUBE_TEMP}/${remote_basename}"
else # ${gcs_artifacts_dir} doesn't exist.
gsutil -m cp -r -c -z log,txt,xml "${report_dir}" "${gcs_artifacts_dir}"
rm -rf "${report_dir}"
fi
else
echo "Skipping upload of '${report_dir}' as it's empty."
fi
fi
detect_node_failures
if [[ ${logexporter_failed} -ne 0 && ${log_dump_expected_success_percentage} -gt 0 ]]; then
return 1
fi
}
main
|
michelle192837/test-infra
|
logexporter/cluster/log-dump.sh
|
Shell
|
apache-2.0
| 37,852 |
#!/bin/bash
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script is for configuring kubernetes master and node instances. It is
# uploaded in the manifests tar ball.
# TODO: this script duplicates templating logic from cluster/saltbase/salt
# using sed. It should use an actual template parser on the manifest
# files.
set -o errexit
set -o nounset
set -o pipefail
function setup-os-params {
# Reset core_pattern. On GCI, the default core_pattern pipes the core dumps to
# /sbin/crash_reporter which is more restrictive in saving crash dumps. So for
# now, set a generic core_pattern that users can work with.
echo "core.%e.%p.%t" > /proc/sys/kernel/core_pattern
}
# Vars assumed:
# NUM_NODES
function get-calico-node-cpu {
local suggested_calico_cpus=100m
if [[ "${NUM_NODES}" -gt "10" ]]; then
suggested_calico_cpus=250m
fi
if [[ "${NUM_NODES}" -gt "100" ]]; then
suggested_calico_cpus=500m
fi
if [[ "${NUM_NODES}" -gt "500" ]]; then
suggested_calico_cpus=1000m
fi
echo "${suggested_calico_cpus}"
}
# Vars assumed:
# NUM_NODES
function get-calico-typha-replicas {
local typha_count=1
if [[ "${NUM_NODES}" -gt "10" ]]; then
typha_count=2
fi
if [[ "${NUM_NODES}" -gt "100" ]]; then
typha_count=3
fi
if [[ "${NUM_NODES}" -gt "250" ]]; then
typha_count=4
fi
if [[ "${NUM_NODES}" -gt "500" ]]; then
typha_count=5
fi
echo "${typha_count}"
}
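# Worked example for the sizing above (hypothetical cluster): NUM_NODES=150
# passes only the >10 and >100 checks, so get-calico-typha-replicas prints 3.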
# Vars assumed:
# NUM_NODES
function get-calico-typha-cpu {
local typha_cpu=200m
if [[ "${NUM_NODES}" -gt "10" ]]; then
typha_cpu=500m
fi
if [[ "${NUM_NODES}" -gt "100" ]]; then
typha_cpu=1000m
fi
echo "${typha_cpu}"
}
function config-ip-firewall {
echo "Configuring IP firewall rules"
# The GCI image has host firewall which drop most inbound/forwarded packets.
# We need to add rules to accept all TCP/UDP/ICMP packets.
if iptables -L INPUT | grep "Chain INPUT (policy DROP)" > /dev/null; then
echo "Add rules to accept all inbound TCP/UDP/ICMP packets"
iptables -A INPUT -w -p TCP -j ACCEPT
iptables -A INPUT -w -p UDP -j ACCEPT
iptables -A INPUT -w -p ICMP -j ACCEPT
fi
if iptables -L FORWARD | grep "Chain FORWARD (policy DROP)" > /dev/null; then
echo "Add rules to accept all forwarded TCP/UDP/ICMP packets"
iptables -A FORWARD -w -p TCP -j ACCEPT
iptables -A FORWARD -w -p UDP -j ACCEPT
iptables -A FORWARD -w -p ICMP -j ACCEPT
fi
iptables -N KUBE-METADATA-SERVER
iptables -I FORWARD -p tcp -d 169.254.169.254 --dport 80 -j KUBE-METADATA-SERVER
if [[ -n "${KUBE_FIREWALL_METADATA_SERVER:-}" ]]; then
iptables -A KUBE-METADATA-SERVER -j DROP
fi
}
function create-dirs {
echo "Creating required directories"
mkdir -p /var/lib/kubelet
mkdir -p /etc/kubernetes/manifests
if [[ "${KUBERNETES_MASTER:-}" == "false" ]]; then
mkdir -p /var/lib/kube-proxy
fi
}
# Formats the given device ($1) if needed and mounts it at the given mount
# point ($2).
function safe-format-and-mount() {
device=$1
mountpoint=$2
# Format only if the disk is not already formatted.
if ! tune2fs -l "${device}" ; then
echo "Formatting '${device}'"
mkfs.ext4 -F "${device}"
fi
mkdir -p "${mountpoint}"
echo "Mounting '${device}' at '${mountpoint}'"
mount -o discard,defaults "${device}" "${mountpoint}"
}
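# Example invocation with a hypothetical device and mount point:
#   safe-format-and-mount "/dev/sdb" "/mnt/disks/pd1"
# An already-formatted disk passes the tune2fs probe and is only mounted.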
# Local SSDs, if present, are mounted at /mnt/disks/ssdN.
function ensure-local-ssds() {
for ssd in /dev/disk/by-id/google-local-ssd-*; do
if [ -e "${ssd}" ]; then
ssdnum=$(echo "${ssd}" | sed -e 's/\/dev\/disk\/by-id\/google-local-ssd-\([0-9]*\)/\1/')
ssdmount="/mnt/disks/ssd${ssdnum}/"
mkdir -p "${ssdmount}"
safe-format-and-mount "${ssd}" "${ssdmount}"
echo "Mounted local SSD ${ssd} at ${ssdmount}"
chmod a+w "${ssdmount}"
else
echo "No local SSD disks found."
fi
done
}
# Installs logrotate configuration files
function setup-logrotate() {
mkdir -p /etc/logrotate.d/
# Configure log rotation for all logs in /var/log, which is where k8s services
# are configured to write their log files. Whenever logrotate is run, this
# config will:
# * rotate the log file if its size is > 100MB OR if one day has elapsed
# * save rotated logs into a gzipped timestamped backup
# * log file timestamp (controlled by 'dateformat') includes seconds too. This
# ensures that logrotate can generate unique logfiles during each rotation
# (otherwise it skips rotation if 'maxsize' is reached multiple times in a
# day).
# * keep only the 5 most recent rotated logs and discard older ones.
cat > /etc/logrotate.d/allvarlogs <<EOF
/var/log/*.log {
rotate ${LOGROTATE_FILES_MAX_COUNT:-5}
copytruncate
missingok
notifempty
compress
maxsize ${LOGROTATE_MAX_SIZE:-100M}
daily
dateext
dateformat -%Y%m%d-%s
create 0644 root root
}
EOF
}
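# The rotation knobs are plain environment variables, so tuning is just a
# matter of setting them before this function runs (hypothetical values):
#   export LOGROTATE_FILES_MAX_COUNT=10 LOGROTATE_MAX_SIZE=200M
#   setup-logrotate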
# Finds the master PD device; returns it in MASTER_PD_DEVICE
function find-master-pd {
MASTER_PD_DEVICE=""
if [[ ! -e /dev/disk/by-id/google-master-pd ]]; then
return
fi
device_info=$(ls -l /dev/disk/by-id/google-master-pd)
relative_path=${device_info##* }
MASTER_PD_DEVICE="/dev/disk/by-id/${relative_path}"
}
# Mounts a persistent disk (formatting if needed) to store the persistent data
# on the master -- etcd's data, a few settings, and security certs/keys/tokens.
# safe-format-and-mount only formats an unformatted disk, and mkdir -p will
# leave a directory be if it already exists.
function mount-master-pd {
find-master-pd
if [[ -z "${MASTER_PD_DEVICE:-}" ]]; then
return
fi
echo "Mounting master-pd"
local -r pd_path="/dev/disk/by-id/google-master-pd"
local -r mount_point="/mnt/disks/master-pd"
# Format and mount the disk, create directories on it for all of the master's
# persistent data, and link them to where they're used.
mkdir -p "${mount_point}"
safe-format-and-mount "${pd_path}" "${mount_point}"
echo "Mounted master-pd '${pd_path}' at '${mount_point}'"
# NOTE: These locations on the PD store persistent data, so to maintain
# upgradeability, these locations should not change. If they do, take care
# to maintain a migration path from these locations to whatever new
# locations.
# Contains all the data stored in etcd.
mkdir -m 700 -p "${mount_point}/var/etcd"
ln -s -f "${mount_point}/var/etcd" /var/etcd
mkdir -p /etc/srv
# Contains the dynamically generated apiserver auth certs and keys.
mkdir -p "${mount_point}/srv/kubernetes"
ln -s -f "${mount_point}/srv/kubernetes" /etc/srv/kubernetes
# Directory for kube-apiserver to store SSH key (if necessary).
mkdir -p "${mount_point}/srv/sshproxy"
ln -s -f "${mount_point}/srv/sshproxy" /etc/srv/sshproxy
if ! id etcd &>/dev/null; then
useradd -s /sbin/nologin -d /var/etcd etcd
fi
chown -R etcd "${mount_point}/var/etcd"
chgrp -R etcd "${mount_point}/var/etcd"
}
# append_or_replace_prefixed_line ensures:
# 1. the specified file exists
# 2. existing lines with the specified ${prefix} are removed
# 3. a new line with the specified ${prefix}${suffix} is appended
function append_or_replace_prefixed_line {
local -r file="${1:-}"
local -r prefix="${2:-}"
local -r suffix="${3:-}"
local -r dirname="$(dirname "${file}")"
local -r tmpfile="$(mktemp -t filtered.XXXX --tmpdir="${dirname}")"
touch "${file}"
# awk strings are 1-indexed; start at 1 so the leading prefix is compared.
awk "substr(\$0,1,length(\"${prefix}\")) != \"${prefix}\" { print }" "${file}" > "${tmpfile}"
echo "${prefix}${suffix}" >> "${tmpfile}"
mv "${tmpfile}" "${file}"
}
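# Example of the idempotent update this provides (hypothetical token):
#   append_or_replace_prefixed_line /tmp/tokens.csv "abc123," "admin,admin"
# Re-running with a new suffix replaces the old "abc123,..." line instead of
# appending a duplicate.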
function create-node-pki {
echo "Creating node pki files"
local -r pki_dir="/etc/srv/kubernetes/pki"
mkdir -p "${pki_dir}"
if [[ -z "${CA_CERT_BUNDLE:-}" ]]; then
CA_CERT_BUNDLE="${CA_CERT}"
fi
CA_CERT_BUNDLE_PATH="${pki_dir}/ca-certificates.crt"
echo "${CA_CERT_BUNDLE}" | base64 --decode > "${CA_CERT_BUNDLE_PATH}"
if [[ ! -z "${KUBELET_CERT:-}" && ! -z "${KUBELET_KEY:-}" ]]; then
KUBELET_CERT_PATH="${pki_dir}/kubelet.crt"
echo "${KUBELET_CERT}" | base64 --decode > "${KUBELET_CERT_PATH}"
KUBELET_KEY_PATH="${pki_dir}/kubelet.key"
echo "${KUBELET_KEY}" | base64 --decode > "${KUBELET_KEY_PATH}"
fi
# TODO(mikedanese): remove this when we don't support downgrading to versions
# < 1.6.
ln -sf "${CA_CERT_BUNDLE_PATH}" /etc/srv/kubernetes/ca.crt
}
function create-master-pki {
echo "Creating master pki files"
local -r pki_dir="/etc/srv/kubernetes/pki"
mkdir -p "${pki_dir}"
CA_CERT_PATH="${pki_dir}/ca.crt"
echo "${CA_CERT}" | base64 --decode > "${CA_CERT_PATH}"
# this is not true on GKE
if [[ ! -z "${CA_KEY:-}" ]]; then
CA_KEY_PATH="${pki_dir}/ca.key"
echo "${CA_KEY}" | base64 --decode > "${CA_KEY_PATH}"
fi
if [[ -z "${APISERVER_SERVER_CERT:-}" || -z "${APISERVER_SERVER_KEY:-}" ]]; then
APISERVER_SERVER_CERT="${MASTER_CERT}"
APISERVER_SERVER_KEY="${MASTER_KEY}"
fi
APISERVER_SERVER_CERT_PATH="${pki_dir}/apiserver.crt"
echo "${APISERVER_SERVER_CERT}" | base64 --decode > "${APISERVER_SERVER_CERT_PATH}"
APISERVER_SERVER_KEY_PATH="${pki_dir}/apiserver.key"
echo "${APISERVER_SERVER_KEY}" | base64 --decode > "${APISERVER_SERVER_KEY_PATH}"
if [[ -z "${APISERVER_CLIENT_CERT:-}" || -z "${APISERVER_CLIENT_KEY:-}" ]]; then
APISERVER_CLIENT_CERT="${KUBEAPISERVER_CERT}"
APISERVER_CLIENT_KEY="${KUBEAPISERVER_KEY}"
fi
APISERVER_CLIENT_CERT_PATH="${pki_dir}/apiserver-client.crt"
echo "${APISERVER_CLIENT_CERT}" | base64 --decode > "${APISERVER_CLIENT_CERT_PATH}"
APISERVER_CLIENT_KEY_PATH="${pki_dir}/apiserver-client.key"
echo "${APISERVER_CLIENT_KEY}" | base64 --decode > "${APISERVER_CLIENT_KEY_PATH}"
if [[ -z "${SERVICEACCOUNT_CERT:-}" || -z "${SERVICEACCOUNT_KEY:-}" ]]; then
SERVICEACCOUNT_CERT="${MASTER_CERT}"
SERVICEACCOUNT_KEY="${MASTER_KEY}"
fi
SERVICEACCOUNT_CERT_PATH="${pki_dir}/serviceaccount.crt"
echo "${SERVICEACCOUNT_CERT}" | base64 --decode > "${SERVICEACCOUNT_CERT_PATH}"
SERVICEACCOUNT_KEY_PATH="${pki_dir}/serviceaccount.key"
echo "${SERVICEACCOUNT_KEY}" | base64 --decode > "${SERVICEACCOUNT_KEY_PATH}"
# TODO(mikedanese): remove this when we don't support downgrading to versions
# < 1.6.
ln -sf "${APISERVER_SERVER_KEY_PATH}" /etc/srv/kubernetes/server.key
ln -sf "${APISERVER_SERVER_CERT_PATH}" /etc/srv/kubernetes/server.cert
if [[ ! -z "${REQUESTHEADER_CA_CERT:-}" ]]; then
AGGREGATOR_CA_KEY_PATH="${pki_dir}/aggr_ca.key"
echo "${AGGREGATOR_CA_KEY}" | base64 --decode > "${AGGREGATOR_CA_KEY_PATH}"
REQUESTHEADER_CA_CERT_PATH="${pki_dir}/aggr_ca.crt"
echo "${REQUESTHEADER_CA_CERT}" | base64 --decode > "${REQUESTHEADER_CA_CERT_PATH}"
PROXY_CLIENT_KEY_PATH="${pki_dir}/proxy_client.key"
echo "${PROXY_CLIENT_KEY}" | base64 --decode > "${PROXY_CLIENT_KEY_PATH}"
PROXY_CLIENT_CERT_PATH="${pki_dir}/proxy_client.crt"
echo "${PROXY_CLIENT_CERT}" | base64 --decode > "${PROXY_CLIENT_CERT_PATH}"
fi
}
# After the first boot and on upgrade, these files exist on the master-pd
# and should never be touched again (except perhaps an additional service
# account, see NB below.) One exception is if METADATA_CLOBBERS_CONFIG is
# enabled. In that case the basic_auth.csv file will be rewritten to make
# sure it matches the metadata source of truth.
function create-master-auth {
echo "Creating master auth files"
local -r auth_dir="/etc/srv/kubernetes"
local -r basic_auth_csv="${auth_dir}/basic_auth.csv"
if [[ -n "${KUBE_PASSWORD:-}" && -n "${KUBE_USER:-}" ]]; then
if [[ -e "${basic_auth_csv}" && "${METADATA_CLOBBERS_CONFIG:-false}" == "true" ]]; then
# If METADATA_CLOBBERS_CONFIG is true, we want to rewrite the file
# completely, because if we're changing KUBE_USER and KUBE_PASSWORD, we
# have nothing to match on. The file is replaced just below with
# append_or_replace_prefixed_line.
rm "${basic_auth_csv}"
fi
append_or_replace_prefixed_line "${basic_auth_csv}" "${KUBE_PASSWORD},${KUBE_USER}," "admin,system:masters"
fi
local -r known_tokens_csv="${auth_dir}/known_tokens.csv"
if [[ -e "${known_tokens_csv}" && "${METADATA_CLOBBERS_CONFIG:-false}" == "true" ]]; then
rm "${known_tokens_csv}"
fi
if [[ -n "${KUBE_BEARER_TOKEN:-}" ]]; then
append_or_replace_prefixed_line "${known_tokens_csv}" "${KUBE_BEARER_TOKEN}," "admin,admin,system:masters"
fi
if [[ -n "${KUBE_CONTROLLER_MANAGER_TOKEN:-}" ]]; then
append_or_replace_prefixed_line "${known_tokens_csv}" "${KUBE_CONTROLLER_MANAGER_TOKEN}," "system:kube-controller-manager,uid:system:kube-controller-manager"
fi
if [[ -n "${KUBE_SCHEDULER_TOKEN:-}" ]]; then
append_or_replace_prefixed_line "${known_tokens_csv}" "${KUBE_SCHEDULER_TOKEN}," "system:kube-scheduler,uid:system:kube-scheduler"
fi
if [[ -n "${KUBE_PROXY_TOKEN:-}" ]]; then
append_or_replace_prefixed_line "${known_tokens_csv}" "${KUBE_PROXY_TOKEN}," "system:kube-proxy,uid:kube_proxy"
fi
if [[ -n "${NODE_PROBLEM_DETECTOR_TOKEN:-}" ]]; then
append_or_replace_prefixed_line "${known_tokens_csv}" "${NODE_PROBLEM_DETECTOR_TOKEN}," "system:node-problem-detector,uid:node-problem-detector"
fi
local use_cloud_config="false"
cat <<EOF >/etc/gce.conf
[global]
EOF
if [[ -n "${GCE_API_ENDPOINT:-}" ]]; then
cat <<EOF >>/etc/gce.conf
api-endpoint = ${GCE_API_ENDPOINT}
EOF
fi
if [[ -n "${PROJECT_ID:-}" && -n "${TOKEN_URL:-}" && -n "${TOKEN_BODY:-}" && -n "${NODE_NETWORK:-}" ]]; then
use_cloud_config="true"
cat <<EOF >>/etc/gce.conf
token-url = ${TOKEN_URL}
token-body = ${TOKEN_BODY}
project-id = ${PROJECT_ID}
network-name = ${NODE_NETWORK}
EOF
if [[ -n "${NETWORK_PROJECT_ID:-}" ]]; then
cat <<EOF >>/etc/gce.conf
network-project-id = ${NETWORK_PROJECT_ID}
EOF
fi
if [[ -n "${NODE_SUBNETWORK:-}" ]]; then
cat <<EOF >>/etc/gce.conf
subnetwork-name = ${NODE_SUBNETWORK}
EOF
fi
fi
if [[ -n "${NODE_INSTANCE_PREFIX:-}" ]]; then
use_cloud_config="true"
if [[ -n "${NODE_TAGS:-}" ]]; then
local -r node_tags="${NODE_TAGS}"
else
local -r node_tags="${NODE_INSTANCE_PREFIX}"
fi
cat <<EOF >>/etc/gce.conf
node-tags = ${node_tags}
node-instance-prefix = ${NODE_INSTANCE_PREFIX}
EOF
fi
if [[ -n "${MULTIZONE:-}" ]]; then
use_cloud_config="true"
cat <<EOF >>/etc/gce.conf
multizone = ${MULTIZONE}
EOF
fi
if [[ "${use_cloud_config}" != "true" ]]; then
rm -f /etc/gce.conf
fi
if [[ -n "${GCP_AUTHN_URL:-}" ]]; then
cat <<EOF >/etc/gcp_authn.config
clusters:
- name: gcp-authentication-server
cluster:
server: ${GCP_AUTHN_URL}
users:
- name: kube-apiserver
user:
auth-provider:
name: gcp
current-context: webhook
contexts:
- context:
cluster: gcp-authentication-server
user: kube-apiserver
name: webhook
EOF
fi
if [[ -n "${GCP_AUTHZ_URL:-}" ]]; then
cat <<EOF >/etc/gcp_authz.config
clusters:
- name: gcp-authorization-server
cluster:
server: ${GCP_AUTHZ_URL}
users:
- name: kube-apiserver
user:
auth-provider:
name: gcp
current-context: webhook
contexts:
- context:
cluster: gcp-authorization-server
user: kube-apiserver
name: webhook
EOF
fi
if [[ -n "${GCP_IMAGE_VERIFICATION_URL:-}" ]]; then
# This is the config file for the image review webhook.
cat <<EOF >/etc/gcp_image_review.config
clusters:
- name: gcp-image-review-server
cluster:
server: ${GCP_IMAGE_VERIFICATION_URL}
users:
- name: kube-apiserver
user:
auth-provider:
name: gcp
current-context: webhook
contexts:
- context:
cluster: gcp-image-review-server
user: kube-apiserver
name: webhook
EOF
# This is the config for the image review admission controller.
cat <<EOF >/etc/admission_controller.config
imagePolicy:
kubeConfigFile: /etc/gcp_image_review.config
allowTTL: 30
denyTTL: 30
retryBackoff: 500
defaultAllow: true
EOF
fi
}
# Write the config for the audit policy.
function create-master-audit-policy {
local -r path="${1}"
# Known api groups
local -r known_apis='
- group: "" # core
- group: "admissionregistration.k8s.io"
- group: "apps"
- group: "authentication.k8s.io"
- group: "authorization.k8s.io"
- group: "autoscaling"
- group: "batch"
- group: "certificates.k8s.io"
- group: "extensions"
- group: "networking.k8s.io"
- group: "policy"
- group: "rbac.authorization.k8s.io"
- group: "settings.k8s.io"
- group: "storage.k8s.io"'
cat <<EOF >"${path}"
apiVersion: audit.k8s.io/v1alpha1
kind: Policy
rules:
# The following requests were manually identified as high-volume and low-risk,
# so drop them.
- level: None
users: ["system:kube-proxy"]
verbs: ["watch"]
resources:
- group: "" # core
resources: ["endpoints", "services"]
- level: None
# Ingress controller reads `configmaps/ingress-uid` through the unsecured port.
# TODO(#46983): Change this to the ingress controller service account.
users: ["system:unsecured"]
namespaces: ["kube-system"]
verbs: ["get"]
resources:
- group: "" # core
resources: ["configmaps"]
- level: None
users: ["kubelet"] # legacy kubelet identity
verbs: ["get"]
resources:
- group: "" # core
resources: ["nodes"]
- level: None
userGroups: ["system:nodes"]
verbs: ["get"]
resources:
- group: "" # core
resources: ["nodes"]
- level: None
users:
- system:kube-controller-manager
- system:kube-scheduler
- system:serviceaccount:kube-system:endpoint-controller
verbs: ["get", "update"]
namespaces: ["kube-system"]
resources:
- group: "" # core
resources: ["endpoints"]
- level: None
users: ["system:apiserver"]
verbs: ["get"]
resources:
- group: "" # core
resources: ["namespaces"]
# Don't log these read-only URLs.
- level: None
nonResourceURLs:
- /healthz*
- /version
- /swagger*
# Don't log events requests.
- level: None
resources:
- group: "" # core
resources: ["events"]
# Secrets, ConfigMaps, and TokenReviews can contain sensitive & binary data,
# so only log at the Metadata level.
- level: Metadata
resources:
- group: "" # core
resources: ["secrets", "configmaps"]
- group: authentication.k8s.io
resources: ["tokenreviews"]
# Get responses can be large; skip them.
- level: Request
verbs: ["get", "list", "watch"]
resources: ${known_apis}
# Default level for known APIs
- level: RequestResponse
resources: ${known_apis}
# Default level for all other requests.
- level: Metadata
EOF
}
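# Audit policy rules are matched top-down and the first match wins: e.g. a
# kube-proxy watch on endpoints hits the initial "level: None" rule and is
# dropped, while a create of a secret falls through to the Metadata rule.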
# Writes the configuration file used by the webhook advanced auditing backend.
function create-master-audit-webhook-config {
local -r path="${1}"
if [[ -n "${GCP_AUDIT_URL:-}" ]]; then
# The webhook config file is a kubeconfig file describing the webhook endpoint.
cat <<EOF >"${path}"
clusters:
- name: gcp-audit-server
cluster:
server: ${GCP_AUDIT_URL}
users:
- name: kube-apiserver
user:
auth-provider:
name: gcp
current-context: webhook
contexts:
- context:
cluster: gcp-audit-server
user: kube-apiserver
name: webhook
EOF
fi
}
# Arg 1: the IP address of the API server
function create-kubelet-kubeconfig() {
local apiserver_address="${1}"
if [[ -z "${apiserver_address}" ]]; then
echo "Must provide API server address to create Kubelet kubeconfig file!"
exit 1
fi
echo "Creating kubelet kubeconfig file"
cat <<EOF >/var/lib/kubelet/bootstrap-kubeconfig
apiVersion: v1
kind: Config
users:
- name: kubelet
user:
client-certificate: ${KUBELET_CERT_PATH}
client-key: ${KUBELET_KEY_PATH}
clusters:
- name: local
cluster:
server: https://${apiserver_address}
certificate-authority: ${CA_CERT_BUNDLE_PATH}
contexts:
- context:
cluster: local
user: kubelet
name: service-account-context
current-context: service-account-context
EOF
}
# Uses KUBELET_CA_CERT (falling back to CA_CERT), KUBELET_CERT, and KUBELET_KEY
# to generate a kubeconfig file for the kubelet to securely connect to the apiserver.
# Set REGISTER_MASTER_KUBELET to true if kubelet on the master node
# should register to the apiserver.
function create-master-kubelet-auth {
# Only configure the kubelet on the master if the required variables are
# set in the environment.
if [[ -n "${KUBELET_APISERVER:-}" && -n "${KUBELET_CERT:-}" && -n "${KUBELET_KEY:-}" ]]; then
REGISTER_MASTER_KUBELET="true"
create-kubelet-kubeconfig "${KUBELET_APISERVER}"
fi
}
function create-kubeproxy-kubeconfig {
echo "Creating kube-proxy kubeconfig file"
cat <<EOF >/var/lib/kube-proxy/kubeconfig
apiVersion: v1
kind: Config
users:
- name: kube-proxy
user:
token: ${KUBE_PROXY_TOKEN}
clusters:
- name: local
cluster:
certificate-authority-data: ${CA_CERT_BUNDLE}
contexts:
- context:
cluster: local
user: kube-proxy
name: service-account-context
current-context: service-account-context
EOF
}
function create-kubecontrollermanager-kubeconfig {
echo "Creating kube-controller-manager kubeconfig file"
mkdir -p /etc/srv/kubernetes/kube-controller-manager
cat <<EOF >/etc/srv/kubernetes/kube-controller-manager/kubeconfig
apiVersion: v1
kind: Config
users:
- name: kube-controller-manager
user:
token: ${KUBE_CONTROLLER_MANAGER_TOKEN}
clusters:
- name: local
cluster:
insecure-skip-tls-verify: true
server: https://localhost:443
contexts:
- context:
cluster: local
user: kube-controller-manager
name: service-account-context
current-context: service-account-context
EOF
}
function create-kubescheduler-kubeconfig {
echo "Creating kube-scheduler kubeconfig file"
mkdir -p /etc/srv/kubernetes/kube-scheduler
cat <<EOF >/etc/srv/kubernetes/kube-scheduler/kubeconfig
apiVersion: v1
kind: Config
users:
- name: kube-scheduler
user:
token: ${KUBE_SCHEDULER_TOKEN}
clusters:
- name: local
cluster:
insecure-skip-tls-verify: true
server: https://localhost:443
contexts:
- context:
cluster: local
user: kube-scheduler
name: kube-scheduler
current-context: kube-scheduler
EOF
}
function create-node-problem-detector-kubeconfig {
echo "Creating node-problem-detector kubeconfig file"
mkdir -p /var/lib/node-problem-detector
cat <<EOF >/var/lib/node-problem-detector/kubeconfig
apiVersion: v1
kind: Config
users:
- name: node-problem-detector
user:
token: ${NODE_PROBLEM_DETECTOR_TOKEN}
clusters:
- name: local
cluster:
certificate-authority-data: ${CA_CERT}
contexts:
- context:
cluster: local
user: node-problem-detector
name: service-account-context
current-context: service-account-context
EOF
}
function create-master-etcd-auth {
if [[ -n "${ETCD_CA_CERT:-}" && -n "${ETCD_PEER_KEY:-}" && -n "${ETCD_PEER_CERT:-}" ]]; then
local -r auth_dir="/etc/srv/kubernetes"
echo "${ETCD_CA_CERT}" | base64 --decode | gunzip > "${auth_dir}/etcd-ca.crt"
echo "${ETCD_PEER_KEY}" | base64 --decode > "${auth_dir}/etcd-peer.key"
echo "${ETCD_PEER_CERT}" | base64 --decode | gunzip > "${auth_dir}/etcd-peer.crt"
fi
}
function assemble-docker-flags {
echo "Assemble docker command line flags"
local docker_opts="-p /var/run/docker.pid --iptables=false --ip-masq=false"
if [[ "${TEST_CLUSTER:-}" == "true" ]]; then
docker_opts+=" --log-level=debug"
else
docker_opts+=" --log-level=warn"
fi
local use_net_plugin="true"
if [[ "${NETWORK_PROVIDER:-}" == "kubenet" || "${NETWORK_PROVIDER:-}" == "cni" ]]; then
# set docker0 cidr to private ip address range to avoid conflict with cbr0 cidr range
docker_opts+=" --bip=169.254.123.1/24"
else
use_net_plugin="false"
docker_opts+=" --bridge=cbr0"
fi
# Decide whether to enable a docker registry mirror. This is taken from
# the "kube-env" metadata value.
if [[ -n "${DOCKER_REGISTRY_MIRROR_URL:-}" ]]; then
echo "Enable docker registry mirror at: ${DOCKER_REGISTRY_MIRROR_URL}"
docker_opts+=" --registry-mirror=${DOCKER_REGISTRY_MIRROR_URL}"
fi
# Configure docker logging
docker_opts+=" --log-driver=${DOCKER_LOG_DRIVER:-json-file}"
docker_opts+=" --log-opt=max-size=${DOCKER_LOG_MAX_SIZE:-10m}"
docker_opts+=" --log-opt=max-file=${DOCKER_LOG_MAX_FILE:-5}"
echo "DOCKER_OPTS=\"${docker_opts} ${EXTRA_DOCKER_OPTS:-}\"" > /etc/default/docker
if [[ "${use_net_plugin}" == "true" ]]; then
# If using a network plugin, extend the docker configuration to always remove
# the network checkpoint to avoid corrupt checkpoints.
# (https://github.com/docker/docker/issues/18283).
echo "Extend the default docker.service configuration"
mkdir -p /etc/systemd/system/docker.service.d
cat <<EOF >/etc/systemd/system/docker.service.d/01network.conf
[Service]
ExecStartPre=/bin/sh -x -c "rm -rf /var/lib/docker/network"
EOF
systemctl daemon-reload
# If using a network plugin, we need to explicitly restart docker daemon, because
# kubelet will not do it.
echo "Docker command line is updated. Restart docker to pick it up"
systemctl restart docker
fi
}
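# As a sketch, on a kubenet test cluster with no EXTRA_DOCKER_OPTS, the
# generated /etc/default/docker would contain roughly:
#   DOCKER_OPTS="-p /var/run/docker.pid --iptables=false --ip-masq=false
#     --log-level=debug --bip=169.254.123.1/24 --log-driver=json-file
#     --log-opt=max-size=10m --log-opt=max-file=5 "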
# This function assembles the kubelet systemd service file and starts it
# using systemctl.
function start-kubelet {
echo "Start kubelet"
local -r kubelet_cert_dir="/var/lib/kubelet/pki/"
mkdir -p "${kubelet_cert_dir}"
local kubelet_bin="${KUBE_HOME}/bin/kubelet"
local -r version="$("${kubelet_bin}" --version=true | cut -f2 -d " ")"
local -r builtin_kubelet="/usr/bin/kubelet"
if [[ "${TEST_CLUSTER:-}" == "true" ]]; then
# Determine which binary to use on test clusters. We use the built-in
# version only if the downloaded version is the same as the built-in
# version. This allows GCI to run some of the e2e tests to qualify the
# built-in kubelet.
if [[ -x "${builtin_kubelet}" ]]; then
local -r builtin_version="$("${builtin_kubelet}" --version=true | cut -f2 -d " ")"
if [[ "${builtin_version}" == "${version}" ]]; then
kubelet_bin="${builtin_kubelet}"
fi
fi
fi
echo "Using kubelet binary at ${kubelet_bin}"
local flags="${KUBELET_TEST_LOG_LEVEL:-"--v=2"} ${KUBELET_TEST_ARGS:-}"
flags+=" --allow-privileged=true"
flags+=" --cgroup-root=/"
flags+=" --cloud-provider=gce"
flags+=" --cluster-dns=${DNS_SERVER_IP}"
flags+=" --cluster-domain=${DNS_DOMAIN}"
flags+=" --pod-manifest-path=/etc/kubernetes/manifests"
flags+=" --experimental-mounter-path=${CONTAINERIZED_MOUNTER_HOME}/mounter"
flags+=" --experimental-check-node-capabilities-before-mount=true"
flags+=" --cert-dir=${kubelet_cert_dir}"
if [[ -n "${KUBELET_PORT:-}" ]]; then
flags+=" --port=${KUBELET_PORT}"
fi
if [[ "${KUBERNETES_MASTER:-}" == "true" ]]; then
flags+=" ${MASTER_KUBELET_TEST_ARGS:-}"
flags+=" --enable-debugging-handlers=false"
flags+=" --hairpin-mode=none"
if [[ "${REGISTER_MASTER_KUBELET:-false}" == "true" ]]; then
#TODO(mikedanese): allow static pods to start before creating a client
#flags+=" --bootstrap-kubeconfig=/var/lib/kubelet/bootstrap-kubeconfig"
#flags+=" --kubeconfig=/var/lib/kubelet/kubeconfig"
flags+=" --kubeconfig=/var/lib/kubelet/bootstrap-kubeconfig"
flags+=" --register-schedulable=false"
else
# Standalone mode (not widely used?)
flags+=" --pod-cidr=${MASTER_IP_RANGE}"
fi
else # For nodes
flags+=" ${NODE_KUBELET_TEST_ARGS:-}"
flags+=" --enable-debugging-handlers=true"
flags+=" --bootstrap-kubeconfig=/var/lib/kubelet/bootstrap-kubeconfig"
flags+=" --kubeconfig=/var/lib/kubelet/kubeconfig"
if [[ "${HAIRPIN_MODE:-}" == "promiscuous-bridge" ]] || \
[[ "${HAIRPIN_MODE:-}" == "hairpin-veth" ]] || \
[[ "${HAIRPIN_MODE:-}" == "none" ]]; then
flags+=" --hairpin-mode=${HAIRPIN_MODE}"
fi
flags+=" --anonymous-auth=false --authorization-mode=Webhook --client-ca-file=${CA_CERT_BUNDLE_PATH}"
fi
# Network plugin
if [[ -n "${NETWORK_PROVIDER:-}" || -n "${NETWORK_POLICY_PROVIDER:-}" ]]; then
flags+=" --cni-bin-dir=/home/kubernetes/bin"
if [[ "${NETWORK_POLICY_PROVIDER:-}" == "calico" ]]; then
# Calico uses CNI always.
if [[ "${KUBERNETES_PRIVATE_MASTER:-}" == "true" ]]; then
flags+=" --network-plugin=${NETWORK_PROVIDER}"
else
flags+=" --network-plugin=cni"
fi
else
# Otherwise use the configured value.
flags+=" --network-plugin=${NETWORK_PROVIDER}"
fi
fi
if [[ -n "${NON_MASQUERADE_CIDR:-}" ]]; then
flags+=" --non-masquerade-cidr=${NON_MASQUERADE_CIDR}"
fi
# FlexVolume plugin
if [[ -n "${VOLUME_PLUGIN_DIR:-}" ]]; then
flags+=" --volume-plugin-dir=${VOLUME_PLUGIN_DIR}"
fi
if [[ "${ENABLE_MANIFEST_URL:-}" == "true" ]]; then
flags+=" --manifest-url=${MANIFEST_URL}"
flags+=" --manifest-url-header=${MANIFEST_URL_HEADER}"
fi
if [[ -n "${ENABLE_CUSTOM_METRICS:-}" ]]; then
flags+=" --enable-custom-metrics=${ENABLE_CUSTOM_METRICS}"
fi
if [[ -n "${NODE_LABELS:-}" ]]; then
flags+=" --node-labels=${NODE_LABELS}"
fi
if [[ -n "${NODE_TAINTS:-}" ]]; then
flags+=" --register-with-taints=${NODE_TAINTS}"
fi
if [[ -n "${EVICTION_HARD:-}" ]]; then
flags+=" --eviction-hard=${EVICTION_HARD}"
fi
if [[ -n "${FEATURE_GATES:-}" ]]; then
flags+=" --feature-gates=${FEATURE_GATES}"
fi
local -r kubelet_env_file="/etc/default/kubelet"
echo "KUBELET_OPTS=\"${flags}\"" > "${kubelet_env_file}"
# Write the systemd service file for kubelet.
cat <<EOF >/etc/systemd/system/kubelet.service
[Unit]
Description=Kubernetes kubelet
Requires=network-online.target
After=network-online.target
[Service]
Restart=always
RestartSec=10
EnvironmentFile=${kubelet_env_file}
ExecStart=${kubelet_bin} \$KUBELET_OPTS
[Install]
WantedBy=multi-user.target
EOF
# Flush iptables nat table
iptables -t nat -F || true
systemctl start kubelet.service
}
# This function assembles the node problem detector systemd service file and
# starts it using systemctl.
function start-node-problem-detector {
echo "Start node problem detector"
local -r npd_bin="${KUBE_HOME}/bin/node-problem-detector"
local -r km_config="${KUBE_HOME}/node-problem-detector/config/kernel-monitor.json"
local -r dm_config="${KUBE_HOME}/node-problem-detector/config/docker-monitor.json"
echo "Using node problem detector binary at ${npd_bin}"
local flags="${NPD_TEST_LOG_LEVEL:-"--v=2"} ${NPD_TEST_ARGS:-}"
flags+=" --logtostderr"
flags+=" --system-log-monitors=${km_config},${dm_config}"
flags+=" --apiserver-override=https://${KUBERNETES_MASTER_NAME}?inClusterConfig=false&auth=/var/lib/node-problem-detector/kubeconfig"
local -r npd_port=${NODE_PROBLEM_DETECTOR_PORT:-20256}
flags+=" --port=${npd_port}"
# Write the systemd service file for node problem detector.
cat <<EOF >/etc/systemd/system/node-problem-detector.service
[Unit]
Description=Kubernetes node problem detector
Requires=network-online.target
After=network-online.target
[Service]
Restart=always
RestartSec=10
ExecStart=${npd_bin} ${flags}
[Install]
WantedBy=multi-user.target
EOF
systemctl start node-problem-detector.service
}
# Create the log file and set its properties.
#
# $1 is the file to create.
function prepare-log-file {
touch "$1"
chmod 644 "$1"
chown root:root "$1"
}
# Starts kube-proxy pod.
function start-kube-proxy {
echo "Start kube-proxy pod"
prepare-log-file /var/log/kube-proxy.log
local -r src_file="${KUBE_HOME}/kube-manifests/kubernetes/kube-proxy.manifest"
remove-salt-config-comments "${src_file}"
local -r kubeconfig="--kubeconfig=/var/lib/kube-proxy/kubeconfig"
local kube_docker_registry="gcr.io/google_containers"
if [[ -n "${KUBE_DOCKER_REGISTRY:-}" ]]; then
kube_docker_registry=${KUBE_DOCKER_REGISTRY}
fi
local -r kube_proxy_docker_tag=$(cat /home/kubernetes/kube-docker-files/kube-proxy.docker_tag)
local api_servers="--master=https://${KUBERNETES_MASTER_NAME}"
local params="${KUBEPROXY_TEST_LOG_LEVEL:-"--v=2"}"
if [[ -n "${FEATURE_GATES:-}" ]]; then
params+=" --feature-gates=${FEATURE_GATES}"
fi
params+=" --iptables-sync-period=1m --iptables-min-sync-period=10s"
if [[ -n "${KUBEPROXY_TEST_ARGS:-}" ]]; then
params+=" ${KUBEPROXY_TEST_ARGS}"
fi
local container_env=""
if [[ -n "${ENABLE_CACHE_MUTATION_DETECTOR:-}" ]]; then
container_env="env:\n - name: KUBE_CACHE_MUTATION_DETECTOR\n value: \"${ENABLE_CACHE_MUTATION_DETECTOR}\""
fi
sed -i -e "s@{{kubeconfig}}@${kubeconfig}@g" ${src_file}
sed -i -e "s@{{pillar\['kube_docker_registry'\]}}@${kube_docker_registry}@g" ${src_file}
sed -i -e "s@{{pillar\['kube-proxy_docker_tag'\]}}@${kube_proxy_docker_tag}@g" ${src_file}
sed -i -e "s@{{params}}@${params}@g" ${src_file}
sed -i -e "s@{{container_env}}@${container_env}@g" ${src_file}
sed -i -e "s@{{ cpurequest }}@100m@g" ${src_file}
sed -i -e "s@{{api_servers_with_port}}@${api_servers}@g" ${src_file}
if [[ -n "${CLUSTER_IP_RANGE:-}" ]]; then
sed -i -e "s@{{cluster_cidr}}@--cluster-cidr=${CLUSTER_IP_RANGE}@g" ${src_file}
fi
cp "${src_file}" /etc/kubernetes/manifests
}
# Replaces the variables in the etcd manifest file with the real values, and
# then copies the file to the manifest dir.
# $1: value for variable 'suffix'
# $2: value for variable 'port'
# $3: value for variable 'server_port'
# $4: value for variable 'cpulimit'
# $5: pod name, which should be either etcd or etcd-events
function prepare-etcd-manifest {
local host_name=$(hostname)
local etcd_cluster=""
local cluster_state="new"
local etcd_protocol="http"
local etcd_creds=""
if [[ -n "${INITIAL_ETCD_CLUSTER_STATE:-}" ]]; then
cluster_state="${INITIAL_ETCD_CLUSTER_STATE}"
fi
if [[ -n "${ETCD_CA_KEY:-}" && -n "${ETCD_CA_CERT:-}" && -n "${ETCD_PEER_KEY:-}" && -n "${ETCD_PEER_CERT:-}" ]]; then
etcd_creds=" --peer-trusted-ca-file /etc/srv/kubernetes/etcd-ca.crt --peer-cert-file /etc/srv/kubernetes/etcd-peer.crt --peer-key-file /etc/srv/kubernetes/etcd-peer.key -peer-client-cert-auth "
etcd_protocol="https"
fi
for host in $(echo "${INITIAL_ETCD_CLUSTER:-${host_name}}" | tr "," "\n"); do
etcd_host="etcd-${host}=${etcd_protocol}://${host}:$3"
if [[ -n "${etcd_cluster}" ]]; then
etcd_cluster+=","
fi
etcd_cluster+="${etcd_host}"
done
local -r temp_file="/tmp/$5"
cp "${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/etcd.manifest" "${temp_file}"
remove-salt-config-comments "${temp_file}"
sed -i -e "s@{{ *suffix *}}@$1@g" "${temp_file}"
sed -i -e "s@{{ *port *}}@$2@g" "${temp_file}"
sed -i -e "s@{{ *server_port *}}@$3@g" "${temp_file}"
sed -i -e "s@{{ *cpulimit *}}@\"$4\"@g" "${temp_file}"
sed -i -e "s@{{ *hostname *}}@$host_name@g" "${temp_file}"
sed -i -e "s@{{ *srv_kube_path *}}@/etc/srv/kubernetes@g" "${temp_file}"
sed -i -e "s@{{ *etcd_cluster *}}@$etcd_cluster@g" "${temp_file}"
# Get default storage backend from manifest file.
local -r default_storage_backend=$(grep -o "{{ *pillar\.get('storage_backend', '\(.*\)') *}}" "${temp_file}" | \
sed -e "s@{{ *pillar\.get('storage_backend', '\(.*\)') *}}@\1@g")
if [[ -n "${STORAGE_BACKEND:-}" ]]; then
sed -i -e "s@{{ *pillar\.get('storage_backend', '\(.*\)') *}}@${STORAGE_BACKEND}@g" "${temp_file}"
else
sed -i -e "s@{{ *pillar\.get('storage_backend', '\(.*\)') *}}@\1@g" "${temp_file}"
fi
if [[ "${STORAGE_BACKEND:-${default_storage_backend}}" == "etcd3" ]]; then
sed -i -e "s@{{ *quota_bytes *}}@--quota-backend-bytes=4294967296@g" "${temp_file}"
else
sed -i -e "s@{{ *quota_bytes *}}@@g" "${temp_file}"
fi
sed -i -e "s@{{ *cluster_state *}}@$cluster_state@g" "${temp_file}"
if [[ -n "${ETCD_IMAGE:-}" ]]; then
sed -i -e "s@{{ *pillar\.get('etcd_docker_tag', '\(.*\)') *}}@${ETCD_IMAGE}@g" "${temp_file}"
else
sed -i -e "s@{{ *pillar\.get('etcd_docker_tag', '\(.*\)') *}}@\1@g" "${temp_file}"
fi
sed -i -e "s@{{ *etcd_protocol *}}@$etcd_protocol@g" "${temp_file}"
sed -i -e "s@{{ *etcd_creds *}}@$etcd_creds@g" "${temp_file}"
if [[ -n "${ETCD_VERSION:-}" ]]; then
sed -i -e "s@{{ *pillar\.get('etcd_version', '\(.*\)') *}}@${ETCD_VERSION}@g" "${temp_file}"
else
sed -i -e "s@{{ *pillar\.get('etcd_version', '\(.*\)') *}}@\1@g" "${temp_file}"
fi
# Replace the volume host path.
sed -i -e "s@/mnt/master-pd/var/etcd@/mnt/disks/master-pd/var/etcd@g" "${temp_file}"
mv "${temp_file}" /etc/kubernetes/manifests
}
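# For illustration, with hypothetical INITIAL_ETCD_CLUSTER="master-a,master-b",
# plain http, and server_port ($3) 2380, the loop above builds:
#   etcd-master-a=http://master-a:2380,etcd-master-b=http://master-b:2380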
function start-etcd-empty-dir-cleanup-pod {
cp "${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/etcd-empty-dir-cleanup/etcd-empty-dir-cleanup.yaml" "/etc/kubernetes/manifests"
}
# Starts etcd server pod (and etcd-events pod if needed).
# More specifically, it prepares dirs and files, sets the variable values
# in the manifests, and copies them to /etc/kubernetes/manifests.
function start-etcd-servers {
echo "Start etcd pods"
if [[ -d /etc/etcd ]]; then
rm -rf /etc/etcd
fi
if [[ -e /etc/default/etcd ]]; then
rm -f /etc/default/etcd
fi
if [[ -e /etc/systemd/system/etcd.service ]]; then
rm -f /etc/systemd/system/etcd.service
fi
if [[ -e /etc/init.d/etcd ]]; then
rm -f /etc/init.d/etcd
fi
prepare-log-file /var/log/etcd.log
prepare-etcd-manifest "" "2379" "2380" "200m" "etcd.manifest"
prepare-log-file /var/log/etcd-events.log
prepare-etcd-manifest "-events" "4002" "2381" "100m" "etcd-events.manifest"
}
# Calculates the following variables based on env variables, which will be used
# by the manifests of several kube-master components.
# CLOUD_CONFIG_OPT
# CLOUD_CONFIG_VOLUME
# CLOUD_CONFIG_MOUNT
# DOCKER_REGISTRY
function compute-master-manifest-variables {
CLOUD_CONFIG_OPT=""
CLOUD_CONFIG_VOLUME=""
CLOUD_CONFIG_MOUNT=""
if [[ -f /etc/gce.conf ]]; then
CLOUD_CONFIG_OPT="--cloud-config=/etc/gce.conf"
CLOUD_CONFIG_VOLUME="{\"name\": \"cloudconfigmount\",\"hostPath\": {\"path\": \"/etc/gce.conf\"}},"
CLOUD_CONFIG_MOUNT="{\"name\": \"cloudconfigmount\",\"mountPath\": \"/etc/gce.conf\", \"readOnly\": true},"
fi
DOCKER_REGISTRY="gcr.io/google_containers"
if [[ -n "${KUBE_DOCKER_REGISTRY:-}" ]]; then
DOCKER_REGISTRY="${KUBE_DOCKER_REGISTRY}"
fi
}
# A helper function that bind mounts kubelet dirs for running mount in a chroot
function prepare-mounter-rootfs {
echo "Prepare containerized mounter"
mount --bind "${CONTAINERIZED_MOUNTER_HOME}" "${CONTAINERIZED_MOUNTER_HOME}"
mount -o remount,exec "${CONTAINERIZED_MOUNTER_HOME}"
CONTAINERIZED_MOUNTER_ROOTFS="${CONTAINERIZED_MOUNTER_HOME}/rootfs"
mount --rbind /var/lib/kubelet/ "${CONTAINERIZED_MOUNTER_ROOTFS}/var/lib/kubelet"
mount --make-rshared "${CONTAINERIZED_MOUNTER_ROOTFS}/var/lib/kubelet"
mount --bind -o ro /proc "${CONTAINERIZED_MOUNTER_ROOTFS}/proc"
mount --bind -o ro /dev "${CONTAINERIZED_MOUNTER_ROOTFS}/dev"
mount --bind -o ro /etc/resolv.conf "${CONTAINERIZED_MOUNTER_ROOTFS}/etc/resolv.conf"
}
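# The kubelet's --experimental-mounter-path wrapper can then perform mounts
# inside this prepared rootfs along the lines of (a sketch of the idea, not
# the exact wrapper implementation):
#   chroot "${CONTAINERIZED_MOUNTER_ROOTFS}" mount <options>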
# A helper function for removing salt configuration and comments from a file.
# This is mainly for preparing a manifest file.
#
# $1: Full path of the file to manipulate
function remove-salt-config-comments {
# Remove salt configuration.
sed -i "/^[ |\t]*{[#|%]/d" "$1"
# Remove comments.
sed -i "/^[ |\t]*#/d" "$1"
}
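# Example: a manifest line such as "{% set foo = 'bar' %}" or a leading
# "# salt comment" is deleted, while inline template values like "{{params}}"
# are left in place for the sed substitutions performed by the callers.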
# Starts kubernetes apiserver.
# It prepares the log file, loads the docker image, calculates variables, sets them
# in the manifest file, and then copies the manifest file to /etc/kubernetes/manifests.
#
# Assumed vars (which are calculated in function compute-master-manifest-variables)
# CLOUD_CONFIG_OPT
# CLOUD_CONFIG_VOLUME
# CLOUD_CONFIG_MOUNT
# DOCKER_REGISTRY
function start-kube-apiserver {
echo "Start kubernetes api-server"
prepare-log-file /var/log/kube-apiserver.log
prepare-log-file /var/log/kube-apiserver-audit.log
# Calculate variables and assemble the command line.
local params="${API_SERVER_TEST_LOG_LEVEL:-"--v=2"} ${APISERVER_TEST_ARGS:-} ${CLOUD_CONFIG_OPT}"
params+=" --address=127.0.0.1"
params+=" --allow-privileged=true"
params+=" --cloud-provider=gce"
params+=" --client-ca-file=${CA_CERT_BUNDLE_PATH}"
params+=" --etcd-servers=http://127.0.0.1:2379"
params+=" --etcd-servers-overrides=/events#http://127.0.0.1:4002"
params+=" --secure-port=443"
params+=" --tls-cert-file=${APISERVER_SERVER_CERT_PATH}"
params+=" --tls-private-key-file=${APISERVER_SERVER_KEY_PATH}"
if [[ -s "${REQUESTHEADER_CA_CERT_PATH:-}" ]]; then
params+=" --requestheader-client-ca-file=${REQUESTHEADER_CA_CERT_PATH}"
params+=" --requestheader-allowed-names=aggregator"
params+=" --requestheader-extra-headers-prefix=X-Remote-Extra-"
params+=" --requestheader-group-headers=X-Remote-Group"
params+=" --requestheader-username-headers=X-Remote-User"
params+=" --proxy-client-cert-file=${PROXY_CLIENT_CERT_PATH}"
params+=" --proxy-client-key-file=${PROXY_CLIENT_KEY_PATH}"
fi
params+=" --enable-aggregator-routing=true"
if [[ -e "${APISERVER_CLIENT_CERT_PATH}" ]] && [[ -e "${APISERVER_CLIENT_KEY_PATH}" ]]; then
params+=" --kubelet-client-certificate=${APISERVER_CLIENT_CERT_PATH}"
params+=" --kubelet-client-key=${APISERVER_CLIENT_KEY_PATH}"
fi
if [[ -n "${SERVICEACCOUNT_CERT_PATH:-}" ]]; then
params+=" --service-account-key-file=${SERVICEACCOUNT_CERT_PATH}"
fi
params+=" --token-auth-file=/etc/srv/kubernetes/known_tokens.csv"
if [[ -n "${KUBE_PASSWORD:-}" && -n "${KUBE_USER:-}" ]]; then
params+=" --basic-auth-file=/etc/srv/kubernetes/basic_auth.csv"
fi
if [[ -n "${STORAGE_BACKEND:-}" ]]; then
params+=" --storage-backend=${STORAGE_BACKEND}"
fi
if [[ -n "${STORAGE_MEDIA_TYPE:-}" ]]; then
params+=" --storage-media-type=${STORAGE_MEDIA_TYPE}"
fi
if [[ -n "${ENABLE_GARBAGE_COLLECTOR:-}" ]]; then
params+=" --enable-garbage-collector=${ENABLE_GARBAGE_COLLECTOR}"
fi
if [[ -n "${NUM_NODES:-}" ]]; then
# If the cluster is large, increase max-requests-inflight limit in apiserver.
if [[ "${NUM_NODES}" -ge 1000 ]]; then
params+=" --max-requests-inflight=1500 --max-mutating-requests-inflight=500"
fi
# Set amount of memory available for apiserver based on number of nodes.
# TODO: Once we start setting proper requests and limits for apiserver
# we should reuse the same logic here instead of current heuristic.
params+=" --target-ram-mb=$((${NUM_NODES} * 60))"
fi
if [[ -n "${SERVICE_CLUSTER_IP_RANGE:-}" ]]; then
params+=" --service-cluster-ip-range=${SERVICE_CLUSTER_IP_RANGE}"
fi
if [[ -n "${ETCD_QUORUM_READ:-}" ]]; then
params+=" --etcd-quorum-read=${ETCD_QUORUM_READ}"
fi
local audit_policy_config_mount=""
local audit_policy_config_volume=""
local audit_webhook_config_mount=""
local audit_webhook_config_volume=""
if [[ "${ENABLE_APISERVER_BASIC_AUDIT:-}" == "true" ]]; then
# We currently only support enabling with a fixed path and with built-in log
# rotation "disabled" (large value) so it behaves like kube-apiserver.log.
# External log rotation should be set up the same as for kube-apiserver.log.
params+=" --audit-log-path=/var/log/kube-apiserver-audit.log"
params+=" --audit-log-maxage=0"
params+=" --audit-log-maxbackup=0"
# Lumberjack doesn't offer any way to disable size-based rotation. It also
# has an in-memory counter that doesn't notice if you truncate the file.
# 2000000000 (in MiB) is a large number that fits in 31 bits. If the log
# grows at 10MiB/s (~30K QPS), it will rotate after ~6 years if apiserver
# never restarts. Please manually restart apiserver before this time.
params+=" --audit-log-maxsize=2000000000"
elif [[ "${ENABLE_APISERVER_ADVANCED_AUDIT:-}" == "true" ]]; then
local -r audit_policy_file="/etc/audit_policy.config"
params+=" --audit-policy-file=${audit_policy_file}"
# Create the audit policy file, and mount it into the apiserver pod.
create-master-audit-policy "${audit_policy_file}"
audit_policy_config_mount="{\"name\": \"auditpolicyconfigmount\",\"mountPath\": \"${audit_policy_file}\", \"readOnly\": true},"
audit_policy_config_volume="{\"name\": \"auditpolicyconfigmount\",\"hostPath\": {\"path\": \"${audit_policy_file}\"}},"
if [[ "${ADVANCED_AUDIT_BACKEND:-log}" == *"log"* ]]; then
# The advanced audit log backend config matches the basic audit log config.
params+=" --audit-log-path=/var/log/kube-apiserver-audit.log"
params+=" --audit-log-maxage=0"
params+=" --audit-log-maxbackup=0"
# Lumberjack doesn't offer any way to disable size-based rotation. It also
# has an in-memory counter that doesn't notice if you truncate the file.
# 2000000000 (in MiB) is a large number that fits in 31 bits. If the log
# grows at 10MiB/s (~30K QPS), it will rotate after ~6 years if apiserver
# never restarts. Please manually restart apiserver before this time.
params+=" --audit-log-maxsize=2000000000"
fi
if [[ "${ADVANCED_AUDIT_BACKEND:-}" == *"webhook"* ]]; then
params+=" --audit-webhook-mode=batch"
# Create the audit webhook config file, and mount it into the apiserver pod.
local -r audit_webhook_config_file="/etc/audit_webhook.config"
params+=" --audit-webhook-config-file=${audit_webhook_config_file}"
create-master-audit-webhook-config "${audit_webhook_config_file}"
audit_webhook_config_mount="{\"name\": \"auditwebhookconfigmount\",\"mountPath\": \"${audit_webhook_config_file}\", \"readOnly\": true},"
audit_webhook_config_volume="{\"name\": \"auditwebhookconfigmount\",\"hostPath\": {\"path\": \"${audit_webhook_config_file}\"}},"
fi
fi
if [[ "${ENABLE_APISERVER_LOGS_HANDLER:-}" == "false" ]]; then
params+=" --enable-logs-handler=false"
fi
local admission_controller_config_mount=""
local admission_controller_config_volume=""
local image_policy_webhook_config_mount=""
local image_policy_webhook_config_volume=""
if [[ -n "${ADMISSION_CONTROL:-}" ]]; then
params+=" --admission-control=${ADMISSION_CONTROL}"
if [[ ${ADMISSION_CONTROL} == *"ImagePolicyWebhook"* ]]; then
params+=" --admission-control-config-file=/etc/admission_controller.config"
# Mount the file to configure admission controllers if ImagePolicyWebhook is set.
admission_controller_config_mount="{\"name\": \"admissioncontrollerconfigmount\",\"mountPath\": \"/etc/admission_controller.config\", \"readOnly\": false},"
admission_controller_config_volume="{\"name\": \"admissioncontrollerconfigmount\",\"hostPath\": {\"path\": \"/etc/admission_controller.config\"}},"
# Mount the file to configure the ImagePolicyWebhook's webhook.
image_policy_webhook_config_mount="{\"name\": \"imagepolicywebhookconfigmount\",\"mountPath\": \"/etc/gcp_image_review.config\", \"readOnly\": false},"
image_policy_webhook_config_volume="{\"name\": \"imagepolicywebhookconfigmount\",\"hostPath\": {\"path\": \"/etc/gcp_image_review.config\"}},"
fi
fi
if [[ -n "${KUBE_APISERVER_REQUEST_TIMEOUT:-}" ]]; then
params+=" --min-request-timeout=${KUBE_APISERVER_REQUEST_TIMEOUT}"
fi
if [[ -n "${RUNTIME_CONFIG:-}" ]]; then
params+=" --runtime-config=${RUNTIME_CONFIG}"
fi
if [[ -n "${FEATURE_GATES:-}" ]]; then
params+=" --feature-gates=${FEATURE_GATES}"
fi
if [[ -n "${PROJECT_ID:-}" && -n "${TOKEN_URL:-}" && -n "${TOKEN_BODY:-}" && -n "${NODE_NETWORK:-}" ]]; then
local -r vm_external_ip=$(curl --retry 5 --retry-delay 3 --fail --silent -H 'Metadata-Flavor: Google' "http://metadata/computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip")
params+=" --advertise-address=${vm_external_ip}"
params+=" --ssh-user=${PROXY_SSH_USER}"
params+=" --ssh-keyfile=/etc/srv/sshproxy/.sshkeyfile"
elif [ -n "${MASTER_ADVERTISE_ADDRESS:-}" ]; then
params="${params} --advertise-address=${MASTER_ADVERTISE_ADDRESS}"
fi
local webhook_authn_config_mount=""
local webhook_authn_config_volume=""
if [[ -n "${GCP_AUTHN_URL:-}" ]]; then
params+=" --authentication-token-webhook-config-file=/etc/gcp_authn.config"
webhook_authn_config_mount="{\"name\": \"webhookauthnconfigmount\",\"mountPath\": \"/etc/gcp_authn.config\", \"readOnly\": false},"
webhook_authn_config_volume="{\"name\": \"webhookauthnconfigmount\",\"hostPath\": {\"path\": \"/etc/gcp_authn.config\"}},"
fi
local authorization_mode="Node,RBAC"
local -r src_dir="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty"
# Enable ABAC mode unless the user explicitly opts out with ENABLE_LEGACY_ABAC=false
if [[ "${ENABLE_LEGACY_ABAC:-}" != "false" ]]; then
echo "Warning: Enabling legacy ABAC policy. All service accounts will have superuser API access. Set ENABLE_LEGACY_ABAC=false to disable this."
# Create the ABAC file if it doesn't exist yet, or if we have a KUBE_USER set (to ensure the right user is given permissions)
if [[ -n "${KUBE_USER:-}" || ! -e /etc/srv/kubernetes/abac-authz-policy.jsonl ]]; then
local -r abac_policy_json="${src_dir}/abac-authz-policy.jsonl"
remove-salt-config-comments "${abac_policy_json}"
if [[ -n "${KUBE_USER:-}" ]]; then
sed -i -e "s/{{kube_user}}/${KUBE_USER}/g" "${abac_policy_json}"
else
sed -i -e "/{{kube_user}}/d" "${abac_policy_json}"
fi
cp "${abac_policy_json}" /etc/srv/kubernetes/
fi
params+=" --authorization-policy-file=/etc/srv/kubernetes/abac-authz-policy.jsonl"
authorization_mode+=",ABAC"
fi
local webhook_config_mount=""
local webhook_config_volume=""
if [[ -n "${GCP_AUTHZ_URL:-}" ]]; then
authorization_mode+=",Webhook"
params+=" --authorization-webhook-config-file=/etc/gcp_authz.config"
webhook_config_mount="{\"name\": \"webhookconfigmount\",\"mountPath\": \"/etc/gcp_authz.config\", \"readOnly\": false},"
webhook_config_volume="{\"name\": \"webhookconfigmount\",\"hostPath\": {\"path\": \"/etc/gcp_authz.config\"}},"
fi
params+=" --authorization-mode=${authorization_mode}"
local container_env=""
if [[ -n "${ENABLE_CACHE_MUTATION_DETECTOR:-}" ]]; then
container_env="\"name\": \"KUBE_CACHE_MUTATION_DETECTOR\", \"value\": \"${ENABLE_CACHE_MUTATION_DETECTOR}\""
fi
if [[ -n "${ENABLE_PATCH_CONVERSION_DETECTOR:-}" ]]; then
if [[ -n "${container_env}" ]]; then
container_env="${container_env}, "
fi
container_env="\"name\": \"KUBE_PATCH_CONVERSION_DETECTOR\", \"value\": \"${ENABLE_PATCH_CONVERSION_DETECTOR}\""
fi
if [[ -n "${container_env}" ]]; then
container_env="\"env\":[{${container_env}}],"
fi
if [[ -n "${ENCRYPTION_PROVIDER_CONFIG:-}" ]]; then
local encryption_provider_config_path="/etc/srv/kubernetes/encryption-provider-config.yml"
echo "${ENCRYPTION_PROVIDER_CONFIG}" | base64 --decode > "${encryption_provider_config_path}"
params+=" --experimental-encryption-provider-config=${encryption_provider_config_path}"
fi
src_file="${src_dir}/kube-apiserver.manifest"
remove-salt-config-comments "${src_file}"
# Evaluate variables.
local -r kube_apiserver_docker_tag=$(cat /home/kubernetes/kube-docker-files/kube-apiserver.docker_tag)
sed -i -e "s@{{params}}@${params}@g" "${src_file}"
sed -i -e "s@{{container_env}}@${container_env}@g" ${src_file}
sed -i -e "s@{{srv_kube_path}}@/etc/srv/kubernetes@g" "${src_file}"
sed -i -e "s@{{srv_sshproxy_path}}@/etc/srv/sshproxy@g" "${src_file}"
sed -i -e "s@{{cloud_config_mount}}@${CLOUD_CONFIG_MOUNT}@g" "${src_file}"
sed -i -e "s@{{cloud_config_volume}}@${CLOUD_CONFIG_VOLUME}@g" "${src_file}"
sed -i -e "s@{{pillar\['kube_docker_registry'\]}}@${DOCKER_REGISTRY}@g" "${src_file}"
sed -i -e "s@{{pillar\['kube-apiserver_docker_tag'\]}}@${kube_apiserver_docker_tag}@g" "${src_file}"
sed -i -e "s@{{pillar\['allow_privileged'\]}}@true@g" "${src_file}"
sed -i -e "s@{{secure_port}}@443@g" "${src_file}"
sed -i -e "s@{{secure_port}}@8080@g" "${src_file}"
sed -i -e "s@{{additional_cloud_config_mount}}@@g" "${src_file}"
sed -i -e "s@{{additional_cloud_config_volume}}@@g" "${src_file}"
sed -i -e "s@{{webhook_authn_config_mount}}@${webhook_authn_config_mount}@g" "${src_file}"
sed -i -e "s@{{webhook_authn_config_volume}}@${webhook_authn_config_volume}@g" "${src_file}"
sed -i -e "s@{{webhook_config_mount}}@${webhook_config_mount}@g" "${src_file}"
sed -i -e "s@{{webhook_config_volume}}@${webhook_config_volume}@g" "${src_file}"
sed -i -e "s@{{audit_policy_config_mount}}@${audit_policy_config_mount}@g" "${src_file}"
sed -i -e "s@{{audit_policy_config_volume}}@${audit_policy_config_volume}@g" "${src_file}"
sed -i -e "s@{{audit_webhook_config_mount}}@${audit_webhook_config_mount}@g" "${src_file}"
sed -i -e "s@{{audit_webhook_config_volume}}@${audit_webhook_config_volume}@g" "${src_file}"
sed -i -e "s@{{admission_controller_config_mount}}@${admission_controller_config_mount}@g" "${src_file}"
sed -i -e "s@{{admission_controller_config_volume}}@${admission_controller_config_volume}@g" "${src_file}"
sed -i -e "s@{{image_policy_webhook_config_mount}}@${image_policy_webhook_config_mount}@g" "${src_file}"
sed -i -e "s@{{image_policy_webhook_config_volume}}@${image_policy_webhook_config_volume}@g" "${src_file}"
cp "${src_file}" /etc/kubernetes/manifests
}
# Starts kubernetes controller manager.
# It prepares the log file, loads the docker image, calculates variables, sets them
# in the manifest file, and then copies the manifest file to /etc/kubernetes/manifests.
#
# Assumed vars (which are calculated in function compute-master-manifest-variables)
# CLOUD_CONFIG_OPT
# CLOUD_CONFIG_VOLUME
# CLOUD_CONFIG_MOUNT
# DOCKER_REGISTRY
function start-kube-controller-manager {
echo "Start kubernetes controller-manager"
create-kubecontrollermanager-kubeconfig
prepare-log-file /var/log/kube-controller-manager.log
# Calculate variables and assemble the command line.
local params="${CONTROLLER_MANAGER_TEST_LOG_LEVEL:-"--v=2"} ${CONTROLLER_MANAGER_TEST_ARGS:-} ${CLOUD_CONFIG_OPT}"
params+=" --use-service-account-credentials"
params+=" --cloud-provider=gce"
params+=" --kubeconfig=/etc/srv/kubernetes/kube-controller-manager/kubeconfig"
params+=" --root-ca-file=${CA_CERT_BUNDLE_PATH}"
params+=" --service-account-private-key-file=${SERVICEACCOUNT_KEY_PATH}"
if [[ -n "${ENABLE_GARBAGE_COLLECTOR:-}" ]]; then
params+=" --enable-garbage-collector=${ENABLE_GARBAGE_COLLECTOR}"
fi
if [[ -n "${INSTANCE_PREFIX:-}" ]]; then
params+=" --cluster-name=${INSTANCE_PREFIX}"
fi
if [[ -n "${CLUSTER_IP_RANGE:-}" ]]; then
params+=" --cluster-cidr=${CLUSTER_IP_RANGE}"
fi
if [[ -n "${CA_KEY:-}" ]]; then
params+=" --cluster-signing-cert-file=${CA_CERT_PATH}"
params+=" --cluster-signing-key-file=${CA_KEY_PATH}"
fi
if [[ -n "${SERVICE_CLUSTER_IP_RANGE:-}" ]]; then
params+=" --service-cluster-ip-range=${SERVICE_CLUSTER_IP_RANGE}"
fi
if [[ "${NETWORK_PROVIDER:-}" == "kubenet" ]]; then
params+=" --allocate-node-cidrs=true"
elif [[ -n "${ALLOCATE_NODE_CIDRS:-}" ]]; then
params+=" --allocate-node-cidrs=${ALLOCATE_NODE_CIDRS}"
fi
if [[ -n "${TERMINATED_POD_GC_THRESHOLD:-}" ]]; then
params+=" --terminated-pod-gc-threshold=${TERMINATED_POD_GC_THRESHOLD}"
fi
if [[ "${ENABLE_IP_ALIASES:-}" == 'true' ]]; then
params+=" --cidr-allocator-type=CloudAllocator"
params+=" --configure-cloud-routes=false"
fi
if [[ -n "${FEATURE_GATES:-}" ]]; then
params+=" --feature-gates=${FEATURE_GATES}"
fi
local -r kube_rc_docker_tag=$(cat /home/kubernetes/kube-docker-files/kube-controller-manager.docker_tag)
local container_env=""
if [[ -n "${ENABLE_CACHE_MUTATION_DETECTOR:-}" ]]; then
container_env="\"env\":[{\"name\": \"KUBE_CACHE_MUTATION_DETECTOR\", \"value\": \"${ENABLE_CACHE_MUTATION_DETECTOR}\"}],"
fi
local -r src_file="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/kube-controller-manager.manifest"
remove-salt-config-comments "${src_file}"
# Evaluate variables.
sed -i -e "s@{{srv_kube_path}}@/etc/srv/kubernetes@g" "${src_file}"
sed -i -e "s@{{pillar\['kube_docker_registry'\]}}@${DOCKER_REGISTRY}@g" "${src_file}"
sed -i -e "s@{{pillar\['kube-controller-manager_docker_tag'\]}}@${kube_rc_docker_tag}@g" "${src_file}"
sed -i -e "s@{{params}}@${params}@g" "${src_file}"
sed -i -e "s@{{container_env}}@${container_env}@g" ${src_file}
sed -i -e "s@{{cloud_config_mount}}@${CLOUD_CONFIG_MOUNT}@g" "${src_file}"
sed -i -e "s@{{cloud_config_volume}}@${CLOUD_CONFIG_VOLUME}@g" "${src_file}"
sed -i -e "s@{{additional_cloud_config_mount}}@@g" "${src_file}"
sed -i -e "s@{{additional_cloud_config_volume}}@@g" "${src_file}"
cp "${src_file}" /etc/kubernetes/manifests
}
# Starts kubernetes scheduler.
# It prepares the log file, loads the docker image, calculates variables, sets them
# in the manifest file, and then copies the manifest file to /etc/kubernetes/manifests.
#
# Assumed vars (which are calculated in compute-master-manifest-variables)
# DOCKER_REGISTRY
function start-kube-scheduler {
echo "Start kubernetes scheduler"
create-kubescheduler-kubeconfig
prepare-log-file /var/log/kube-scheduler.log
# Calculate variables and set them in the manifest.
params="${SCHEDULER_TEST_LOG_LEVEL:-"--v=2"} ${SCHEDULER_TEST_ARGS:-}"
params+=" --kubeconfig=/etc/srv/kubernetes/kube-scheduler/kubeconfig"
if [[ -n "${FEATURE_GATES:-}" ]]; then
params+=" --feature-gates=${FEATURE_GATES}"
fi
if [[ -n "${SCHEDULING_ALGORITHM_PROVIDER:-}" ]]; then
params+=" --algorithm-provider=${SCHEDULING_ALGORITHM_PROVIDER}"
fi
local -r kube_scheduler_docker_tag=$(cat "${KUBE_HOME}/kube-docker-files/kube-scheduler.docker_tag")
# Remove salt comments and replace variables with values.
local -r src_file="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/kube-scheduler.manifest"
remove-salt-config-comments "${src_file}"
sed -i -e "s@{{srv_kube_path}}@/etc/srv/kubernetes@g" "${src_file}"
sed -i -e "s@{{params}}@${params}@g" "${src_file}"
sed -i -e "s@{{pillar\['kube_docker_registry'\]}}@${DOCKER_REGISTRY}@g" "${src_file}"
sed -i -e "s@{{pillar\['kube-scheduler_docker_tag'\]}}@${kube_scheduler_docker_tag}@g" "${src_file}"
cp "${src_file}" /etc/kubernetes/manifests
}
# Starts cluster autoscaler.
# Assumed vars (which are calculated in function compute-master-manifest-variables)
# CLOUD_CONFIG_OPT
# CLOUD_CONFIG_VOLUME
# CLOUD_CONFIG_MOUNT
function start-cluster-autoscaler {
if [[ "${ENABLE_CLUSTER_AUTOSCALER:-}" == "true" ]]; then
echo "Start kubernetes cluster autoscaler"
prepare-log-file /var/log/cluster-autoscaler.log
# Remove salt comments and replace variables with values
local -r src_file="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/cluster-autoscaler.manifest"
remove-salt-config-comments "${src_file}"
local params="${AUTOSCALER_MIG_CONFIG} ${CLOUD_CONFIG_OPT} ${AUTOSCALER_EXPANDER_CONFIG:---expander=price}"
sed -i -e "s@{{params}}@${params}@g" "${src_file}"
sed -i -e "s@{{cloud_config_mount}}@${CLOUD_CONFIG_MOUNT}@g" "${src_file}"
sed -i -e "s@{{cloud_config_volume}}@${CLOUD_CONFIG_VOLUME}@g" "${src_file}"
sed -i -e "s@{%.*%}@@g" "${src_file}"
cp "${src_file}" /etc/kubernetes/manifests
fi
}
# A helper function for copying addon manifests and setting dir/file
# permissions.
#
# $1: addon category under /etc/kubernetes
# $2: manifest source dir
function setup-addon-manifests {
local -r src_dir="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/$2"
local -r dst_dir="/etc/kubernetes/$1/$2"
if [[ ! -d "${dst_dir}" ]]; then
mkdir -p "${dst_dir}"
fi
local files=$(find "${src_dir}" -maxdepth 1 -name "*.yaml")
if [[ -n "${files}" ]]; then
cp "${src_dir}/"*.yaml "${dst_dir}"
fi
files=$(find "${src_dir}" -maxdepth 1 -name "*.json")
if [[ -n "${files}" ]]; then
cp "${src_dir}/"*.json "${dst_dir}"
fi
files=$(find "${src_dir}" -maxdepth 1 -name "*.yaml.in")
if [[ -n "${files}" ]]; then
cp "${src_dir}/"*.yaml.in "${dst_dir}"
fi
chown -R root:root "${dst_dir}"
chmod 755 "${dst_dir}"
chmod 644 "${dst_dir}"/*
}
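# For example, setup-addon-manifests "addons" "dns" copies
# ${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/dns/*.{yaml,json,yaml.in}
# into /etc/kubernetes/addons/dns with root ownership and 644 file modes.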
# Fluentd manifest is modified using kubectl, which may not be available at
# this point. Run this as a background process.
function wait-for-apiserver-and-update-fluentd {
until kubectl get nodes
do
sleep 10
done
kubectl set resources --dry-run --local -f "${fluentd_gcp_yaml}" \
--limits=memory="${FLUENTD_GCP_MEMORY_LIMIT}" \
--requests=cpu="${FLUENTD_GCP_CPU_REQUEST}",memory="${FLUENTD_GCP_MEMORY_REQUEST}" \
--containers=fluentd-gcp -o yaml > "${fluentd_gcp_yaml}.tmp"
mv "${fluentd_gcp_yaml}.tmp" "${fluentd_gcp_yaml}"
}
# Trigger background process that will ultimately update fluentd resource
# requirements.
function start-fluentd-resource-update {
wait-for-apiserver-and-update-fluentd &
}
# Updates parameters in yaml file for prometheus-to-sd configuration, or
# removes component if it is disabled.
function update-prometheus-to-sd-parameters {
if [[ "${ENABLE_PROMETHEUS_TO_SD:-}" == "true" ]]; then
sed -i -e "s@{{ *prometheus_to_sd_prefix *}}@${PROMETHEUS_TO_SD_PREFIX}@g" "$1"
sed -i -e "s@{{ *prometheus_to_sd_endpoint *}}@${PROMETHEUS_TO_SD_ENDPOINT}@g" "$1"
else
# Removes all lines between two patterns (throws away prometheus-to-sd)
sed -i -e "/# BEGIN_PROMETHEUS_TO_SD/,/# END_PROMETHEUS_TO_SD/d" "$1"
fi
}
# Prepares the manifests of k8s addons, and starts the addon manager.
# Vars assumed:
# CLUSTER_NAME
function start-kube-addons {
echo "Prepare kube-addons manifests and start kube addon manager"
local -r src_dir="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty"
local -r dst_dir="/etc/kubernetes/addons"
# Prep additional kube-up specific RBAC objects.
setup-addon-manifests "addons" "rbac"
# Set up manifests of other addons.
if [[ "${ENABLE_CLUSTER_MONITORING:-}" == "influxdb" ]] || \
[[ "${ENABLE_CLUSTER_MONITORING:-}" == "google" ]] || \
[[ "${ENABLE_CLUSTER_MONITORING:-}" == "stackdriver" ]] || \
[[ "${ENABLE_CLUSTER_MONITORING:-}" == "standalone" ]] || \
[[ "${ENABLE_CLUSTER_MONITORING:-}" == "googleinfluxdb" ]]; then
local -r file_dir="cluster-monitoring/${ENABLE_CLUSTER_MONITORING}"
setup-addon-manifests "addons" "cluster-monitoring"
setup-addon-manifests "addons" "${file_dir}"
# Replace the salt configurations with variable values.
base_metrics_memory="140Mi"
base_eventer_memory="190Mi"
base_metrics_cpu="80m"
nanny_memory="90Mi"
local -r metrics_memory_per_node="4"
local -r metrics_cpu_per_node="0.5"
local -r eventer_memory_per_node="500"
local -r nanny_memory_per_node="200"
if [[ -n "${NUM_NODES:-}" && "${NUM_NODES}" -ge 1 ]]; then
num_kube_nodes="$((${NUM_NODES}+1))"
nanny_memory="$((${num_kube_nodes} * ${nanny_memory_per_node} + 90 * 1024))Ki"
fi
controller_yaml="${dst_dir}/${file_dir}"
if [[ "${ENABLE_CLUSTER_MONITORING:-}" == "googleinfluxdb" ]]; then
controller_yaml="${controller_yaml}/heapster-controller-combined.yaml"
else
controller_yaml="${controller_yaml}/heapster-controller.yaml"
fi
remove-salt-config-comments "${controller_yaml}"
sed -i -e "s@{{ cluster_name }}@${CLUSTER_NAME}@g" "${controller_yaml}"
sed -i -e "s@{{ *base_metrics_memory *}}@${base_metrics_memory}@g" "${controller_yaml}"
sed -i -e "s@{{ *base_metrics_cpu *}}@${base_metrics_cpu}@g" "${controller_yaml}"
sed -i -e "s@{{ *base_eventer_memory *}}@${base_eventer_memory}@g" "${controller_yaml}"
sed -i -e "s@{{ *metrics_memory_per_node *}}@${metrics_memory_per_node}@g" "${controller_yaml}"
sed -i -e "s@{{ *eventer_memory_per_node *}}@${eventer_memory_per_node}@g" "${controller_yaml}"
sed -i -e "s@{{ *nanny_memory *}}@${nanny_memory}@g" "${controller_yaml}"
sed -i -e "s@{{ *metrics_cpu_per_node *}}@${metrics_cpu_per_node}@g" "${controller_yaml}"
    update-prometheus-to-sd-parameters "${controller_yaml}"
fi
if [[ "${ENABLE_CLUSTER_DNS:-}" == "true" ]]; then
setup-addon-manifests "addons" "dns"
local -r dns_controller_file="${dst_dir}/dns/kubedns-controller.yaml"
local -r dns_svc_file="${dst_dir}/dns/kubedns-svc.yaml"
mv "${dst_dir}/dns/kubedns-controller.yaml.in" "${dns_controller_file}"
mv "${dst_dir}/dns/kubedns-svc.yaml.in" "${dns_svc_file}"
# Replace the salt configurations with variable values.
sed -i -e "s@{{ *pillar\['dns_domain'\] *}}@${DNS_DOMAIN}@g" "${dns_controller_file}"
sed -i -e "s@{{ *pillar\['dns_server'\] *}}@${DNS_SERVER_IP}@g" "${dns_svc_file}"
if [[ "${ENABLE_DNS_HORIZONTAL_AUTOSCALER:-}" == "true" ]]; then
setup-addon-manifests "addons" "dns-horizontal-autoscaler"
fi
fi
if [[ "${ENABLE_CLUSTER_REGISTRY:-}" == "true" ]]; then
setup-addon-manifests "addons" "registry"
local -r registry_pv_file="${dst_dir}/registry/registry-pv.yaml"
local -r registry_pvc_file="${dst_dir}/registry/registry-pvc.yaml"
mv "${dst_dir}/registry/registry-pv.yaml.in" "${registry_pv_file}"
mv "${dst_dir}/registry/registry-pvc.yaml.in" "${registry_pvc_file}"
# Replace the salt configurations with variable values.
    remove-salt-config-comments "${registry_pv_file}"
    remove-salt-config-comments "${registry_pvc_file}"
sed -i -e "s@{{ *pillar\['cluster_registry_disk_size'\] *}}@${CLUSTER_REGISTRY_DISK_SIZE}@g" "${registry_pv_file}"
sed -i -e "s@{{ *pillar\['cluster_registry_disk_size'\] *}}@${CLUSTER_REGISTRY_DISK_SIZE}@g" "${registry_pvc_file}"
sed -i -e "s@{{ *pillar\['cluster_registry_disk_name'\] *}}@${CLUSTER_REGISTRY_DISK}@g" "${registry_pvc_file}"
fi
if [[ "${ENABLE_NODE_LOGGING:-}" == "true" ]] && \
[[ "${LOGGING_DESTINATION:-}" == "elasticsearch" ]] && \
[[ "${ENABLE_CLUSTER_LOGGING:-}" == "true" ]]; then
setup-addon-manifests "addons" "fluentd-elasticsearch"
fi
if [[ "${ENABLE_NODE_LOGGING:-}" == "true" ]] && \
[[ "${LOGGING_DESTINATION:-}" == "gcp" ]]; then
setup-addon-manifests "addons" "fluentd-gcp"
local -r event_exporter_yaml="${dst_dir}/fluentd-gcp/event-exporter.yaml"
local -r fluentd_gcp_yaml="${dst_dir}/fluentd-gcp/fluentd-gcp-ds.yaml"
    update-prometheus-to-sd-parameters "${event_exporter_yaml}"
    update-prometheus-to-sd-parameters "${fluentd_gcp_yaml}"
start-fluentd-resource-update
fi
if [[ "${ENABLE_CLUSTER_UI:-}" == "true" ]]; then
setup-addon-manifests "addons" "dashboard"
fi
if [[ "${ENABLE_NODE_PROBLEM_DETECTOR:-}" == "daemonset" ]]; then
setup-addon-manifests "addons" "node-problem-detector"
fi
if [[ "${ENABLE_NODE_PROBLEM_DETECTOR:-}" == "standalone" ]]; then
# Setup role binding for standalone node problem detector.
setup-addon-manifests "addons" "node-problem-detector/standalone"
fi
if echo "${ADMISSION_CONTROL:-}" | grep -q "LimitRanger"; then
setup-addon-manifests "admission-controls" "limit-range"
fi
if [[ "${NETWORK_POLICY_PROVIDER:-}" == "calico" ]]; then
setup-addon-manifests "addons" "calico-policy-controller"
# Configure Calico based on cluster size and image type.
local -r ds_file="${dst_dir}/calico-policy-controller/calico-node-daemonset.yaml"
local -r typha_dep_file="${dst_dir}/calico-policy-controller/typha-deployment.yaml"
sed -i -e "s@__CALICO_CNI_DIR__@/home/kubernetes/bin@g" "${ds_file}"
sed -i -e "s@__CALICO_NODE_CPU__@$(get-calico-node-cpu)@g" "${ds_file}"
sed -i -e "s@__CALICO_TYPHA_CPU__@$(get-calico-typha-cpu)@g" "${typha_dep_file}"
sed -i -e "s@__CALICO_TYPHA_REPLICAS__@$(get-calico-typha-replicas)@g" "${typha_dep_file}"
else
    # If not configured to use Calico, set the typha replica count to 0, but
    # only if the addon is present.
local -r typha_dep_file="${dst_dir}/calico-policy-controller/typha-deployment.yaml"
if [[ -e $typha_dep_file ]]; then
sed -i -e "s@__CALICO_TYPHA_REPLICAS__@0@g" "${typha_dep_file}"
fi
fi
if [[ "${ENABLE_DEFAULT_STORAGE_CLASS:-}" == "true" ]]; then
setup-addon-manifests "addons" "storage-class/gce"
fi
if [[ "${ENABLE_IP_MASQ_AGENT:-}" == "true" ]]; then
setup-addon-manifests "addons" "ip-masq-agent"
fi
if [[ "${ENABLE_METADATA_PROXY:-}" == "simple" ]]; then
setup-addon-manifests "addons" "metadata-proxy/gce"
fi
# Place addon manager pod manifest.
cp "${src_dir}/kube-addon-manager.yaml" /etc/kubernetes/manifests
}
# Starts an image-puller - used in test clusters.
function start-image-puller {
echo "Start image-puller"
cp "${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/e2e-image-puller.manifest" \
/etc/kubernetes/manifests/
}
# Starts kube-registry proxy
function start-kube-registry-proxy {
echo "Start kube-registry-proxy"
cp "${KUBE_HOME}/kube-manifests/kubernetes/kube-registry-proxy.yaml" /etc/kubernetes/manifests
}
# Starts a l7 loadbalancing controller for ingress.
function start-lb-controller {
if [[ "${ENABLE_L7_LOADBALANCING:-}" == "glbc" ]]; then
echo "Start GCE L7 pod"
prepare-log-file /var/log/glbc.log
setup-addon-manifests "addons" "cluster-loadbalancing/glbc"
cp "${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/glbc.manifest" \
/etc/kubernetes/manifests/
fi
}
# Starts rescheduler.
function start-rescheduler {
if [[ "${ENABLE_RESCHEDULER:-}" == "true" ]]; then
echo "Start Rescheduler"
prepare-log-file /var/log/rescheduler.log
cp "${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/rescheduler.manifest" \
/etc/kubernetes/manifests/
fi
}
# Setup working directory for kubelet.
function setup-kubelet-dir {
echo "Making /var/lib/kubelet executable for kubelet"
mount -B /var/lib/kubelet /var/lib/kubelet/
mount -B -o remount,exec,suid,dev /var/lib/kubelet
}
function reset-motd {
# kubelet is installed both on the master and nodes, and the version is easy to parse (unlike kubectl)
local -r version="$("${KUBE_HOME}"/bin/kubelet --version=true | cut -f2 -d " ")"
# This logic grabs either a release tag (v1.2.1 or v1.2.1-alpha.1),
# or the git hash that's in the build info.
local gitref="$(echo "${version}" | sed -r "s/(v[0-9]+\.[0-9]+\.[0-9]+)(-[a-z]+\.[0-9]+)?.*/\1\2/g")"
local devel=""
if [[ "${gitref}" != "${version}" ]]; then
devel="
Note: This looks like a development version, which might not be present on GitHub.
If it isn't, the closest tag is at:
https://github.com/kubernetes/kubernetes/tree/${gitref}
"
gitref="${version//*+/}"
fi
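  # Example (hypothetical version strings): "v1.7.2" is kept as-is, while
  # "v1.7.2-beta.1.15+abcdef0123456789" first yields gitref "v1.7.2-beta.1",
  # is flagged as a development build, and gitref then falls back to the git
  # hash "abcdef0123456789".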
cat > /etc/motd <<EOF
Welcome to Kubernetes ${version}!
You can find documentation for Kubernetes at:
http://docs.kubernetes.io/
The source for this release can be found at:
/home/kubernetes/kubernetes-src.tar.gz
Or you can download it at:
https://storage.googleapis.com/kubernetes-release/release/${version}/kubernetes-src.tar.gz
It is based on the Kubernetes source at:
https://github.com/kubernetes/kubernetes/tree/${gitref}
${devel}
For Kubernetes copyright and licensing information, see:
/home/kubernetes/LICENSES
EOF
}
function override-kubectl {
echo "overriding kubectl"
echo "export PATH=${KUBE_HOME}/bin:\$PATH" > /etc/profile.d/kube_env.sh
}
########### Main Function ###########
echo "Start to configure instance for kubernetes"
KUBE_HOME="/home/kubernetes"
CONTAINERIZED_MOUNTER_HOME="${KUBE_HOME}/containerized_mounter"
if [[ ! -e "${KUBE_HOME}/kube-env" ]]; then
echo "The ${KUBE_HOME}/kube-env file does not exist!! Terminate cluster initialization."
exit 1
fi
source "${KUBE_HOME}/kube-env"
if [[ -e "${KUBE_HOME}/kube-master-certs" ]]; then
source "${KUBE_HOME}/kube-master-certs"
fi
if [[ -n "${KUBE_USER:-}" ]]; then
if ! [[ "${KUBE_USER}" =~ ^[-._@a-zA-Z0-9]+$ ]]; then
echo "Bad KUBE_USER format."
exit 1
fi
fi
# generate the controller manager and scheduler tokens here since they are only used on the master.
KUBE_CONTROLLER_MANAGER_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
KUBE_SCHEDULER_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
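# Each token above is 128 random bytes, base64-encoded, stripped of '=', '+'
# and '/', then truncated to 32 characters, e.g. a (hypothetical) value like
# "x7RZ0qTnB4kVwLr2sYfGm8cQpD1aHj5e".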
setup-os-params
config-ip-firewall
create-dirs
setup-kubelet-dir
ensure-local-ssds
setup-logrotate
if [[ "${KUBERNETES_MASTER:-}" == "true" ]]; then
mount-master-pd
create-node-pki
create-master-pki
create-master-auth
create-master-kubelet-auth
create-master-etcd-auth
else
create-node-pki
  create-kubelet-kubeconfig "${KUBERNETES_MASTER_NAME}"
create-kubeproxy-kubeconfig
if [[ "${ENABLE_NODE_PROBLEM_DETECTOR:-}" == "standalone" ]]; then
create-node-problem-detector-kubeconfig
fi
fi
override-kubectl
# Run the containerized mounter once to pre-cache the container image.
assemble-docker-flags
start-kubelet
if [[ "${KUBERNETES_MASTER:-}" == "true" ]]; then
compute-master-manifest-variables
start-etcd-servers
start-etcd-empty-dir-cleanup-pod
start-kube-apiserver
start-kube-controller-manager
start-kube-scheduler
start-kube-addons
start-cluster-autoscaler
start-lb-controller
start-rescheduler
else
start-kube-proxy
# Kube-registry-proxy.
if [[ "${ENABLE_CLUSTER_REGISTRY:-}" == "true" ]]; then
start-kube-registry-proxy
fi
if [[ "${PREPULL_E2E_IMAGES:-}" == "true" ]]; then
start-image-puller
fi
if [[ "${ENABLE_NODE_PROBLEM_DETECTOR:-}" == "standalone" ]]; then
start-node-problem-detector
fi
fi
reset-motd
prepare-mounter-rootfs
modprobe configs
echo "Done for the configuration for kubernetes"
|
hacktastic/kubernetes
|
cluster/gce/gci/configure-helper.sh
|
Shell
|
apache-2.0
| 74,195 |
#! /bin/bash
source ./agent.version
echo "AGENT_VERSION=$AGENT_VERSION"
echo "MDSD_DEB_PACKAGE_NAME=$MDSD_DEB_PACKAGE_NAME"
echo "MDSD_RPM_PACKAGE_NAME=$MDSD_RPM_PACKAGE_NAME"
# updating HandlerManifest.json
# check for "version": "x.x.x",
sed -i "s/\"version\".*$/\"version\": \"$AGENT_VERSION\",/g" HandlerManifest.json
# updating agent.py
sed -i "s/^BundleFileNameDeb = .*$/BundleFileNameDeb = '$MDSD_DEB_PACKAGE_NAME'/" agent.py
sed -i "s/^BundleFileNameRpm = .*$/BundleFileNameRpm = '$MDSD_RPM_PACKAGE_NAME'/" agent.py
# updating manifest.xml
# check <Version>...</Version>
sed -i -e "s|<Version>[0-9a-z.]\{1,\}</Version>|<Version>$AGENT_VERSION</Version>|g" manifest.xml
|
bpramod/azure-linux-extensions
|
AzureMonitorAgent/apply_version.sh
|
Shell
|
apache-2.0
| 683 |
#!/bin/bash
# get opennebula context
mount -t iso9660 /dev/sr1 /mnt
if [ -f /mnt/context.sh ]; then
. /mnt/context.sh
fi
umount /mnt
ADDR=`ifconfig eth0 | grep "inet addr:" | cut -d ':' -f 2 | cut -d ' ' -f 1`
if [ "x$SCALARIS_FIRST" = xtrue ]; then
# only eth0 for the moment
ERLANG_ADDR=`echo $ADDR | tr . ,`
echo "{known_hosts, [{{$ERLANG_ADDR}, 14195, service_per_vm}]}." >> /etc/scalaris/scalaris.local.cfg
echo "{mgmt_server, {{$ERLANG_ADDR}, 14195, mgmt_server}}." >> /etc/scalaris/scalaris.local.cfg
SCALARIS_PARAMS="-f -m"
fi
if [ "x$SCALARIS_FIRST" = xfalse ]; then
SCALARIS_PARAMS=""
if [ "x$SCALARIS_KNOWN_HOSTS" != "x" ]; then
echo "$SCALARIS_KNOWN_HOSTS" >> /etc/scalaris/scalaris.local.cfg
fi
if [ "x$SCALARIS_MGMT_SERVER" != "x" ]; then
echo "$SCALARIS_MGMT_SERVER" >> /etc/scalaris/scalaris.local.cfg
fi
fi
# temporary fix, we are waiting for a real scalaris user
export HOME=/root
# the sleep command is used to find error messages
screen -d -m /bin/bash -c "/usr/bin/scalarisctl -s -n node@$ADDR -p 14195 -y 8000 $SCALARIS_PARAMS start; sleep 365d"
/usr/lib/scalaris/contrib/opennebula/start-manager.sh
/etc/init.d/iptables stop
|
Eonblast/Scalaxis
|
contrib/opennebula/scalaris-iso/init-contrail.sh
|
Shell
|
apache-2.0
| 1,189 |
#!/usr/bin/env bash
set -e
server="${1}"
rsync -vrl --delete * "${server}":
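# Example run (hypothetical host):
#   ./deploy_demos.sh demo.example.com
# which syncs everything in this directory to that host's home directory,
# deleting remote files that no longer exist locally.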
|
jupyter/nbgrader
|
demos/deploy_demos.sh
|
Shell
|
bsd-3-clause
| 78 |
#!/bin/bash
# Vim
apt-get install -y vim
cat << EOF >/etc/vim/vimrc.local
syntax on
set expandtab
set tabstop=4
set number
EOF
update-alternatives --set editor /usr/bin/vim.basic
|
wimme002/SecretSanta
|
shell_provisioner/module/vim.sh
|
Shell
|
isc
| 183 |
#! /bin/sh -e
# tup - A file-based build system
#
# Copyright (C) 2015 Mike Shal <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# Try to combine files that should be grouped because of combined commands.
#
# When the command nodes get combined, their edges are combined as well.
# However, if these aren't sorted when they are combined, then future nodes
# won't be combined when they should.
. ./tup.sh
cat > Tupfile << HERE
: |> cat bar.h > bar.o |> bar.o
: |> cat foo.h bar.h > foo.o |> foo.o
: foo.o |> cat foo.o |>
: |> cat bar.h foo.h > blah.h |> blah.h
HERE
tup touch foo.h bar.h Tupfile
update
tup graph . --combine > ok.dot
gitignore_good 'cat.*bar.h.*2 commands' ok.dot
gitignore_good 'foo*\.h.*2 files' ok.dot
eotup
|
p2rkw/tup
|
test/t4169-graph-combine6.sh
|
Shell
|
gpl-2.0
| 1,346 |
# Plowshare upstore module
# Copyright (c) 2013-2014 Plowshare team
#
# This file is part of Plowshare.
#
# Plowshare is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Plowshare is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Plowshare. If not, see <http://www.gnu.org/licenses/>.
MODULE_UPSTORE_REGEXP_URL='https\?://\(www\.\)\?upsto\(\.re\|re\.net\)/'
MODULE_UPSTORE_DOWNLOAD_OPTIONS="
AUTH,a,auth,a=EMAIL:PASSWORD,User account"
MODULE_UPSTORE_DOWNLOAD_RESUME=no
MODULE_UPSTORE_DOWNLOAD_FINAL_LINK_NEEDS_COOKIE=no
MODULE_UPSTORE_DOWNLOAD_SUCCESSIVE_INTERVAL=900
MODULE_UPSTORE_UPLOAD_OPTIONS="
AUTH,a,auth,a=EMAIL:PASSWORD,User account
SHORT_LINK,,short-link,,Produce short link like http://upsto.re/XXXXXX"
MODULE_UPSTORE_UPLOAD_UPLOAD_REMOTE_SUPPORT=no
MODULE_UPSTORE_PROBE_OPTIONS=""
# Static function. Proceed with login
# $1: authentication
# $2: cookie file
# $3: base url
# stdout: account type ("free" or "premium") on success
upstore_login() {
local -r AUTH=$1
local -r COOKIE_FILE=$2
local -r BASE_URL=$3
local LOGIN_DATA PAGE STATUS NAME
LOGIN_DATA='email=$USER&password=$PASSWORD&send=Login'
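    # single quotes are deliberate: post_login substitutes $USER and $PASSWORD itself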
PAGE=$(post_login "$AUTH" "$COOKIE_FILE" "$LOGIN_DATA" \
"$BASE_URL/account/login/" -b 'lang=en' --location) || return
STATUS=$(parse_cookie_quiet 'usid' < "$COOKIE_FILE")
[ -n "$STATUS" ] || return $ERR_LOGIN_FAILED
# Determine account type and user name
NAME=$(parse '"/account/"' '^[[:space:]]*\(.*\)$' 1 <<< "$PAGE")
if match '="/premium/">renew</a>' "$PAGE"; then
echo 'premium'
else
echo 'free'
fi
log_debug "Successfully logged in as member '$NAME'"
}
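# Sketch of a caller, following plowshare conventions (AUTH holds
# "EMAIL:PASSWORD"; values here are hypothetical):
#   ACC=$(upstore_login "$AUTH" "$COOKIE_FILE" 'https://upstore.net') || return
#   [ "$ACC" = 'premium' ] && log_debug 'premium account detected'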
# Switch language to english
# $1: cookie file
# $2: base URL
upstore_switch_lang() {
curl -c "$1" -o /dev/null "$2/?lang=en" || return
}
# Output a file URL to download from Upsto.re
# $1: cookie file
# $2: upstore url
# stdout: real file download link
# file name
upstore_download() {
local -r COOKIE_FILE=$1
local -r URL=$2
local -r BASE_URL='https://upstore.net'
local PAGE HASH ERR WAIT JSON
# extract file ID from URL
# http://upstore.net/xyz
# http://upsto.re/xyz
HASH=$(echo "$URL" | parse '' 'upsto[^/]\+/\([[:alnum:]]\+\)') || return
log_debug "File ID: '$HASH'"
upstore_switch_lang "$COOKIE_FILE" "$BASE_URL" || return
if [ -n "$AUTH" ]; then
ACC=$(upstore_login "$AUTH" "$COOKIE_FILE" "$BASE_URL") || return
fi
PAGE=$(curl -b "$COOKIE_FILE" -c "$COOKIE_FILE" -b 'lang=en' "$BASE_URL/$HASH") || return
ERR=$(echo "$PAGE" | parse_tag_quiet 'span class="error"' span) || return
if [ -n "$ERR" ]; then
[ "$ERR" = 'File not found' ] && return $ERR_LINK_DEAD
# File size is larger than 1 GB. Unfortunately, it can be downloaded only with premium
if [[ "$ERR" = 'File size is larger than'* ]]; then
return $ERR_LINK_NEED_PERMISSIONS
fi
log_error "Unexpected remote error: $ERR"
return $ERR_FATAL
fi
if [ -n "$AUTH" -a "$ACC" = 'premium' ]; then
JSON=$(curl -b "$COOKIE_FILE" -b 'lang=en' --referer "$URL" \
-H 'X-Requested-With: XMLHttpRequest' \
-d "hash=$HASH" \
-d 'antispam=' \
-d 'js=1' "$BASE_URL/load/premium/") || return
parse_json 'ok' <<< "$JSON" || return
return 0
fi
PAGE=$(curl -b "$COOKIE_FILE" -d "hash=$HASH" \
-d 'free=Slow+download' "$BASE_URL/$HASH") || return
# Error message is inside <span> or <h2> tag
ERR=$(echo "$PAGE" | parse_quiet 'class="error"' '>\([^<]\+\)</') || return
if [ -n "$ERR" ]; then
case "$ERR" in
# Sorry, but server with file is overloaded
# Server for free downloads is overloaded
*[Ss]erver*overloaded*)
log_error 'No free download slots available'
echo 120 # wait some arbitrary time
return $ERR_LINK_TEMP_UNAVAILABLE
;;
*'only for Premium users')
return $ERR_LINK_NEED_PERMISSIONS
;;
esac
log_error "Unexpected remote error: $ERR"
return $ERR_FATAL
fi
WAIT=$(echo "$PAGE" | parse 'Please wait %s before downloading' \
'^var sec = \([[:digit:]]\+\),') || return
wait $((WAIT + 1)) || return
# Solve recaptcha
local PUBKEY WCI CHALLENGE WORD CONTROL ID
PUBKEY='6LeqftkSAAAAAHl19qD7wPAVglFYWhZPTjno3wFb'
WCI=$(recaptcha_process $PUBKEY)
{ read WORD; read CHALLENGE; read ID; } <<< "$WCI"
PAGE=$(curl -b "$COOKIE_FILE" -b 'lang=en' -d "recaptcha_response_field=$WORD" \
-d "recaptcha_challenge_field=$CHALLENGE" -d "hash=$HASH" \
-d 'free=Get download link' "$BASE_URL/$HASH") || return
ERR=$(echo "$PAGE" | parse_tag_quiet 'span class="error"' span) || return
if [ -n "$ERR" ]; then
if [ "$ERR" = 'Wrong captcha protection code' ]; then
log_error 'Wrong captcha'
captcha_nack $ID
return $ERR_CAPTCHA
fi
captcha_ack $ID
case "$ERR" in
*[Ss]erver*overloaded*)
log_error 'No free download slots available'
echo 120 # wait some arbitrary time
return $ERR_LINK_TEMP_UNAVAILABLE
;;
# Sorry, you have reached a download limit for today (3 files). Please wait for tomorrow or...
*'you have reached a download limit for today'*)
# We'll take it literally and wait till the next day
# Note: Consider the time zone of their server (+0:00)
local HOUR MIN TIME
# Get current UTC time, prevent leading zeros
TIME=$(date -u +'%k:%M') || return
HOUR=${TIME%:*}
MIN=${TIME#*:}
log_error 'Daily limit reached.'
echo $(( ((23 - HOUR) * 60 + (61 - ${MIN#0}) ) * 60 ))
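                # Worked example (hypothetical clock): at 21:35 UTC this is
                # ((23 - 21) * 60 + (61 - 35)) * 60 = 8760s, i.e. resume
                # shortly after 00:01 UTC the next day.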
return $ERR_LINK_TEMP_UNAVAILABLE
;;
# Sorry, we have found that you or someone else have already downloaded another file recently from your IP (1.1.1.1). You should wait 13 minutes before downloading next file
*'you or someone else have already downloaded'*)
local WAIT
WAIT=$(echo "$ERR" | parse '' \
'wait \([[:digit:]]\+\) minute') || return
log_error 'Forced delay between downloads.'
echo $(( WAIT * 60 + 1 ))
return $ERR_LINK_TEMP_UNAVAILABLE
;;
# Sorry, we have found that you have already downloaded several files recently.
*'downloaded several files recently'*)
log_error 'Forced delay between downloads.'
echo 3600 # wait some arbitrary time
return $ERR_LINK_TEMP_UNAVAILABLE
;;
esac
log_error "Unexpected remote error: $ERR"
return $ERR_FATAL
fi
captcha_ack $ID
# extract + output download link + file name
echo "$PAGE" | parse_attr '<b>Download file</b>' 'href' || return
echo "$PAGE" | parse_tag '^[[:space:]]*Download file <b>' 'b' | html_to_utf8 || return
}
# Upload a file to Upstore.net
# $1: cookie file
# $2: input file (with full path)
# $3: remote filename
upstore_upload() {
local -r COOKIE_FILE=$1
local -r FILE=$2
local -r DEST_FILE=$3
local -r BASE_URL='http://upstore.net'
local PAGE JSON UP_URL FILE_SIZE MAX_SIZE HASH OPT_USER
upstore_switch_lang "$COOKIE_FILE" "$BASE_URL" || return
if [ -n "$AUTH" ]; then
upstore_login "$AUTH" "$COOKIE_FILE" "$BASE_URL" >/dev/null || return
fi
PAGE=$(curl -b "$COOKIE_FILE" "$BASE_URL") || return
UP_URL=$(echo "$PAGE" | parse 'script' "'\([^']\+\)',") || return
MAX_SIZE=$(echo "$PAGE" | parse 'sizeLimit' \
'[[:blank:]]\([[:digit:]]\+\),') || return
log_debug "URL: '$UP_URL'"
log_debug "Max size: '$MAX_SIZE'"
# Check file size
SIZE=$(get_filesize "$FILE") || return
if [ $SIZE -gt $MAX_SIZE ]; then
log_debug "File is bigger than $MAX_SIZE"
return $ERR_SIZE_LIMIT_EXCEEDED
fi
if [ -n "$AUTH" ]; then
local USER_ID
USER_ID=$(echo "$PAGE" | parse 'usid' ":[[:blank:]]'\([^']\+\)'") || return
log_debug "User ID: '$USER_ID'"
OPT_USER="-F usid=$USER_ID"
fi
# Note: Uses SWF variant of Uploadify v2.1.4 (jquery.uploadify)
JSON=$(curl_with_log --user-agent 'Shockwave Flash' -b "$COOKIE_FILE" \
-F "Filename=$DEST_FILE" \
-F 'folder=/' \
$OPT_USER \
-F 'fileext=*.*' \
-F "file=@$FILE;type=application/octet-stream;filename=$DEST_FILE" \
-F 'Upload=Submit Query' \
"$UP_URL") || return
HASH=$(echo "$JSON" | parse_json 'hash') || return
if [ -n "$SHORT_LINK" ]; then
echo "http://upsto.re/$HASH"
else
echo "$BASE_URL/$HASH"
fi
}
# Probe a download URL
# $1: cookie file (unused here)
# $2: Upstore url
# $3: requested capability list
# stdout: 1 capability per line
upstore_probe() {
local -r URL=$2
local -r REQ_IN=$3
local PAGE REQ_OUT FILE_SIZE
PAGE=$(curl --location -b 'lang=en' "$URL") || return
match 'File not found' "$PAGE" && return $ERR_LINK_DEAD
REQ_OUT=c
if [[ $REQ_IN = *f* ]]; then
echo "$PAGE" | parse '<div.*Download file' '>\([^<]\+\)<' 1 | \
html_to_utf8 && REQ_OUT="${REQ_OUT}f"
fi
if [[ $REQ_IN = *s* ]]; then
FILE_SIZE=$(echo "$PAGE" | parse '<div.*Download file' \
'^[[:blank:]]*\([[:digit:]]\+\(.[[:digit:]]\+\)\?[[:space:]][KMG]\?B\)' 3) &&
translate_size "$FILE_SIZE" && REQ_OUT="${REQ_OUT}s"
fi
echo $REQ_OUT
}
|
kidburglar/plowshare-modules-legacy
|
upstore.sh
|
Shell
|
gpl-3.0
| 10,333 |
#!/bin/sh
for pkgfile in `ls ../pkgData/AppIcon/`
do
if [ ! -f ../pkgData/pkgInfo/${pkgfile%.*} ]
then
echo $pkgfile
fi
done
|
Zulfikarlatief/tealinux-software-center
|
tools/findUnnecessaryIcon.sh
|
Shell
|
gpl-3.0
| 146 |
#!/bin/bash
VERSION_android=
URL_android=
DEPS_android=(pygame)
MD5_android=
BUILD_android=$BUILD_PATH/android/android
RECIPE_android=$RECIPES_PATH/android
function prebuild_android() {
cd $BUILD_PATH/android
rm -rf android
if [ ! -d android ]; then
try cp -a $RECIPE_android/src $BUILD_android
fi
}
function shouldbuild_android() {
if [ -d "$SITEPACKAGES_PATH/android" ]; then
DO_BUILD=0
fi
}
function build_android() {
cd $BUILD_android
# if the last step have been done, avoid all
if [ -f .done ]; then
return
fi
push_arm
export LDFLAGS="$LDFLAGS -L$LIBS_PATH"
export LDSHARED="$LIBLINK"
# cythonize
try find . -iname '*.pyx' -exec cython {} \;
try $HOSTPYTHON setup.py build_ext -v
try $HOSTPYTHON setup.py install -O2
unset LDSHARED
touch .done
pop_arm
}
function postbuild_android() {
true
}
|
eHealthAfrica/python-for-android
|
recipes/android/recipe.sh
|
Shell
|
lgpl-2.1
| 838 |
#!/bin/sh
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# On windows, run this inside cygwin
# Bourne shell syntax, this should hopefully run on pretty much anything.
usage() {
echo "Usage: cd to this project's project directory, then ./verifySigsEclipseUpdateSite.sh"
}
if [ "$1" = "-help" ]
then
usage
exit 1
fi
# Verify PGP signatures, MD5 and SHA1 checksums on all jars
for i in $(find ./target/eclipse-update-site -name '*.jar')
do
gpg --verify $i.asc
md5sum --check $i.md5
sha1sum --check $i.sha1
done
# Verify PGP signatures, MD5 and SHA1 checksums on all gz files
for i in $(find ./target/eclipse-update-site -name '*.gz')
do
gpg --verify $i.asc
md5sum --check $i.md5
sha1sum --check $i.sha1
done
|
apache/uima-build
|
uima-eclipse-composite-update-site/verifySigsEclipseUpdateSite.sh
|
Shell
|
apache-2.0
| 1,518 |
#!/bin/bash
# Wide package tool.
#
# Command:
# ./pkg.sh ${version} ${target}
# Example:
# ./pkg.sh 1.0.0 /home/daniel/1.0.0/
ver=$1
target=$2
list="conf doc i18n static views README.md TERMS.md LICENSE"
mkdir -p ${target}
echo version=${ver}
echo target=${target}
## darwin
os=darwin
export GOOS=${os}
export GOARCH=386
echo wide-${ver}-${GOOS}-${GOARCH}.tar.gz
go build
go build github.com/visualfc/gotools
go build github.com/nsf/gocode
tar zcf ${target}/wide-${ver}-${GOOS}-${GOARCH}.tar.gz ${list} gotools gocode wide --exclude-vcs --exclude='conf/*.go' --exclude='i18n/*.go'
rm -f wide gotools gocode
export GOOS=${os}
export GOARCH=amd64
echo wide-${ver}-${GOOS}-${GOARCH}.tar.gz
go build
go build github.com/visualfc/gotools
go build github.com/nsf/gocode
tar zcf ${target}/wide-${ver}-${GOOS}-${GOARCH}.tar.gz ${list} gotools gocode wide --exclude-vcs --exclude='conf/*.go' --exclude='i18n/*.go'
rm -f wide gotools gocode
## linux
os=linux
export GOOS=${os}
export GOARCH=386
echo wide-${ver}-${GOOS}-${GOARCH}.tar.gz
go build
go build github.com/visualfc/gotools
go build github.com/nsf/gocode
tar zcf ${target}/wide-${ver}-${GOOS}-${GOARCH}.tar.gz ${list} gotools gocode wide --exclude-vcs --exclude='conf/*.go' --exclude='i18n/*.go'
rm -f wide gotools gocode
export GOOS=${os}
export GOARCH=amd64
echo wide-${ver}-${GOOS}-${GOARCH}.tar.gz
go build
go build github.com/visualfc/gotools
go build github.com/nsf/gocode
tar zcf ${target}/wide-${ver}-${GOOS}-${GOARCH}.tar.gz ${list} gotools gocode wide --exclude-vcs --exclude='conf/*.go' --exclude='i18n/*.go'
rm -f wide gotools gocode
## windows
os=windows
export GOOS=${os}
export GOARCH=386
echo wide-${ver}-${GOOS}-${GOARCH}.zip
go build
go build github.com/visualfc/gotools
go build github.com/nsf/gocode
zip -r -q ${target}/wide-${ver}-${GOOS}-${GOARCH}.zip ${list} gotools.exe gocode.exe wide.exe --exclude=conf/*.go --exclude=i18n/*.go
rm -f wide.exe gotools.exe gocode.exe
export GOOS=${os}
export GOARCH=amd64
echo wide-${ver}-${GOOS}-${GOARCH}.zip
go build
go build github.com/visualfc/gotools
go build github.com/nsf/gocode
zip -r -q ${target}/wide-${ver}-${GOOS}-${GOARCH}.zip ${list} gotools.exe gocode.exe wide.exe --exclude=conf/*.go --exclude=i18n/*.go
rm -f wide.exe gotools.exe gocode.exe
|
fengshao0907/wide
|
pkg.sh
|
Shell
|
apache-2.0
| 2,284 |
#!/bin/bash
FN="human650v3aCrlmm_1.0.3.tar.gz"
URLS=(
"https://bioconductor.org/packages/3.8/data/annotation/src/contrib/human650v3aCrlmm_1.0.3.tar.gz"
"https://bioarchive.galaxyproject.org/human650v3aCrlmm_1.0.3.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-human650v3acrlmm/bioconductor-human650v3acrlmm_1.0.3_src_all.tar.gz"
)
MD5="4b0de367ccc0f7499dcffe21ef1893c2"
# Use a staging area in the conda dir rather than temp dirs, both to avoid
# permission issues as well as to have things downloaded in a predictable
# manner.
STAGING=$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM
mkdir -p $STAGING
TARBALL=$STAGING/$FN
SUCCESS=0
for URL in ${URLS[@]}; do
wget -O- -q $URL > $TARBALL
[[ $? == 0 ]] || continue
# Platform-specific md5sum checks.
if [[ $(uname -s) == "Linux" ]]; then
if md5sum -c <<<"$MD5 $TARBALL"; then
SUCCESS=1
break
fi
  elif [[ $(uname -s) == "Darwin" ]]; then
    if [[ $(md5 $TARBALL | cut -f4 -d " ") == "$MD5" ]]; then
      SUCCESS=1
      break
    fi
  fi
done
if [[ $SUCCESS != 1 ]]; then
echo "ERROR: post-link.sh was unable to download any of the following URLs with the md5sum $MD5:"
printf '%s\n' "${URLS[@]}"
exit 1
fi
# Install and clean up
R CMD INSTALL --library=$PREFIX/lib/R/library $TARBALL
rm $TARBALL
rmdir $STAGING
|
bioconda/recipes
|
recipes/bioconductor-human650v3acrlmm/post-link.sh
|
Shell
|
mit
| 1,334 |
#!/bin/sh
helpdir="../../help/manual"
tmpdir_="../../help/temp"
srcuri="http://alphamanual.audacityteam.org/man"
mkdir -p "${tmpdir_}"
python mw2html.py "${srcuri}" "${tmpdir_}" -s
mv "${tmpdir_}/alphamanual.audacityteam.org" "${helpdir}"
rm -r "${tmpdir_}"
|
ThomasFeher/audacity
|
scripts/mw2html_audacity/wiki2htm.sh
|
Shell
|
gpl-2.0
| 260 |
#!/usr/bin/env bash
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Creates a HBase release candidate. The script will update versions, tag the branch,
# build HBase binary packages and documentation, and upload maven artifacts to a staging
# repository. There is also a dry run mode where only local builds are performed, and
# nothing is uploaded to the ASF repos.
#
# Run with "-h" for options. For example, running below will do all
# steps above using the 'rm' dir under Downloads as workspace:
#
# $ ./do-release-docker.sh -d ~/Downloads/rm
#
# The scripts in this directory came originally from spark [1]. They were then
# modified to suit the hbase context. These scripts supersede the old
# ../make_rc.sh script for making release candidates because what is here is more
# comprehensive, doing more steps of the RM process as well as running in a
# container so the RM build environment can be a constant.
#
# It:
# * Tags release
# * Sets version to the release version
# * Sets version to next SNAPSHOT version.
# * Builds, signs, and hashes all artifacts.
# * Pushes release tgzs to the dev dir in a apache dist.
# * Pushes to repository.apache.org staging.
#
# The entry point is here, in the do-release-docker.sh script.
#
# 1. https://github.com/apache/spark/tree/master/dev/create-release
#
set -e
# Set this to build other hbase repos: e.g. PROJECT=hbase-operator-tools
export PROJECT="${PROJECT:-hbase}"
SELF="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# shellcheck source=SCRIPTDIR/release-util.sh
. "$SELF/release-util.sh"
ORIG_PWD="$(pwd)"
function usage {
local NAME
NAME="$(basename "${BASH_SOURCE[0]}")"
cat <<EOF
Usage: $NAME [OPTIONS]
Runs release scripts inside a docker image.
Options:
-d [path] Required. Working directory. Output will be written to "output" in here.
-f "force" -- actually publish this release. Unless you specify '-f', it will
default to dry run mode, which checks and does local builds, but does not
upload anything.
-t [tag] Tag for the hbase-rm docker image to use for building (default: "latest").
-j [path] Path to local JDK installation to use building. By default the script will
use openjdk8 installed in the docker image.
-p [project] Project to build: e.g. 'hbase' or 'hbase-thirdparty'; defaults to PROJECT env var
-r [repo] Git repo to use for remote git operations. defaults to ASF gitbox for project.
-s [step] Runs a single step of the process; valid steps: tag|publish-dist|publish-release.
If none specified, runs tag, then publish-dist, and then publish-release.
'publish-snapshot' is also an allowed, less used, option.
-x Debug. Does less clean up (env file, gpg forwarding on mac)
EOF
exit 1
}
WORKDIR=
IMGTAG=latest
JAVA=
RELEASE_STEP=
GIT_REPO=
while getopts "d:fhj:p:r:s:t:x" opt; do
case $opt in
d) WORKDIR="$OPTARG" ;;
f) DRY_RUN=0 ;;
t) IMGTAG="$OPTARG" ;;
j) JAVA="$OPTARG" ;;
p) PROJECT="$OPTARG" ;;
r) GIT_REPO="$OPTARG" ;;
s) RELEASE_STEP="$OPTARG" ;;
x) DEBUG=1 ;;
h) usage ;;
?) error "Invalid option. Run with -h for help." ;;
esac
done
shift $((OPTIND-1))
if (( $# > 0 )); then
error "Arguments can only be provided with option flags, invalid args: $*"
fi
export DEBUG
if [ -z "$WORKDIR" ] || [ ! -d "$WORKDIR" ]; then
error "Work directory (-d) must be defined and exist. Run with -h for help."
fi
if [ -d "$WORKDIR/output" ]; then
read -r -p "Output directory already exists. Overwrite and continue? [y/n] " ANSWER
if [ "$ANSWER" != "y" ]; then
error "Exiting."
fi
fi
if [ -f "${WORKDIR}/gpg-proxy.ssh.pid" ] || \
[ -f "${WORKDIR}/gpg-proxy.cid" ] || \
[ -f "${WORKDIR}/release.cid" ]; then
read -r -p "container/pid files from prior run exists. Overwrite and continue? [y/n] " ANSWER
if [ "$ANSWER" != "y" ]; then
error "Exiting."
fi
fi
cd "$WORKDIR"
rm -rf "$WORKDIR/output"
rm -rf "${WORKDIR}/gpg-proxy.ssh.pid" "${WORKDIR}/gpg-proxy.cid" "${WORKDIR}/release.cid"
mkdir "$WORKDIR/output"
banner "Gathering release details."
HOST_OS="$(get_host_os)"
get_release_info
banner "Setup"
# Place all RM scripts and necessary data in a local directory that must be defined in the command
# line. This directory is mounted into the image. It's WORKDIR, the arg passed with -d.
for f in "$SELF"/*; do
if [ -f "$f" ]; then
cp "$f" "$WORKDIR"
fi
done
# We need to import that public key in the container in order to use the private key via the agent.
GPG_KEY_FILE="$WORKDIR/gpg.key.public"
log "Exporting public key for ${GPG_KEY}"
fcreate_secure "$GPG_KEY_FILE"
$GPG "${GPG_ARGS[@]}" --export "${GPG_KEY}" > "${GPG_KEY_FILE}"
function cleanup {
local id
banner "Release Cleanup"
if is_debug; then
log "skipping due to debug run"
return 0
fi
log "details in cleanup.log"
if [ -f "${ENVFILE}" ]; then
rm -f "$ENVFILE"
fi
rm -f "$GPG_KEY_FILE"
if [ -f "${WORKDIR}/gpg-proxy.ssh.pid" ]; then
id=$(cat "${WORKDIR}/gpg-proxy.ssh.pid")
echo "Stopping ssh tunnel for gpg-agent at PID ${id}" | tee -a cleanup.log
kill -9 "${id}" >>cleanup.log 2>&1 || true
rm -f "${WORKDIR}/gpg-proxy.ssh.pid" >>cleanup.log 2>&1
fi
if [ -f "${WORKDIR}/gpg-proxy.cid" ]; then
id=$(cat "${WORKDIR}/gpg-proxy.cid")
echo "Stopping gpg-proxy container with ID ${id}" | tee -a cleanup.log
docker kill "${id}" >>cleanup.log 2>&1 || true
rm -f "${WORKDIR}/gpg-proxy.cid" >>cleanup.log 2>&1
# TODO we should remove the gpgagent volume?
fi
if [ -f "${WORKDIR}/release.cid" ]; then
id=$(cat "${WORKDIR}/release.cid")
echo "Stopping release container with ID ${id}" | tee -a cleanup.log
docker kill "${id}" >>cleanup.log 2>&1 || true
rm -f "${WORKDIR}/release.cid" >>cleanup.log 2>&1
fi
}
trap cleanup EXIT
log "Host OS: ${HOST_OS}"
if [ "${HOST_OS}" == "DARWIN" ]; then
run_silent "Building gpg-agent-proxy image with tag ${IMGTAG}..." "docker-proxy-build.log" \
docker build --build-arg "UID=${UID}" --build-arg "RM_USER=${USER}" \
--tag "org.apache.hbase/gpg-agent-proxy:${IMGTAG}" "${SELF}/mac-sshd-gpg-agent"
fi
run_silent "Building hbase-rm image with tag $IMGTAG..." "docker-build.log" \
docker build --tag "org.apache.hbase/hbase-rm:$IMGTAG" --build-arg "UID=$UID" \
--build-arg "RM_USER=${USER}" "$SELF/hbase-rm"
banner "Final prep for container launch."
log "Writing out environment for container."
# Write the release information to a file with environment variables to be used when running the
# image.
ENVFILE="$WORKDIR/env.list"
fcreate_secure "$ENVFILE"
cat > "$ENVFILE" <<EOF
PROJECT=$PROJECT
DRY_RUN=$DRY_RUN
SKIP_TAG=$SKIP_TAG
RUNNING_IN_DOCKER=1
GIT_BRANCH=$GIT_BRANCH
NEXT_VERSION=$NEXT_VERSION
RELEASE_VERSION=$RELEASE_VERSION
RELEASE_TAG=$RELEASE_TAG
GIT_REF=$GIT_REF
ASF_USERNAME=$ASF_USERNAME
GIT_NAME=$GIT_NAME
GIT_EMAIL=$GIT_EMAIL
GPG_KEY=$GPG_KEY
ASF_PASSWORD=$ASF_PASSWORD
RELEASE_STEP=$RELEASE_STEP
API_DIFF_TAG=$API_DIFF_TAG
HOST_OS=$HOST_OS
EOF
JAVA_MOUNT=()
if [ -n "$JAVA" ]; then
echo "JAVA_HOME=/opt/hbase-java" >> "$ENVFILE"
JAVA_MOUNT=(--mount "type=bind,src=${JAVA},dst=/opt/hbase-java,readonly")
fi
#TODO some debug output would be good here
GIT_REPO_MOUNT=()
if [ -n "${GIT_REPO}" ]; then
case "${GIT_REPO}" in
# skip the easy to identify remote protocols
ssh://*|git://*|http://*|https://*|ftp://*|ftps://*) ;;
# for sure local
/*)
GIT_REPO_MOUNT=(--mount "type=bind,src=${GIT_REPO},dst=/opt/hbase-repo,consistency=delegated")
echo "HOST_GIT_REPO=${GIT_REPO}" >> "${ENVFILE}"
GIT_REPO="/opt/hbase-repo"
;;
# on the host but normally git wouldn't use the local optimization
file://*)
log "Converted file:// git repo to a local path, which changes git to assume --local."
GIT_REPO_MOUNT=(--mount "type=bind,src=${GIT_REPO#file://},dst=/opt/hbase-repo,consistency=delegated")
echo "HOST_GIT_REPO=${GIT_REPO}" >> "${ENVFILE}"
GIT_REPO="/opt/hbase-repo"
;;
# have to decide if it's a local path or the "scp-ish" remote
*)
declare colon_remove_prefix;
declare slash_remove_prefix;
declare local_path;
colon_remove_prefix="${GIT_REPO#*:}"
slash_remove_prefix="${GIT_REPO#*/}"
if [ "${GIT_REPO}" = "${colon_remove_prefix}" ]; then
# if there was no colon at all, we assume this must be a local path
local_path="no colon at all"
elif [ "${GIT_REPO}" != "${slash_remove_prefix}" ]; then
# if there was a colon and there is no slash, then we assume it must be scp-style host
# and a relative path
if [ "${#colon_remove_prefix}" -lt "${#slash_remove_prefix}" ]; then
# Given the substrings made by removing everything up to the first colon and slash
# we can determine which comes first based on the longer substring length.
# if the slash is first, then we assume the colon is part of a path name and if the colon
          # is first then it is the separator between a scp-style host name and the path.
local_path="slash happened before a colon"
fi
fi
if [ -n "${local_path}" ]; then
# convert to an absolute path
GIT_REPO="$(cd "$(dirname "${ORIG_PWD}/${GIT_REPO}")"; pwd)/$(basename "${ORIG_PWD}/${GIT_REPO}")"
GIT_REPO_MOUNT=(--mount "type=bind,src=${GIT_REPO},dst=/opt/hbase-repo,consistency=delegated")
echo "HOST_GIT_REPO=${GIT_REPO}" >> "${ENVFILE}"
GIT_REPO="/opt/hbase-repo"
fi
;;
esac
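  # Illustrative classifications under the heuristic above (hypothetical args):
  #   my/repo:dir   -> slash before first colon -> local path, gets mounted
  #   host:repo.git -> colon but no slash       -> scp-style remote, used as-is
  #   plainrepo     -> no colon at all          -> local path, gets mounted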
echo "GIT_REPO=${GIT_REPO}" >> "${ENVFILE}"
fi
GPG_PROXY_MOUNT=()
if [ "${HOST_OS}" == "DARWIN" ]; then
GPG_PROXY_MOUNT=(--mount "type=volume,src=gpgagent,dst=/home/${USER}/.gnupg/")
log "Setting up GPG agent proxy container needed on OS X."
log " we should clean this up for you. If that fails the container ID is below and in " \
"gpg-proxy.cid"
#TODO the key pair used should be configurable
docker run --rm -p 62222:22 \
--detach --cidfile "${WORKDIR}/gpg-proxy.cid" \
--mount \
"type=bind,src=${HOME}/.ssh/id_rsa.pub,dst=/home/${USER}/.ssh/authorized_keys,readonly" \
"${GPG_PROXY_MOUNT[@]}" \
"org.apache.hbase/gpg-agent-proxy:${IMGTAG}"
# gotta trust the container host
ssh-keyscan -p 62222 localhost 2>/dev/null | sort > "${WORKDIR}/gpg-agent-proxy.ssh-keyscan"
sort "${HOME}/.ssh/known_hosts" | comm -1 -3 - "${WORKDIR}/gpg-agent-proxy.ssh-keyscan" \
> "${WORKDIR}/gpg-agent-proxy.known_hosts"
if [ -s "${WORKDIR}/gpg-agent-proxy.known_hosts" ]; then
log "Your ssh known_hosts does not include the entries for the gpg-agent proxy container."
log "The following entry(ies) are missing:"
sed -e 's/^/ /' "${WORKDIR}/gpg-agent-proxy.known_hosts"
read -r -p "Okay to add these entries to ${HOME}/.ssh/known_hosts? [y/n] " ANSWER
if [ "$ANSWER" != "y" ]; then
error "Exiting."
fi
cat "${WORKDIR}/gpg-agent-proxy.known_hosts" >> "${HOME}/.ssh/known_hosts"
fi
log "Launching ssh reverse tunnel from the container to gpg agent."
log " we should clean this up for you. If that fails the PID is in gpg-proxy.ssh.pid"
ssh -p 62222 -R "/home/${USER}/.gnupg/S.gpg-agent:$(gpgconf --list-dir agent-extra-socket)" \
-i "${HOME}/.ssh/id_rsa" -N -n localhost >gpg-proxy.ssh.log 2>&1 &
echo $! > "${WORKDIR}/gpg-proxy.ssh.pid"
else
# Note that on linux we always directly mount the gpg agent's extra socket to limit what the
# container can ask the gpg-agent to do.
# When working on a remote linux machine you should be sure to forward both the remote machine's
# agent socket and agent extra socket to your local gpg-agent's extra socket. See the README.txt
# for an example.
GPG_PROXY_MOUNT=(--mount \
"type=bind,src=$(gpgconf --list-dir agent-extra-socket),dst=/home/${USER}/.gnupg/S.gpg-agent")
fi
banner "Building $RELEASE_TAG; output will be at $WORKDIR/output"
log "We should clean the container up when we are done. If that fails then the container ID " \
"is in release.cid"
echo
# Where possible we specify "consistency=delegated" when we do not need host access during the
# build run. On Mac OS X specifically this gets us a big perf improvement.
cmd=(docker run --rm -ti \
--env-file "$ENVFILE" \
--cidfile "${WORKDIR}/release.cid" \
--mount "type=bind,src=${WORKDIR},dst=/home/${USER}/hbase-rm,consistency=delegated" \
"${JAVA_MOUNT[@]}" \
"${GIT_REPO_MOUNT[@]}" \
"${GPG_PROXY_MOUNT[@]}" \
"org.apache.hbase/hbase-rm:$IMGTAG")
echo "${cmd[*]}"
"${cmd[@]}"
|
mahak/hbase
|
dev-support/create-release/do-release-docker.sh
|
Shell
|
apache-2.0
| 13,374 |
#~/code/code_bash_shell/Count_plot_data_match_to_trial_name.sh unique_trial_name.txt
while read line; do
f=`echo "$line"`
#echo "$f"
#echo "$f" | grep -c all_380_files4exp.txt
#f11=`expr '"'"$f1"'"'`
#`expr "$f1" : '.*_\(.*\)_'`
#`expr "$f1" : '.*_\(.*\)_'`
#echo "$f11"
#pn="$f1"
#grep -c "S2-LINE TRIAL-1-1" all_380_files4exp.txt
num=`grep -c "$f" 380_exp_files.txt`
#grep -c "$f" all_380_files4exp_2.txt
#grep "$f" 380_pedigree_file.txt
#echo -e "$f\t$num"
ReExfile=(`grep "$f" 380_exp_files.txt`)
#echo ${ReExfile[0]}
#echo ${ReExfile[1]}
#for (( i = 0 ; i < ${#ReExfile[@]} ; i++ ))
#do
#echo "Element [$i]: ${ReExfile[$i]}"
##wc -l "${ReExfile[$i]}"
#fi
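# The bare '<<REP2 ... REP2' heredoc below (and REP3 further down) acts as a
# block comment: with no command attached, bash reads the heredoc and discards it.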
<<REP2
if [[ "${#ReExfile[@]}" -eq 2 ]]
then
echo -e "$f\t$num"
#echo "${ReExfile[1]}"
#echo "${ReExfile[2]}"
diff <(cut -f5 "${ReExfile[0]}") <(cut -f5 "${ReExfile[1]}")
diff <(cut -f14 "${ReExfile[0]}") <(cut -f14 "${ReExfile[1]}")
diff <(cut -f59 "${ReExfile[0]}") <(cut -f59 "${ReExfile[1]}")
diff <(cut -f60 "${ReExfile[0]}") <(cut -f60 "${ReExfile[1]}")
fi
REP2
<<REP3
if [[ "${#ReExfile[@]}" -eq 3 ]]
then
echo -e "$f\t$num"
#echo "${ReExfile[1]}"
#echo "${ReExfile[2]}"
diffuse -D <(cut -f5 "${ReExfile[0]}") <(cut -f5 "${ReExfile[1]}") <(cut -f5 "${ReExfile[2]}")
diffuse -D <(cut -f14 "${ReExfile[0]}") <(cut -f14 "${ReExfile[1]}") <(cut -f14 "${ReExfile[2]}")
diffuse -D <(cut -f59 "${ReExfile[0]}") <(cut -f59 "${ReExfile[1]}") <(cut -f59 "${ReExfile[2]}")
diffuse -D <(cut -f60 "${ReExfile[0]}") <(cut -f60 "${ReExfile[1]}") <(cut -f60 "${ReExfile[2]}")
fi
REP3
if [[ "${#ReExfile[@]}" -eq 4 ]]
then
echo -e "$f\t$num"
#echo "${ReExfile[1]}"
#echo "${ReExfile[2]}"
diffuse -D <(cut -f5 "${ReExfile[0]}") <(cut -f5 "${ReExfile[1]}") <(cut -f5 "${ReExfile[2]}") <(cut -f5 "${ReExfile[3]}")
diffuse -D <(cut -f14 "${ReExfile[0]}") <(cut -f14 "${ReExfile[1]}") <(cut -f14 "${ReExfile[2]}") <(cut -f14 "${ReExfile[3]}")
diffuse -D <(cut -f59 "${ReExfile[0]}") <(cut -f59 "${ReExfile[1]}") <(cut -f59 "${ReExfile[2]}") <(cut -f59 "${ReExfile[3]}")
diffuse -D <(cut -f60 "${ReExfile[0]}") <(cut -f60 "${ReExfile[1]}") <(cut -f60 "${ReExfile[2]}") <(cut -f60 "${ReExfile[3]}")
fi
if [[ "${#ReExfile[@]}" -gt 4 ]]
then
echo -e "$f\t$num"
#echo "${ReExfile[1]}"
#echo "${ReExfile[2]}"
diffuse -D <(cut -f5 "${ReExfile[0]}") <(cut -f5 "${ReExfile[1]}") <(cut -f5 "${ReExfile[2]}") <(cut -f5 "${ReExfile[3]}") <(cut -f5 "${ReExfile[4]}") <(cut -f5 "${ReExfile[5]}")
diffuse -D <(cut -f14 "${ReExfile[0]}") <(cut -f14 "${ReExfile[1]}") <(cut -f14 "${ReExfile[2]}") <(cut -f14 "${ReExfile[3]}") <(cut -f14 "${ReExfile[4]}") <(cut -f14 "${ReExfile[5]}")
diffuse -D <(cut -f59 "${ReExfile[0]}") <(cut -f59 "${ReExfile[1]}") <(cut -f59 "${ReExfile[2]}") <(cut -f59 "${ReExfile[3]}") <(cut -f59 "${ReExfile[4]}") <(cut -f59 "${ReExfile[5]}")
diffuse -D <(cut -f60 "${ReExfile[0]}") <(cut -f60 "${ReExfile[1]}") <(cut -f60 "${ReExfile[2]}") <(cut -f60 "${ReExfile[3]}") <(cut -f60 "${ReExfile[4]}") <(cut -f60 "${ReExfile[5]}")
fi
#grep -c "$f" test2.txt
#grep -c "$f" test3.txt
#grep -c '$f' test.txt
#grep "" all_380_files4exp.txt
#ReExfile=(`grep "$f" 380_pedigree_file.txt`)
#echo ${ReExfile[*]} | diffuse -D
#echo ${#ReExfile[@]}
#diffuse -D ${ReExfile[*]}
#diffuse -D single_trial_ILS2-TEST-1_rep10_sorted_pedigree.csv single_trial_ILS2-TEST-1_rep11_sorted_pedigree.csv single_trial_ILS2-TEST-1_rep12_sorted_pedigree.csv single_trial_ILS2-TEST-1_rep13_sorted_ped#igree.csv
#for (( i = 0 ; i < ${#ReExfile[@]} ; i++ ))
#do
#echo "Element [$i]: ${ReExfile[$i]}"
#wc -l "${ReExfile[$i]}"
#fn=`echo "${ReExfile[$i]}" | awk -F . '{print $1}'`
#echo "$fn"
#cut -f13 "${ReExfile[$i]}" > "$fn"_pedigree.csv
#cut -f13 single_trial_3WHYB-2010-15-1_rep1_2.csv > single_trial_3WHYB-2010-15-1_rep1_2_pedigree.csv
#cut -f58 "${ReExfile[$i]}" > "$fn"_Line.csv
#cut -f58 single_trial_3WHYB-2010-15-1_rep1_2.csv > single_trial_3WHYB-2010-15-1_rep1_2_Line.csv
#cut -f59 "${ReExfile[$i]}" > "$fn"_Tester.csv
#cut -f59 single_trial_3WHYB-2010-15-1_rep1_2.csv > single_trial_3WHYB-2010-15-1_rep1_2_Tester.csv
#pedigree=(`tail -n +3 "${ReExfile[$i]}" | cut -f13`)
#echo ${#pedigree[@]}
#LineName=(`tail -n +3 "${ReExfile[$i]}" | cut -f58`)
#echo ${#LineName[@]}
#Tester=(`tail -n +3 "${ReExfile[$i]}" | cut -f59`)
#echo ${#Tester[@]}
#cut -f13 "${ReExfile[$i]}"
#cut -f58 "${ReExfile[$i]}"
#cut -f59 "${ReExfile[$i]}"
#echo "Element [$i]: ${acc[$i]}"
#if echo ${ped_acc[$i]} | grep -qi 'tester'
#then
#echo -e "${ped_acc[$i]}\t${ped[$i]}" >> single_trial_3WHYB-2010-15-1_pedigree.txt
#fi
#acc_with_tester[$i]=${acc1[$i]}_tester_${acc2[$1]}_${acc3[$1]}
#acc_with_tester=(${acc_with_tester[@]} ${acc1[$i]}_tester_${acc2[$1]}_${acc3[$1]})
#done
#tail -n +3 "$f1"| cut -f4 | sort -u
#grep "$f1" makumbi.pheno.154trials_CO.csv | wc -l
#(head -2 makumbi.pheno.154trials_CO.csv && grep "$f1" makumbi.pheno.154trials_CO.csv) > single_trial_"$f1".csv
#~/code/code_bash_shell/process_each_trial_for_vahid_data2.sh "$f1"
#pop_name=`echo "$f1"| cut -c 12-17`
#echo -e "\n";
done < $1
|
solgenomics/zeabase
|
code_bash_shell/Count_plot_data_match_to_trial_name.sh
|
Shell
|
artistic-2.0
| 5,089 |
#!/bin/bash
source ./setenv.sh
FILENAME=dumb-0.9.3
EXTENSION=tar.gz
wget -c http://downloads.sourceforge.net/project/dumb/dumb/0.9.3/dumb-0.9.3.tar.gz -O ../$FILENAME.$EXTENSION
tar -zxf ../$FILENAME.$EXTENSION
cd $FILENAME
patch -p0 < ../../../patches/armv6/libdumb-0.9.3.patch
make
make install
|
humble/ags-geminirue
|
iOS/buildlibs/armv6/dumb.sh
|
Shell
|
artistic-2.0
| 304 |
#!/bin/sh
#
# arch/ppc64/boot/install.sh
#
# This file is subject to the terms and conditions of the GNU General Public
# License. See the file "COPYING" in the main directory of this archive
# for more details.
#
# Copyright (C) 1995 by Linus Torvalds
#
# Blatantly stolen from arch/i386/boot/install.sh by Dave Hansen
#
# "make install" script for ppc64 architecture
#
# Arguments:
# $1 - kernel version
# $2 - kernel image file
# $3 - kernel map file
# $4 - default install path (blank if root directory)
# $5 - kernel boot file, the zImage
#
# User may have a custom install script
if [ -x ~/bin/${CROSS_COMPILE}installkernel ]; then exec ~/bin/${CROSS_COMPILE}installkernel "$@"; fi
if [ -x /sbin/${CROSS_COMPILE}installkernel ]; then exec /sbin/${CROSS_COMPILE}installkernel "$@"; fi
# Default install
# this should work for both the pSeries zImage and the iSeries vmlinux.sm
image_name=`basename $5`
if [ -f $4/$image_name ]; then
mv $4/$image_name $4/$image_name.old
fi
if [ -f $4/System.map ]; then
mv $4/System.map $4/System.old
fi
cat $2 > $4/$image_name
cp $3 $4/System.map
|
ipwndev/DSLinux-Mirror
|
linux-2.6.x/arch/ppc64/boot/install.sh
|
Shell
|
gpl-2.0
| 1,110 |
echo Filtering Debug Code out of DHTML Player...
for f in KeynoteDHTMLPlayer OrientationController TouchController TextureManager \
         StageManager ShowController ScriptManager DisplayManager AnimationManager
do
    echo "${f}..."
    # sed reads the file directly; the original used a redundant 'cat | sed'
    sed -f FilterDebugCode.sed "${f}.js" > "${f}.release.js"
    rm "${f}.js"
    mv "${f}.release.js" "${f}.js"
done
echo Done.
|
gwright/acts_as_bitemporal
|
slides/assets/player/FilterDebugCode.bash
|
Shell
|
mit
| 1,666 |
#!/bin/sh
./crtmpserver --daemon ./flvplayback.lua
|
OpenQCam/qcam
|
trunk/configs/run.sh
|
Shell
|
gpl-3.0
| 53 |
#!/bin/bash
mkdir -p /etc/dockercloud/agent/
if [ ! -f "/etc/dockercloud/agent/dockercloud-agent.conf" ]; then
cat > /etc/dockercloud/agent/dockercloud-agent.conf <<EOF
{
"CertCommonName":"${CERT_COMMON_NAME}",
"DockerHost":"${DOCKER_HOST_LISTEN}",
"Host":"${HOST}",
"Token":"${TOKEN}",
"UUID":"${UUID}"
}
EOF
fi
echo "Using dockercloud-agent.conf:"
cat /etc/dockercloud/agent/dockercloud-agent.conf
mkdir -p /var/log/dockercloud && touch /var/log/dockercloud/docker.log && tail -F /var/log/dockercloud/docker.log &
exec /usr/bin/dockercloud-agent -stdout "$@"
|
docker/dockercloud-node
|
staging/run.sh
|
Shell
|
apache-2.0
| 587 |
#!/bin/bash
# make sure we do not abort on invalid parameter (we
# once had this problem)
# added 2016-03-03 by RGerhards, released under ASL 2.0
echo \[glbl-invld-param\]:
. $srcdir/diag.sh init
. $srcdir/diag.sh generate-conf
. $srcdir/diag.sh add-conf '
global(invalid="off")
global(debug.unloadModules="invalid")
action(type="omfile" file="rsyslog.out.log")
'
. $srcdir/diag.sh startup
sleep 1
. $srcdir/diag.sh shutdown-when-empty
. $srcdir/diag.sh wait-shutdown
# if we reach this point, we consider this a success.
. $srcdir/diag.sh exit
|
madedotcom/rsyslog
|
tests/glbl-invld-param.sh
|
Shell
|
gpl-3.0
| 546 |
#!/bin/sh
nsenter -t1 -m -- /usr/bin/bpftrace -l
if [ "$?" -ne "0" ]; then
printf "bpftrace test suite FAILED\n" >&1
exit 1
fi;
printf "bpftrace test suite PASSED\n" >&1
|
deitch/linuxkit
|
test/cases/040_packages/002_bpftrace/check.sh
|
Shell
|
apache-2.0
| 173 |
#!/bin/bash
# bash is required: the tests/ref glob below relies on brace expansion
LC_ALL=C
export LC_ALL
datadir="tests/data"
logfile="$datadir/copy.regression"
reffile="$1"
list=$(grep -oh ' ./tests/data/.*' tests/ref/{acodec,lavf,vsynth1}/*| sort)
rm -f $logfile
for i in $list ; do
echo ---------------- >> $logfile
echo $i >> $logfile
./ffmpeg_g -flags +bitexact -i $i -acodec copy -vcodec copy -y first.nut
./ffmpeg_g -flags +bitexact -i first.nut -acodec copy -vcodec copy -y second.nut
cmp first.nut second.nut >> $logfile
md5sum first.nut >> $logfile
done
if diff -u -w "$reffile" "$logfile" ; then
echo
echo copy regression test: success
exit 0
else
echo
echo copy regression test: error
exit 1
fi
|
xbmc/xbmc-antiquated
|
xbmc/cores/dvdplayer/Codecs/ffmpeg/tests/copycooker.sh
|
Shell
|
gpl-2.0
| 688 |
#
# $Id: net.east.sh,v 1.2 2001/10/14 00:09:35 mcr Exp $
#
if [ -n "$UML_private_CTL" ]
then
net_eth0="eth0=daemon,10:00:00:dc:bc:ff,unix,$UML_private_CTL,$UML_private_DATA";
else
net_eth0="eth0=mcast,10:00:00:dc:bc:ff,239.192.0.1,21200"
fi
if [ -n "$UML_public_CTL" ]
then
net_eth1="eth1=daemon,10:00:00:64:64:23,unix,$UML_public_CTL,$UML_public_DATA";
else
net_eth1="eth1=mcast,10:00:00:64:64:23,239.192.1.2,31200";
fi
net="$net_eth0 $net_eth1"
|
rhuitl/uClinux
|
freeswan/testing/baseconfigs/net.east.sh
|
Shell
|
gpl-2.0
| 469 |
#!/usr/bin/env bash
set -e
cd "$(dirname "$BASH_SOURCE")/.."
rm -rf vendor/
source 'hack/.vendor-helpers.sh'
# the following lines are in sorted order, FYI
clone git github.com/Azure/go-ansiterm 70b2c90b260171e829f1ebd7c17f600c11858dbe
clone git github.com/Sirupsen/logrus v0.8.2 # logrus is a common dependency among multiple deps
clone git github.com/docker/libtrust 9cbd2a1374f46905c68a4eb3694a130610adc62a
clone git github.com/go-check/check 64131543e7896d5bcc6bd5a76287eb75ea96c673
clone git github.com/gorilla/context 14f550f51a
clone git github.com/gorilla/mux e444e69cbd
clone git github.com/kr/pty 5cf931ef8f
clone git github.com/mattn/go-sqlite3 b4142c444a8941d0d92b0b7103a24df9cd815e42
clone git github.com/microsoft/hcsshim 7f646aa6b26bcf90caee91e93cde4a80d0d8a83e
clone git github.com/mistifyio/go-zfs v2.1.1
clone git github.com/tchap/go-patricia v2.1.0
clone git golang.org/x/net 3cffabab72adf04f8e3b01c5baf775361837b5fe https://github.com/golang/net.git
clone hg code.google.com/p/gosqlite 74691fb6f837
#get libnetwork packages
clone git github.com/docker/libnetwork 22dc04d06067b40a9e7ef575aee6d1bb69d4dcc3
clone git github.com/armon/go-metrics eb0af217e5e9747e41dd5303755356b62d28e3ec
clone git github.com/hashicorp/go-msgpack 71c2886f5a673a35f909803f38ece5810165097b
clone git github.com/hashicorp/memberlist 9a1e242e454d2443df330bdd51a436d5a9058fc4
clone git github.com/hashicorp/serf 7151adcef72687bf95f451a2e0ba15cb19412bf2
clone git github.com/docker/libkv 60c7c881345b3c67defc7f93a8297debf041d43c
clone git github.com/vishvananda/netns 604eaf189ee867d8c147fafc28def2394e878d25
clone git github.com/vishvananda/netlink 4b5dce31de6d42af5bb9811c6d265472199e0fec
clone git github.com/BurntSushi/toml f706d00e3de6abe700c994cdd545a1a4915af060
clone git github.com/samuel/go-zookeeper d0e0d8e11f318e000a8cc434616d69e329edc374
clone git github.com/coreos/go-etcd v2.0.0
clone git github.com/hashicorp/consul v0.5.2
# get graph and distribution packages
clone git github.com/docker/distribution ec87e9b6971d831f0eff752ddb54fb64693e51cd # docker/1.8 branch
clone git github.com/vbatts/tar-split v0.9.6
clone git github.com/docker/notary 8e8122eb5528f621afcd4e2854c47302f17392f7
clone git github.com/endophage/gotuf a592b03b28b02bb29bb5878308fb1abed63383b5
clone git github.com/tent/canonical-json-go 96e4ba3a7613a1216cbd1badca4efe382adea337
clone git github.com/agl/ed25519 d2b94fd789ea21d12fac1a4443dd3a3f79cda72c
clone git github.com/opencontainers/runc v0.0.3 # libcontainer
# libcontainer deps (see src/github.com/docker/libcontainer/update-vendor.sh)
clone git github.com/coreos/go-systemd v3
clone git github.com/godbus/dbus v2
clone git github.com/syndtr/gocapability 66ef2aa7a23ba682594e2b6f74cf40c0692b49fb
clone git github.com/golang/protobuf 655cdfa588ea
clone git github.com/Graylog2/go-gelf 6c62a85f1d47a67f2a5144c0e745b325889a8120
clone git github.com/fluent/fluent-logger-golang v1.0.0
# fluent-logger-golang deps
clone git github.com/philhofer/fwd 899e4efba8eaa1fea74175308f3fae18ff3319fa
clone git github.com/tinylib/msgp 75ee40d2601edf122ef667e2a07d600d4c44490c
# fsnotify
clone git gopkg.in/fsnotify.v1 v1.2.0
clean
|
matrix-stone/docker
|
hack/vendor.sh
|
Shell
|
apache-2.0
| 3,158 |
# This hook extracts $distfiles into $XBPS_BUILDDIR if $distfiles and $checksum
# variables are set.
hook() {
local srcdir="$XBPS_SRCDISTDIR/$pkgname-$version"
if [ -z "$distfiles" -a -z "$checksum" ]; then
mkdir -p $wrksrc
return 0
fi
# Check that distfiles are there before anything else.
for f in ${distfiles}; do
curfile=$(basename "${f#*>}")
if [ ! -f $srcdir/$curfile ]; then
msg_error "$pkgver: cannot find ${curfile}, use 'xbps-src fetch' first.\n"
fi
done
if [ -n "$create_wrksrc" ]; then
mkdir -p ${wrksrc} || msg_error "$pkgver: failed to create wrksrc.\n"
fi
msg_normal "$pkgver: extracting distfile(s), please wait...\n"
for f in ${distfiles}; do
curfile=$(basename "${f#*>}")
for j in ${skip_extraction}; do
if [ "$curfile" = "$j" ]; then
found=1
break
fi
done
if [ -n "$found" ]; then
unset found
continue
fi
case $curfile in
*.tar.lzma) cursufx="txz";;
*.tar.lz) cursufx="tlz";;
*.tlz) cursufx="tlz";;
*.tar.xz) cursufx="txz";;
*.txz) cursufx="txz";;
*.tar.bz2) cursufx="tbz";;
*.tbz) cursufx="tbz";;
*.tar.gz) cursufx="tgz";;
*.tgz) cursufx="tgz";;
*.gz) cursufx="gz";;
*.bz2) cursufx="bz2";;
*.tar) cursufx="tar";;
*.zip) cursufx="zip";;
*.rpm) cursufx="rpm";;
*.patch) cursufx="pch";;
*) msg_error "$pkgver: unknown distfile suffix for $curfile.\n";;
esac
if [ -n "$create_wrksrc" ]; then
extractdir="$wrksrc"
else
extractdir="$XBPS_BUILDDIR"
fi
case ${cursufx} in
txz|tbz|tlz|tgz)
tar -x --no-same-permissions --no-same-owner -f $srcdir/$curfile -C $extractdir
if [ $? -ne 0 ]; then
msg_error "$pkgver: extracting $curfile into $XBPS_BUILDDIR.\n"
fi
;;
gz|bz2)
cp -f $srcdir/$curfile $extractdir
if [ "$cursufx" = "gz" ]; then
cd $extractdir && gunzip $curfile
else
cd $extractdir && bunzip2 $curfile
fi
;;
tar)
tar -x --no-same-permissions --no-same-owner -f $srcdir/$curfile -C $extractdir
if [ $? -ne 0 ]; then
msg_error "$pkgver: extracting $curfile into $XBPS_BUILDDIR.\n"
fi
;;
zip)
if command -v unzip &>/dev/null; then
unzip -o -q $srcdir/$curfile -d $extractdir
if [ $? -ne 0 ]; then
msg_error "$pkgver: extracting $curfile into $XBPS_BUILDDIR.\n"
fi
else
msg_error "$pkgver: cannot find unzip bin for extraction.\n"
fi
;;
rpm)
if command -v rpmextract &>/dev/null; then
cd $extractdir
rpmextract $srcdir/$curfile
if [ $? -ne 0 ]; then
msg_error "$pkgver: extracting $curfile into $XBPS_BUILDDIR.\n"
fi
else
msg_error "$pkgver: cannot find rpmextract for extraction.\n"
fi
;;
*)
msg_error "$pkgver: cannot guess $curfile extract suffix. ($cursufx)\n"
;;
esac
done
}
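# For reference, a hypothetical template exercising this hook; the variable
# names ($pkgname, $version, $distfiles, $checksum, $skip_extraction,
# $create_wrksrc) are exactly the ones the hook reads above:
#
#   pkgname=foo
#   version=1.0
#   distfiles="https://example.org/foo-${version}.tar.gz
#    https://example.org/foo-extra.zip"
#   checksum="<sha256 of each distfile>"
#   skip_extraction="foo-extra.zip"   # fetched but left unextracted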
|
ylixir/void-packages
|
common/hooks/do-extract/00-distfiles.sh
|
Shell
|
bsd-2-clause
| 2,841 |
#!/bin/bash -l
# The settings come from ~/.bash_profile
rm -rf *.root *.dat *.log fort* hlt hough raw* recraw/*.root recraw/*.log GRP *.ps
aliroot -b -q sim.C 2>&1 | tee sim.log
mv syswatch.log simwatch.log
aliroot -b -q rec.C 2>&1 | tee rec.log
mv syswatch.log recwatch.log
aliroot -b -q ${ALICE_ROOT}/STEER/macros/CheckESD.C 2>&1 | tee check.log
aliroot -b -q aod.C 2>&1 | tee aod.log
|
jgrosseo/AliRoot
|
test/generators/herwig/runtest.sh
|
Shell
|
bsd-3-clause
| 402 |
#!/bin/sh
echo $1 > $1
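# Hypothetical use: "./gen_templates.sh foo.txt" creates a file named
# foo.txt whose only content is the literal string "foo.txt".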
|
Dataman-Cloud/drone
|
vendor/github.com/go-swagger/go-swagger/fixtures/templates/gen_templates.sh
|
Shell
|
apache-2.0
| 23 |
#!/bin/sh
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This script generates two chains of test certificates:
#
# 1. A (end-entity) -> B -> C -> D (self-signed root)
# 2. A (end-entity) -> B -> C2 (self-signed root)
#
# in which A, B, C, and D have distinct keypairs. C2 is a self-signed root
# certificate that uses the same keypair as C.
#
# We use these cert chains in
# SSLClientSocketTest.VerifyReturnChainProperlyOrdered to ensure that
# SSLInfo objects see the certificate chain as validated rather than as
# served by the server. The server serves chain 1. The client has C2, NOT D,
# installed as a trusted root. Therefore, the chain will validate as chain
# 2, even though the server served chain 1.
try () {
echo "$@"
"$@" || exit 1
}
try rm -rf out
try mkdir out
echo Create the serial number files.
serial=1000
for i in B C C2 D
do
try /bin/sh -c "echo $serial > out/$i-serial"
serial=$(expr $serial + 1)
done
echo Generate the keys.
try openssl genrsa -out out/A.key 2048
try openssl genrsa -out out/B.key 2048
try openssl genrsa -out out/C.key 2048
try openssl genrsa -out out/D.key 2048
echo Generate the D CSR.
CA_COMMON_NAME="D Root CA" \
CERTIFICATE=D \
try openssl req \
-new \
-key out/D.key \
-out out/D.csr \
-config redundant-ca.cnf
echo D signs itself.
CA_COMMON_NAME="D Root CA" \
try openssl x509 \
-req -days 3650 \
-in out/D.csr \
-extensions ca_cert \
-extfile redundant-ca.cnf \
-signkey out/D.key \
-out out/D.pem \
-text
echo Generate the C2 root CSR.
CA_COMMON_NAME="C CA" \
CERTIFICATE=C2 \
try openssl req \
-new \
-key out/C.key \
-out out/C2.csr \
-config redundant-ca.cnf
echo C2 signs itself.
CA_COMMON_NAME="C CA" \
try openssl x509 \
-req -days 3650 \
-in out/C2.csr \
-extensions ca_cert \
-extfile redundant-ca.cnf \
-signkey out/C.key \
-out out/C2.pem \
-text
echo Generate the B and C intermediaries\' CSRs.
for i in B C
do
name="$i Intermediate CA"
CA_COMMON_NAME="$i CA" \
CERTIFICATE=$i \
try openssl req \
-new \
-key out/$i.key \
-out out/$i.csr \
-config redundant-ca.cnf
done
echo D signs the C intermediate.
# Make sure the signer's DB file exists.
touch out/D-index.txt
CA_COMMON_NAME="D Root CA" \
CERTIFICATE=D \
try openssl ca \
-batch \
-extensions ca_cert \
-in out/C.csr \
-out out/C.pem \
-config redundant-ca.cnf
echo C signs the B intermediate.
touch out/C-index.txt
CA_COMMON_NAME="C CA" \
CERTIFICATE=C \
try openssl ca \
-batch \
-extensions ca_cert \
-in out/B.csr \
-out out/B.pem \
-config redundant-ca.cnf
echo Generate the A end-entity CSR.
try openssl req \
-new \
-key out/A.key \
-out out/A.csr \
-config ee.cnf
echo B signs A.
touch out/B-index.txt
CA_COMMON_NAME="B CA" \
CERTIFICATE=B \
try openssl ca \
-batch \
-extensions user_cert \
-in out/A.csr \
-out out/A.pem \
-config redundant-ca.cnf
# EmbeddedTestServer only supports PKCS#8 format.
try openssl pkcs8 -topk8 -nocrypt -in out/A.key -out out/A-pkcs8.key
echo Create redundant-server-chain.pem
try /bin/sh -c "cat out/A-pkcs8.key out/A.pem out/B.pem out/C.pem out/D.pem \
> ../certificates/redundant-server-chain.pem"
echo Create redundant-validated-chain.pem
try /bin/sh -c "cat out/A-pkcs8.key out/A.pem out/B.pem out/C2.pem \
> ../certificates/redundant-validated-chain.pem"
echo Create redundant-validated-chain-root.pem
try cp out/C2.pem ../certificates/redundant-validated-chain-root.pem
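# A quick sanity check of the two chains described in the header comment
# (hypothetical, run from out/ after the script completes): chain 2 should
# verify with C2 as the only trusted root, even though the server serves
# chain 1 rooted at D.
#
#   openssl verify -CAfile C2.pem -untrusted B.pem A.pem
#   # expected output: A.pem: OK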
|
nwjs/chromium.src
|
net/data/ssl/scripts/generate-redundant-test-chains.sh
|
Shell
|
bsd-3-clause
| 3,713 |
tmplang="$LANG"
test "$LC_MESSAGES" != "" && tmplang="$LC_MESSAGES"
test "$LC_ALL" != "" && tmplang="$LC_ALL"
test "$LANGUAGE" != "" && tmplang="$LANGUAGE"
lang=`echo $tmplang|cut -d "_" -f 1`
case $lang in
en)
lang=gb
echo $tmplang | grep en_US &>/dev/null && lang=en
;;
de|fr|it|pt|es|se)
;;
*)
lang=gb
;;
esac
echo $lang
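# Example behaviour (hypothetical invocations, other locale variables unset):
#   LANG=de_DE.UTF-8 sh scummvmGetLang.sh   # prints "de"
#   LANG=en_US.UTF-8 sh scummvmGetLang.sh   # prints "en"
#   LANG=en_GB.UTF-8 sh scummvmGetLang.sh   # prints "gb"
#   LANG=ja_JP.UTF-8 sh scummvmGetLang.sh   # prints "gb" (fallback)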
|
cyrevolt/gentoo
|
games-rpg/bass/files/scummvmGetLang.sh
|
Shell
|
gpl-2.0
| 358 |
#!/bin/bash
# everything is performed in the destination directory
# this script has to be run by addsonpatch.sh
# do not execute it manually
cd "$destination"
if [ "$#" -eq 0 ];
then
echo "[ USAGE :"
echo "./install/addsonpatch.sh ADDSON_NAME WHERE_SOURCE WHERE_LINKS (-patch) (-revert) "
echo " addsonpatch.sh has to be run from the Quantum ESPRESSO root directory"
echo "WHERE_SOURCE is the relative path to the sources of the Addson code "
echo "WHERE_LINKS is the relative path to the QE directory where the addson sources have to be linked"
echo "at the moment it only allows for pure f90 routines to be linked in flib"
echo "or pure f90 modules to be linked in Modules"
echo " -patch : apply patch to Makefiles "
echo " -revert : revert Makefiles to original "
echo " ]"
exit
fi
case "$4" in
(-patch)
echo "* I will try to patch needed files for integrated compilation ..."
if test -e "${ADDSON_NAME}_PATCH" ; then
echo "-- File $destination/${ADDSON_NAME}_PATCH exists"
echo "-- I guess you have already patched $ADDSON_NAME"
echo "-- Please unpatch it first, or start from a clean source tree"
echo "-- See you later..."
echo "* ABORT"
exit
fi
echo "#Please do not remove or modify this file" > ${ADDSON_NAME}_PATCH
echo "#It is keeps track of the steps for patching $ADDSON package" >> ${ADDSON_NAME}_PATCH
#-------------------
echo "-- Executing pre script"
command -v patch &>/dev/null || { echo "I require patch command but it's not installed. Aborting." >&2; exit 1; }
#------------------- check if GNU patch works
cat > test_patch1 << \EOF
alfa
beta
EOF
cat > test_patch2 << \EOF
alfa
gamma
EOF
cat > test_patch3 << \EOF_EOF
patch -c -l -b -F 3 --suffix=.pre "./test_patch1" << \EOF
EOF_EOF
diff -c test_patch1 test_patch2 >> test_patch3
echo EOF >> test_patch3
bash test_patch3 &> test_patch4
status=$?
if [ $status -ne 0 ]
then
echo "patch does not work! Error message:"
echo "**********"
cat test_patch4
echo "**********"
echo "Please install a recent version of the GNU patch utility and try again."
exit
fi
rm test_patch1 test_patch2 test_patch3 test_patch4
if [ -e test_patch1.pre ]
then
rm test_patch1.pre
fi
#-------------------------------------------
command -v sed &>/dev/null || { echo "I require sed command but it's not installed. Aborting." >&2; exit 1; }
#------------------- check if GNU sed works
cat > test_sed1 << \EOF
alfa
beta
EOF
cat > test_sed2 << \EOF
alfa
gamma
beta
EOF
sed '/alfa/ a\
gamma' test_sed1 > tmp.1
mv tmp.1 test_sed1
diff -c test_sed1 test_sed2 >> test_sed3
# echo EOF >> test_sed3
bash test_sed3 &> test_sed4
status=$?
if [ $status -ne 0 ]
then
echo "sed does not work! Error message:"
echo "**********"
cat test_sed4
echo "**********"
echo "Please install a recent version of the GNU sed utility and try again."
exit
fi
rm test_sed1 test_sed2 test_sed3 test_sed4
# -----------------------------------------
# -----------------------------------------
to_do_before_patch
echo "-- Setting up symlinks"
for file in $destination/$LINKED_FILES ; do
base="${file##*/}"
if test -e $destination/$WHERE_LINKS/$base ; then
echo "PATCH ERROR: file $base is already in $WHERE_LINKS"
exit 1
fi
# echo "$destination/$WHERE_LINKS/$base"
ln -s $file $destination/$WHERE_LINKS/$base
done
tmp_var=\$\(${ADDSON_NAME}_OBJECTS\)
echo "-- modifying $WHERE_LINKS/Makefile"
sed < $destination/$WHERE_LINKS/Makefile.pre$ADDSON_NAME > $destination/$WHERE_LINKS/tmp.1 '/make.inc/ a\
include '"${ADDSON_NAME}"'.inc \
'
sed < $destination/$WHERE_LINKS/tmp.1 > $destination/$WHERE_LINKS/Makefile '/= \\/ a\
'"${tmp_var}"' \\'
rm $destination/$WHERE_LINKS/tmp.1
echo "-- Executing post script"
to_do_after_patch
echo "- DONE!"
;;
(-revert)
echo "* I will try to revert ..."
echo "-- Executing pre script"
to_do_before_revert
echo "-- Removing symlinks"
for file in $destination/$LINKED_FILES ; do
base="${file##*/}"
if test -e $destination/$WHERE_LINKS/$base ; then \
# echo "$destination/$WHERE_LINKS/$base" ; \
rm $destination/$WHERE_LINKS/$base ; \
else
echo "where_links base: $destination/$WHERE_LINKS/$base"
echo "PATCH WARNING: file $base is not in $destination/$WHERE_LINKS"
fi
done
echo "-- Restoring .pre$ADDSON_NAME files"
PREADDSON=$(find . -name "*.pre*")
if ! test "$PREADDSON" ; then
echo "-- I cannot find any .pre$ADDSON_NAME file"
echo "* ABORT"
exit
fi
rm ${ADDSON_NAME}_PATCH
echo "-- Executing post script"
to_do_after_revert
echo "* DONE!"
;;
(*)
echo "Missing input argument"
esac
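# Example of the intended call sequence (hypothetical add-on layout), per the
# usage text above; this script is driven by install/addsonpatch.sh, which
# sets $destination and the ADDSON_* variables before sourcing it:
#
#   cd /path/to/quantum-espresso
#   ./install/addsonpatch.sh MYADDON src/myaddon Modules -patch
#   # ... build QE with the add-on linked in ...
#   ./install/addsonpatch.sh MYADDON src/myaddon Modules -revert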
|
QEF/q-e
|
install/addsontool.sh
|
Shell
|
gpl-2.0
| 4,825 |
# Copyright (C) 2010 One Laptop Per Child
# Licensed under the terms of the GNU GPL v2 or later; see COPYING for details.
. $OOB__shlib
idle_suspend_enabled=$(read_config powerd enable_idle_suspend)
if [[ "$idle_suspend_enabled" != 1 ]]; then
echo "touch /etc/powerd/flags/inhibit-suspend"
fi
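# kspost scripts emit shell commands that are executed inside the image
# being built, so the echo above installs the flag file only when idle
# suspend is disabled in the build configuration. Hypothetical effect:
#
#   # read_config powerd enable_idle_suspend  -> "0"
#   # emitted command: touch /etc/powerd/flags/inhibit-suspend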
|
dnarvaez/olpc-os-builder
|
modules/powerd/kspost.50.autosuspend.sh
|
Shell
|
gpl-2.0
| 297 |
#!/bin/bash
$PYTHON -m pip install . --no-deps --ignore-installed -vv
|
cokelaer/bioconda-recipes
|
recipes/strawc/build.sh
|
Shell
|
mit
| 71 |
# ltmain.sh - Provide generalized library-building support services.
# NOTE: Changing this file will not affect anything until you rerun configure.
#
# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004
# Free Software Foundation, Inc.
# Originally by Gordon Matzigkeit <[email protected]>, 1996
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# As a special exception to the GNU General Public License, if you
# distribute this file as part of a program that contains a
# configuration script generated by Autoconf, you may include it under
# the same distribution terms that you use for the rest of that program.
basename="s,^.*/,,g"
# Work around backward compatibility issue on IRIX 6.5. On IRIX 6.4+, sh
# is ksh but when the shell is invoked as "sh" and the current value of
# the _XPG environment variable is not equal to 1 (one), the special
# positional parameter $0, within a function call, is the name of the
# function.
progpath="$0"
# The name of this program:
progname=`echo "$progpath" | $SED $basename`
modename="$progname"
# Global variables:
EXIT_SUCCESS=0
EXIT_FAILURE=1
PROGRAM=ltmain.sh
PACKAGE=libtool
VERSION=1.5.6
TIMESTAMP=" (1.1220.2.95 2004/04/11 05:50:42) Debian$Rev: 215 $"
# Check that we have a working $echo.
if test "X$1" = X--no-reexec; then
# Discard the --no-reexec flag, and continue.
shift
elif test "X$1" = X--fallback-echo; then
# Avoid inline document here, it may be left over
:
elif test "X`($echo '\t') 2>/dev/null`" = 'X\t'; then
# Yippee, $echo works!
:
else
# Restart under the correct shell, and then maybe $echo will work.
exec $SHELL "$progpath" --no-reexec ${1+"$@"}
fi
if test "X$1" = X--fallback-echo; then
# used as fallback echo
shift
cat <<EOF
$*
EOF
exit $EXIT_SUCCESS
fi
default_mode=
help="Try \`$progname --help' for more information."
magic="%%%MAGIC variable%%%"
mkdir="mkdir"
mv="mv -f"
rm="rm -f"
# Sed substitution that helps us do robust quoting. It backslashifies
# metacharacters that are still active within double-quoted strings.
Xsed="${SED}"' -e 1s/^X//'
sed_quote_subst='s/\([\\`\\"$\\\\]\)/\\\1/g'
# test EBCDIC or ASCII
case `echo A|tr A '\301'` in
A) # EBCDIC based system
SP2NL="tr '\100' '\n'"
NL2SP="tr '\r\n' '\100\100'"
;;
*) # Assume ASCII based system
SP2NL="tr '\040' '\012'"
NL2SP="tr '\015\012' '\040\040'"
;;
esac
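# Note: in EBCDIC, 'A' is 0xC1 (octal 301), so the echo|tr probe above
# prints "A" on an EBCDIC host; on an ASCII host it prints a different
# byte and the second branch is taken.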
# NLS nuisances.
# Only set LANG and LC_ALL to C if already set.
# These must not be set unconditionally because not all systems understand
# e.g. LANG=C (notably SCO).
# We save the old values to restore during execute mode.
if test "${LC_ALL+set}" = set; then
save_LC_ALL="$LC_ALL"; LC_ALL=C; export LC_ALL
fi
if test "${LANG+set}" = set; then
save_LANG="$LANG"; LANG=C; export LANG
fi
# Make sure IFS has a sensible default
: ${IFS="
"}
if test "$build_libtool_libs" != yes && test "$build_old_libs" != yes; then
$echo "$modename: not configured to build any kind of library" 1>&2
$echo "Fatal configuration error. See the $PACKAGE docs for more information." 1>&2
exit $EXIT_FAILURE
fi
# Global variables.
mode=$default_mode
nonopt=
prev=
prevopt=
run=
show="$echo"
show_help=
execute_dlfiles=
lo2o="s/\\.lo\$/.${objext}/"
o2lo="s/\\.${objext}\$/.lo/"
#####################################
# Shell function definitions:
# This seems to be the best place for them
# func_win32_libid arg
# return the library type of file 'arg'
#
# Need a lot of goo to handle *both* DLLs and import libs
# Has to be a shell function in order to 'eat' the argument
# that is supplied when $file_magic_command is called.
func_win32_libid () {
win32_libid_type="unknown"
win32_fileres=`file -L $1 2>/dev/null`
case $win32_fileres in
*ar\ archive\ import\ library*) # definitely import
win32_libid_type="x86 archive import"
;;
*ar\ archive*) # could be an import, or static
if eval $OBJDUMP -f $1 | $SED -e '10q' 2>/dev/null | \
$EGREP -e 'file format pe-i386(.*architecture: i386)?' >/dev/null ; then
win32_nmres=`eval $NM -f posix -A $1 | \
sed -n -e '1,100{/ I /{x;/import/!{s/^/import/;h;p;};x;};}'`
if test "X$win32_nmres" = "Ximport" ; then
win32_libid_type="x86 archive import"
else
win32_libid_type="x86 archive static"
fi
fi
;;
*DLL*)
win32_libid_type="x86 DLL"
;;
*executable*) # but shell scripts are "executable" too...
case $win32_fileres in
*MS\ Windows\ PE\ Intel*)
win32_libid_type="x86 DLL"
;;
esac
;;
esac
$echo $win32_libid_type
}
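# Illustrative use of func_win32_libid (hypothetical path; it is normally
# invoked via $file_magic_command during deplib checks):
#   libtype=`func_win32_libid /usr/i686-w64-mingw32/lib/libfoo.dll.a`
#   # $libtype is one of "x86 archive import", "x86 archive static",
#   # "x86 DLL", or "unknown"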
# func_infer_tag arg
# Infer tagged configuration to use if any are available and
# if one wasn't chosen via the "--tag" command line option.
# Only attempt this if the compiler in the base compile
# command doesn't match the default compiler.
# arg is usually of the form 'gcc ...'
func_infer_tag () {
if test -n "$available_tags" && test -z "$tagname"; then
CC_quoted=
for arg in $CC; do
case $arg in
*[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"")
arg="\"$arg\""
;;
esac
CC_quoted="$CC_quoted $arg"
done
case $@ in
# Blanks in the command may have been stripped by the calling shell,
# but not from the CC environment variable when configure was run.
" $CC "* | "$CC "* | " `$echo $CC` "* | "`$echo $CC` "* | " $CC_quoted"* | "$CC_quoted "* | " `$echo $CC_quoted` "* | "`$echo $CC_quoted` "*) ;;
# Blanks at the start of $base_compile will cause this to fail
# if we don't check for them as well.
*)
for z in $available_tags; do
if grep "^# ### BEGIN LIBTOOL TAG CONFIG: $z$" < "$progpath" > /dev/null; then
# Evaluate the configuration.
eval "`${SED} -n -e '/^# ### BEGIN LIBTOOL TAG CONFIG: '$z'$/,/^# ### END LIBTOOL TAG CONFIG: '$z'$/p' < $progpath`"
CC_quoted=
for arg in $CC; do
# Double-quote args containing other shell metacharacters.
case $arg in
*[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"")
arg="\"$arg\""
;;
esac
CC_quoted="$CC_quoted $arg"
done
case "$@ " in
" $CC "* | "$CC "* | " `$echo $CC` "* | "`$echo $CC` "* | " $CC_quoted"* | "$CC_quoted "* | " `$echo $CC_quoted` "* | "`$echo $CC_quoted` "*)
# The compiler in the base compile command matches
# the one in the tagged configuration.
# Assume this is the tagged configuration we want.
tagname=$z
break
;;
esac
fi
done
# If $tagname still isn't set, then no tagged configuration
# was found and let the user know that the "--tag" command
# line option must be used.
if test -z "$tagname"; then
$echo "$modename: unable to infer tagged configuration"
$echo "$modename: specify a tag with \`--tag'" 1>&2
exit $EXIT_FAILURE
# else
# $echo "$modename: using $tagname tagged configuration"
fi
;;
esac
fi
}
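# func_infer_tag is what lets an invocation such as (hypothetical):
#   libtool --mode=compile g++ -c foo.cpp
# pick up the CXX tagged configuration automatically; passing --tag=CXX
# on the command line sets $tagname and skips the inference above.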
# End of Shell function definitions
#####################################
# Darwin sucks
eval std_shrext=\"$shrext_cmds\"
# Parse our command line options once, thoroughly.
while test "$#" -gt 0
do
arg="$1"
shift
case $arg in
-*=*) optarg=`$echo "X$arg" | $Xsed -e 's/[-_a-zA-Z0-9]*=//'` ;;
*) optarg= ;;
esac
# If the previous option needs an argument, assign it.
if test -n "$prev"; then
case $prev in
execute_dlfiles)
execute_dlfiles="$execute_dlfiles $arg"
;;
tag)
tagname="$arg"
preserve_args="${preserve_args}=$arg"
# Check whether tagname contains only valid characters
case $tagname in
*[!-_A-Za-z0-9,/]*)
$echo "$progname: invalid tag name: $tagname" 1>&2
exit $EXIT_FAILURE
;;
esac
case $tagname in
CC)
# Don't test for the "default" C tag, as we know, it's there, but
# not specially marked.
;;
*)
if grep "^# ### BEGIN LIBTOOL TAG CONFIG: $tagname$" < "$progpath" > /dev/null; then
taglist="$taglist $tagname"
# Evaluate the configuration.
eval "`${SED} -n -e '/^# ### BEGIN LIBTOOL TAG CONFIG: '$tagname'$/,/^# ### END LIBTOOL TAG CONFIG: '$tagname'$/p' < $progpath`"
else
$echo "$progname: ignoring unknown tag $tagname" 1>&2
fi
;;
esac
;;
*)
eval "$prev=\$arg"
;;
esac
prev=
prevopt=
continue
fi
# Have we seen a non-optional argument yet?
case $arg in
--help)
show_help=yes
;;
--version)
$echo "$PROGRAM (GNU $PACKAGE) $VERSION$TIMESTAMP"
$echo
$echo "Copyright (C) 2003 Free Software Foundation, Inc."
$echo "This is free software; see the source for copying conditions. There is NO"
$echo "warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
exit $EXIT_SUCCESS
;;
--config)
${SED} -e '1,/^# ### BEGIN LIBTOOL CONFIG/d' -e '/^# ### END LIBTOOL CONFIG/,$d' $progpath
# Now print the configurations for the tags.
for tagname in $taglist; do
${SED} -n -e "/^# ### BEGIN LIBTOOL TAG CONFIG: $tagname$/,/^# ### END LIBTOOL TAG CONFIG: $tagname$/p" < "$progpath"
done
exit $EXIT_SUCCESS
;;
--debug)
$echo "$progname: enabling shell trace mode"
set -x
preserve_args="$preserve_args $arg"
;;
--dry-run | -n)
run=:
;;
--features)
$echo "host: $host"
if test "$build_libtool_libs" = yes; then
$echo "enable shared libraries"
else
$echo "disable shared libraries"
fi
if test "$build_old_libs" = yes; then
$echo "enable static libraries"
else
$echo "disable static libraries"
fi
exit $EXIT_SUCCESS
;;
--finish) mode="finish" ;;
--mode) prevopt="--mode" prev=mode ;;
--mode=*) mode="$optarg" ;;
--preserve-dup-deps) duplicate_deps="yes" ;;
--quiet | --silent)
show=:
preserve_args="$preserve_args $arg"
;;
--tag) prevopt="--tag" prev=tag ;;
--tag=*)
set tag "$optarg" ${1+"$@"}
shift
prev=tag
preserve_args="$preserve_args --tag"
;;
-dlopen)
prevopt="-dlopen"
prev=execute_dlfiles
;;
-*)
$echo "$modename: unrecognized option \`$arg'" 1>&2
$echo "$help" 1>&2
exit $EXIT_FAILURE
;;
*)
nonopt="$arg"
break
;;
esac
done
if test -n "$prevopt"; then
$echo "$modename: option \`$prevopt' requires an argument" 1>&2
$echo "$help" 1>&2
exit $EXIT_FAILURE
fi
# If this variable is set in any of the actions, the command in it
# will be execed at the end. This prevents here-documents from being
# left over by shells.
exec_cmd=
if test -z "$show_help"; then
# Infer the operation mode.
if test -z "$mode"; then
$echo "*** Warning: inferring the mode of operation is deprecated." 1>&2
$echo "*** Future versions of Libtool will require -mode=MODE be specified." 1>&2
case $nonopt in
*cc | cc* | *++ | gcc* | *-gcc* | g++* | xlc*)
mode=link
for arg
do
case $arg in
-c)
mode=compile
break
;;
esac
done
;;
*db | *dbx | *strace | *truss)
mode=execute
;;
*install*|cp|mv)
mode=install
;;
*rm)
mode=uninstall
;;
*)
# If we have no mode, but dlfiles were specified, then do execute mode.
test -n "$execute_dlfiles" && mode=execute
# Just use the default operation mode.
if test -z "$mode"; then
if test -n "$nonopt"; then
$echo "$modename: warning: cannot infer operation mode from \`$nonopt'" 1>&2
else
$echo "$modename: warning: cannot infer operation mode without MODE-ARGS" 1>&2
fi
fi
;;
esac
fi
# Only execute mode is allowed to have -dlopen flags.
if test -n "$execute_dlfiles" && test "$mode" != execute; then
$echo "$modename: unrecognized option \`-dlopen'" 1>&2
$echo "$help" 1>&2
exit $EXIT_FAILURE
fi
# Change the help message to a mode-specific one.
generic_help="$help"
help="Try \`$modename --help --mode=$mode' for more information."
# These modes are in order of execution frequency so that they run quickly.
case $mode in
# libtool compile mode
compile)
modename="$modename: compile"
# Get the compilation command and the source file.
base_compile=
srcfile="$nonopt" # always keep a non-empty value in "srcfile"
suppress_opt=yes
suppress_output=
arg_mode=normal
libobj=
later=
for arg
do
case "$arg_mode" in
arg )
# do not "continue". Instead, add this to base_compile
lastarg="$arg"
arg_mode=normal
;;
target )
libobj="$arg"
arg_mode=normal
continue
;;
normal )
# Accept any command-line options.
case $arg in
-o)
if test -n "$libobj" ; then
$echo "$modename: you cannot specify \`-o' more than once" 1>&2
exit $EXIT_FAILURE
fi
arg_mode=target
continue
;;
-static | -prefer-pic | -prefer-non-pic)
later="$later $arg"
continue
;;
-no-suppress)
suppress_opt=no
continue
;;
-Xcompiler)
arg_mode=arg # the next one goes into the "base_compile" arg list
continue # The current "srcfile" will either be retained or
;; # replaced later. I would guess that would be a bug.
-Wc,*)
args=`$echo "X$arg" | $Xsed -e "s/^-Wc,//"`
lastarg=
save_ifs="$IFS"; IFS=','
for arg in $args; do
IFS="$save_ifs"
# Double-quote args containing other shell metacharacters.
# Many Bourne shells cannot handle close brackets correctly
# in scan sets, so we specify it separately.
case $arg in
*[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"")
arg="\"$arg\""
;;
esac
lastarg="$lastarg $arg"
done
IFS="$save_ifs"
lastarg=`$echo "X$lastarg" | $Xsed -e "s/^ //"`
# Add the arguments to base_compile.
base_compile="$base_compile $lastarg"
continue
;;
* )
# Accept the current argument as the source file.
# The previous "srcfile" becomes the current argument.
#
lastarg="$srcfile"
srcfile="$arg"
;;
esac # case $arg
;;
esac # case $arg_mode
# Aesthetically quote the previous argument.
lastarg=`$echo "X$lastarg" | $Xsed -e "$sed_quote_subst"`
case $lastarg in
# Double-quote args containing other shell metacharacters.
# Many Bourne shells cannot handle close brackets correctly
# in scan sets, so we specify it separately.
*[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"")
lastarg="\"$lastarg\""
;;
esac
base_compile="$base_compile $lastarg"
done # for arg
case $arg_mode in
arg)
$echo "$modename: you must specify an argument for -Xcompile"
exit $EXIT_FAILURE
;;
target)
$echo "$modename: you must specify a target with \`-o'" 1>&2
exit $EXIT_FAILURE
;;
*)
# Get the name of the library object.
[ -z "$libobj" ] && libobj=`$echo "X$srcfile" | $Xsed -e 's%^.*/%%'`
;;
esac
# Recognize several different file suffixes.
# If the user specifies -o file.o, it is replaced with file.lo
xform='[cCFSifmso]'
case $libobj in
*.ada) xform=ada ;;
*.adb) xform=adb ;;
*.ads) xform=ads ;;
*.asm) xform=asm ;;
*.c++) xform=c++ ;;
*.cc) xform=cc ;;
*.ii) xform=ii ;;
*.class) xform=class ;;
*.cpp) xform=cpp ;;
*.cxx) xform=cxx ;;
*.f90) xform=f90 ;;
*.for) xform=for ;;
*.java) xform=java ;;
esac
libobj=`$echo "X$libobj" | $Xsed -e "s/\.$xform$/.lo/"`
case $libobj in
*.lo) obj=`$echo "X$libobj" | $Xsed -e "$lo2o"` ;;
*)
$echo "$modename: cannot determine name of library object from \`$libobj'" 1>&2
exit $EXIT_FAILURE
;;
esac
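# Example of the transformations above (hypothetical names, objext=o):
#   srcfile foo.c  -> libobj foo.lo  (the $xform substitution)
#   libobj  foo.lo -> obj    foo.o   (the $lo2o substitution)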
func_infer_tag $base_compile
for arg in $later; do
case $arg in
-static)
build_old_libs=yes
continue
;;
-prefer-pic)
pic_mode=yes
continue
;;
-prefer-non-pic)
pic_mode=no
continue
;;
esac
done
objname=`$echo "X$obj" | $Xsed -e 's%^.*/%%'`
xdir=`$echo "X$obj" | $Xsed -e 's%/[^/]*$%%'`
if test "X$xdir" = "X$obj"; then
xdir=
else
xdir=$xdir/
fi
lobj=${xdir}$objdir/$objname
if test -z "$base_compile"; then
$echo "$modename: you must specify a compilation command" 1>&2
$echo "$help" 1>&2
exit $EXIT_FAILURE
fi
# Delete any leftover library objects.
if test "$build_old_libs" = yes; then
removelist="$obj $lobj $libobj ${libobj}T"
else
removelist="$lobj $libobj ${libobj}T"
fi
$run $rm $removelist
trap "$run $rm $removelist; exit $EXIT_FAILURE" 1 2 15
# On Cygwin there's no "real" PIC flag so we must build both object types
case $host_os in
cygwin* | mingw* | pw32* | os2*)
pic_mode=default
;;
esac
if test "$pic_mode" = no && test "$deplibs_check_method" != pass_all; then
# non-PIC code in shared libraries is not supported
pic_mode=default
fi
# Calculate the filename of the output object if compiler does
# not support -o with -c
if test "$compiler_c_o" = no; then
output_obj=`$echo "X$srcfile" | $Xsed -e 's%^.*/%%' -e 's%\.[^.]*$%%'`.${objext}
lockfile="$output_obj.lock"
removelist="$removelist $output_obj $lockfile"
trap "$run $rm $removelist; exit $EXIT_FAILURE" 1 2 15
else
output_obj=
need_locks=no
lockfile=
fi
# Lock this critical section if it is needed
# We use this script file to make the link, it avoids creating a new file
if test "$need_locks" = yes; then
until $run ln "$progpath" "$lockfile" 2>/dev/null; do
$show "Waiting for $lockfile to be removed"
sleep 2
done
elif test "$need_locks" = warn; then
if test -f "$lockfile"; then
$echo "\
*** ERROR, $lockfile exists and contains:
`cat $lockfile 2>/dev/null`
This indicates that another process is trying to use the same
temporary object file, and libtool could not work around it because
your compiler does not support \`-c' and \`-o' together. If you
repeat this compilation, it may succeed, by chance, but you had better
avoid parallel builds (make -j) in this platform, or get a better
compiler."
$run $rm $removelist
exit $EXIT_FAILURE
fi
$echo $srcfile > "$lockfile"
fi
if test -n "$fix_srcfile_path"; then
eval srcfile=\"$fix_srcfile_path\"
fi
$run $rm "$libobj" "${libobj}T"
# Create a libtool object file (analogous to a ".la" file),
# but don't create it if we're doing a dry run.
test -z "$run" && cat > ${libobj}T <<EOF
# $libobj - a libtool object file
# Generated by $PROGRAM - GNU $PACKAGE $VERSION$TIMESTAMP
#
# Please DO NOT delete this file!
# It is necessary for linking the library.
# Name of the PIC object.
EOF
# Only build a PIC object if we are building libtool libraries.
if test "$build_libtool_libs" = yes; then
# Without this assignment, base_compile gets emptied.
fbsd_hideous_sh_bug=$base_compile
if test "$pic_mode" != no; then
command="$base_compile $srcfile $pic_flag"
else
# Don't build PIC code
command="$base_compile $srcfile"
fi
if test ! -d "${xdir}$objdir"; then
$show "$mkdir ${xdir}$objdir"
$run $mkdir ${xdir}$objdir
status=$?
if test "$status" -ne 0 && test ! -d "${xdir}$objdir"; then
exit $status
fi
fi
if test -z "$output_obj"; then
# Place PIC objects in $objdir
command="$command -o $lobj"
fi
$run $rm "$lobj" "$output_obj"
$show "$command"
if $run eval "$command"; then :
else
test -n "$output_obj" && $run $rm $removelist
exit $EXIT_FAILURE
fi
if test "$need_locks" = warn &&
test "X`cat $lockfile 2>/dev/null`" != "X$srcfile"; then
$echo "\
*** ERROR, $lockfile contains:
`cat $lockfile 2>/dev/null`
but it should contain:
$srcfile
This indicates that another process is trying to use the same
temporary object file, and libtool could not work around it because
your compiler does not support \`-c' and \`-o' together. If you
repeat this compilation, it may succeed, by chance, but you had better
avoid parallel builds (make -j) in this platform, or get a better
compiler."
$run $rm $removelist
exit $EXIT_FAILURE
fi
# Just move the object if needed, then go on to compile the next one
if test -n "$output_obj" && test "X$output_obj" != "X$lobj"; then
$show "$mv $output_obj $lobj"
if $run $mv $output_obj $lobj; then :
else
error=$?
$run $rm $removelist
exit $error
fi
fi
# Append the name of the PIC object to the libtool object file.
test -z "$run" && cat >> ${libobj}T <<EOF
pic_object='$objdir/$objname'
EOF
# Allow error messages only from the first compilation.
if test "$suppress_opt" = yes; then
suppress_output=' >/dev/null 2>&1'
fi
else
# No PIC object so indicate it doesn't exist in the libtool
# object file.
test -z "$run" && cat >> ${libobj}T <<EOF
pic_object=none
EOF
fi
# Only build a position-dependent object if we build old libraries.
if test "$build_old_libs" = yes; then
if test "$pic_mode" != yes; then
# Don't build PIC code
command="$base_compile $srcfile"
else
command="$base_compile $srcfile $pic_flag"
fi
if test "$compiler_c_o" = yes; then
command="$command -o $obj"
fi
# Suppress compiler output if we already did a PIC compilation.
command="$command$suppress_output"
$run $rm "$obj" "$output_obj"
$show "$command"
if $run eval "$command"; then :
else
$run $rm $removelist
exit $EXIT_FAILURE
fi
if test "$need_locks" = warn &&
test "X`cat $lockfile 2>/dev/null`" != "X$srcfile"; then
$echo "\
*** ERROR, $lockfile contains:
`cat $lockfile 2>/dev/null`
but it should contain:
$srcfile
This indicates that another process is trying to use the same
temporary object file, and libtool could not work around it because
your compiler does not support \`-c' and \`-o' together. If you
repeat this compilation, it may succeed, by chance, but you had better
avoid parallel builds (make -j) in this platform, or get a better
compiler."
$run $rm $removelist
exit $EXIT_FAILURE
fi
# Just move the object if needed
if test -n "$output_obj" && test "X$output_obj" != "X$obj"; then
$show "$mv $output_obj $obj"
if $run $mv $output_obj $obj; then :
else
error=$?
$run $rm $removelist
exit $error
fi
fi
# Append the name of the non-PIC object to the libtool object file.
# Only append if the libtool object file exists.
test -z "$run" && cat >> ${libobj}T <<EOF
# Name of the non-PIC object.
non_pic_object='$objname'
EOF
else
# Append the name of the non-PIC object to the libtool object file.
# Only append if the libtool object file exists.
test -z "$run" && cat >> ${libobj}T <<EOF
# Name of the non-PIC object.
non_pic_object=none
EOF
fi
$run $mv "${libobj}T" "${libobj}"
# Unlock the critical section if it was locked
if test "$need_locks" != no; then
$run $rm "$lockfile"
fi
exit $EXIT_SUCCESS
;;
# libtool link mode
link | relink)
modename="$modename: link"
case $host in
*-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2*)
# It is impossible to link a dll without this setting, and
# we shouldn't force the makefile maintainer to figure out
# which system we are compiling for in order to pass an extra
# flag for every libtool invocation.
# allow_undefined=no
# FIXME: Unfortunately, there are problems with the above when trying
# to make a dll which has undefined symbols, in which case not
# even a static library is built. For now, we need to specify
# -no-undefined on the libtool link line when we can be certain
# that all symbols are satisfied, otherwise we get a static library.
allow_undefined=yes
;;
*)
allow_undefined=yes
;;
esac
libtool_args="$nonopt"
base_compile="$nonopt $@"
compile_command="$nonopt"
finalize_command="$nonopt"
compile_rpath=
finalize_rpath=
compile_shlibpath=
finalize_shlibpath=
convenience=
old_convenience=
deplibs=
old_deplibs=
compiler_flags=
linker_flags=
dllsearchpath=
lib_search_path=`pwd`
inst_prefix_dir=
avoid_version=no
dlfiles=
dlprefiles=
dlself=no
export_dynamic=no
export_symbols=
export_symbols_regex=
generated=
libobjs=
ltlibs=
module=no
no_install=no
objs=
non_pic_objects=
precious_files_regex=
prefer_static_libs=no
preload=no
prev=
prevarg=
release=
rpath=
xrpath=
perm_rpath=
temp_rpath=
thread_safe=no
vinfo=
vinfo_number=no
func_infer_tag $base_compile
# We need to know -static, to get the right output filenames.
for arg
do
case $arg in
-all-static | -static)
if test "X$arg" = "X-all-static"; then
if test "$build_libtool_libs" = yes && test -z "$link_static_flag"; then
$echo "$modename: warning: complete static linking is impossible in this configuration" 1>&2
fi
if test -n "$link_static_flag"; then
dlopen_self=$dlopen_self_static
fi
else
if test -z "$pic_flag" && test -n "$link_static_flag"; then
dlopen_self=$dlopen_self_static
fi
fi
build_libtool_libs=no
build_old_libs=yes
prefer_static_libs=yes
break
;;
esac
done
# See if our shared archives depend on static archives.
test -n "$old_archive_from_new_cmds" && build_old_libs=yes
# Go through the arguments, transforming them on the way.
while test "$#" -gt 0; do
arg="$1"
shift
case $arg in
*[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"")
qarg=\"`$echo "X$arg" | $Xsed -e "$sed_quote_subst"`\" ### testsuite: skip nested quoting test
;;
*) qarg=$arg ;;
esac
libtool_args="$libtool_args $qarg"
# If the previous option needs an argument, assign it.
if test -n "$prev"; then
case $prev in
output)
compile_command="$compile_command @OUTPUT@"
finalize_command="$finalize_command @OUTPUT@"
;;
esac
case $prev in
dlfiles|dlprefiles)
if test "$preload" = no; then
# Add the symbol object into the linking commands.
compile_command="$compile_command @SYMFILE@"
finalize_command="$finalize_command @SYMFILE@"
preload=yes
fi
case $arg in
*.la | *.lo) ;; # We handle these cases below.
force)
if test "$dlself" = no; then
dlself=needless
export_dynamic=yes
fi
prev=
continue
;;
self)
if test "$prev" = dlprefiles; then
dlself=yes
elif test "$prev" = dlfiles && test "$dlopen_self" != yes; then
dlself=yes
else
dlself=needless
export_dynamic=yes
fi
prev=
continue
;;
*)
if test "$prev" = dlfiles; then
dlfiles="$dlfiles $arg"
else
dlprefiles="$dlprefiles $arg"
fi
prev=
continue
;;
esac
;;
expsyms)
export_symbols="$arg"
if test ! -f "$arg"; then
$echo "$modename: symbol file \`$arg' does not exist"
exit $EXIT_FAILURE
fi
prev=
continue
;;
expsyms_regex)
export_symbols_regex="$arg"
prev=
continue
;;
inst_prefix)
inst_prefix_dir="$arg"
prev=
continue
;;
precious_regex)
precious_files_regex="$arg"
prev=
continue
;;
release)
release="-$arg"
prev=
continue
;;
objectlist)
if test -f "$arg"; then
save_arg=$arg
moreargs=
for fil in `cat $save_arg`
do
# moreargs="$moreargs $fil"
arg=$fil
# A libtool-controlled object.
# Check to see that this really is a libtool object.
if (${SED} -e '2q' $arg | grep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then
pic_object=
non_pic_object=
# Read the .lo file
# If there is no directory component, then add one.
case $arg in
*/* | *\\*) . $arg ;;
*) . ./$arg ;;
esac
if test -z "$pic_object" || \
test -z "$non_pic_object" ||
test "$pic_object" = none && \
test "$non_pic_object" = none; then
$echo "$modename: cannot find name of object for \`$arg'" 1>&2
exit $EXIT_FAILURE
fi
# Extract subdirectory from the argument.
xdir=`$echo "X$arg" | $Xsed -e 's%/[^/]*$%%'`
if test "X$xdir" = "X$arg"; then
xdir=
else
xdir="$xdir/"
fi
if test "$pic_object" != none; then
# Prepend the subdirectory the object is found in.
pic_object="$xdir$pic_object"
if test "$prev" = dlfiles; then
if test "$build_libtool_libs" = yes && test "$dlopen_support" = yes; then
dlfiles="$dlfiles $pic_object"
prev=
continue
else
# If libtool objects are unsupported, then we need to preload.
prev=dlprefiles
fi
fi
# CHECK ME: I think I busted this. -Ossama
if test "$prev" = dlprefiles; then
# Preload the old-style object.
dlprefiles="$dlprefiles $pic_object"
prev=
fi
# A PIC object.
libobjs="$libobjs $pic_object"
arg="$pic_object"
fi
# Non-PIC object.
if test "$non_pic_object" != none; then
# Prepend the subdirectory the object is found in.
non_pic_object="$xdir$non_pic_object"
# A standard non-PIC object
non_pic_objects="$non_pic_objects $non_pic_object"
if test -z "$pic_object" || test "$pic_object" = none ; then
arg="$non_pic_object"
fi
fi
else
# Only an error if not doing a dry-run.
if test -z "$run"; then
$echo "$modename: \`$arg' is not a valid libtool object" 1>&2
exit $EXIT_FAILURE
else
# Dry-run case.
# Extract subdirectory from the argument.
xdir=`$echo "X$arg" | $Xsed -e 's%/[^/]*$%%'`
if test "X$xdir" = "X$arg"; then
xdir=
else
xdir="$xdir/"
fi
pic_object=`$echo "X${xdir}${objdir}/${arg}" | $Xsed -e "$lo2o"`
non_pic_object=`$echo "X${xdir}${arg}" | $Xsed -e "$lo2o"`
libobjs="$libobjs $pic_object"
non_pic_objects="$non_pic_objects $non_pic_object"
fi
fi
done
else
$echo "$modename: link input file \`$save_arg' does not exist"
exit $EXIT_FAILURE
fi
arg=$save_arg
prev=
continue
;;
rpath | xrpath)
# We need an absolute path.
case $arg in
[\\/]* | [A-Za-z]:[\\/]*) ;;
*)
$echo "$modename: only absolute run-paths are allowed" 1>&2
exit $EXIT_FAILURE
;;
esac
if test "$prev" = rpath; then
case "$rpath " in
*" $arg "*) ;;
*) rpath="$rpath $arg" ;;
esac
else
case "$xrpath " in
*" $arg "*) ;;
*) xrpath="$xrpath $arg" ;;
esac
fi
prev=
continue
;;
xcompiler)
compiler_flags="$compiler_flags $qarg"
prev=
compile_command="$compile_command $qarg"
finalize_command="$finalize_command $qarg"
continue
;;
xlinker)
linker_flags="$linker_flags $qarg"
compiler_flags="$compiler_flags $wl$qarg"
prev=
compile_command="$compile_command $wl$qarg"
finalize_command="$finalize_command $wl$qarg"
continue
;;
xcclinker)
linker_flags="$linker_flags $qarg"
compiler_flags="$compiler_flags $qarg"
prev=
compile_command="$compile_command $qarg"
finalize_command="$finalize_command $qarg"
continue
;;
shrext)
shrext_cmds="$arg"
prev=
continue
;;
*)
eval "$prev=\"\$arg\""
prev=
continue
;;
esac
fi # test -n "$prev"
prevarg="$arg"
case $arg in
-all-static)
if test -n "$link_static_flag"; then
compile_command="$compile_command $link_static_flag"
finalize_command="$finalize_command $link_static_flag"
fi
continue
;;
-allow-undefined)
# FIXME: remove this flag sometime in the future.
$echo "$modename: \`-allow-undefined' is deprecated because it is the default" 1>&2
continue
;;
-avoid-version)
avoid_version=yes
continue
;;
-dlopen)
prev=dlfiles
continue
;;
-dlpreopen)
prev=dlprefiles
continue
;;
-export-dynamic)
export_dynamic=yes
continue
;;
-export-symbols | -export-symbols-regex)
if test -n "$export_symbols" || test -n "$export_symbols_regex"; then
$echo "$modename: more than one -exported-symbols argument is not allowed"
exit $EXIT_FAILURE
fi
if test "X$arg" = "X-export-symbols"; then
prev=expsyms
else
prev=expsyms_regex
fi
continue
;;
-inst-prefix-dir)
prev=inst_prefix
continue
;;
# The native IRIX linker understands -LANG:*, -LIST:* and -LNO:*
# so, if we see these flags be careful not to treat them like -L
-L[A-Z][A-Z]*:*)
case $with_gcc/$host in
no/*-*-irix* | /*-*-irix*)
compile_command="$compile_command $arg"
finalize_command="$finalize_command $arg"
;;
esac
continue
;;
-L*)
dir=`$echo "X$arg" | $Xsed -e 's/^-L//'`
# We need an absolute path.
case $dir in
[\\/]* | [A-Za-z]:[\\/]*) ;;
*)
absdir=`cd "$dir" && pwd`
if test -z "$absdir"; then
$echo "$modename: cannot determine absolute directory name of \`$dir'" 1>&2
exit $EXIT_FAILURE
fi
dir="$absdir"
;;
esac
case "$deplibs " in
*" -L$dir "*) ;;
*)
deplibs="$deplibs -L$dir"
lib_search_path="$lib_search_path $dir"
;;
esac
case $host in
*-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2*)
case :$dllsearchpath: in
*":$dir:"*) ;;
*) dllsearchpath="$dllsearchpath:$dir";;
esac
;;
esac
continue
;;
-l*)
if test "X$arg" = "X-lc" || test "X$arg" = "X-lm"; then
case $host in
*-*-cygwin* | *-*-pw32* | *-*-beos*)
# These systems don't actually have a C or math library (as such)
continue
;;
*-*-mingw* | *-*-os2*)
# These systems don't actually have a C library (as such)
test "X$arg" = "X-lc" && continue
;;
*-*-openbsd* | *-*-freebsd*)
# Do not include libc due to us having libc/libc_r.
test "X$arg" = "X-lc" && continue
;;
*-*-rhapsody* | *-*-darwin1.[012])
# Rhapsody C and math libraries are in the System framework
deplibs="$deplibs -framework System"
continue
esac
elif test "X$arg" = "X-lc_r"; then
case $host in
*-*-openbsd* | *-*-freebsd*)
# Do not include libc_r directly, use -pthread flag.
continue
;;
esac
fi
deplibs="$deplibs $arg"
continue
;;
-mt|-mthreads|-kthread|-Kthread|-pthread|-pthreads|--thread-safe)
deplibs="$deplibs $arg"
continue
;;
-module)
module=yes
continue
;;
# gcc -m* arguments should be passed to the linker via $compiler_flags
# in order to pass architecture information to the linker
# (e.g. 32 vs 64-bit). This may also be accomplished via -Wl,-mfoo
# but this is not reliable with gcc because gcc may use -mfoo to
# select a different linker, different libraries, etc, while
# -Wl,-mfoo simply passes -mfoo to the linker.
-m*)
# Unknown arguments in both finalize_command and compile_command need
# to be aesthetically quoted because they are evaled later.
arg=`$echo "X$arg" | $Xsed -e "$sed_quote_subst"`
case $arg in
*[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"")
arg="\"$arg\""
;;
esac
compile_command="$compile_command $arg"
finalize_command="$finalize_command $arg"
if test "$with_gcc" = "yes" ; then
compiler_flags="$compiler_flags $arg"
fi
continue
;;
-shrext)
prev=shrext
continue
;;
-no-fast-install)
fast_install=no
continue
;;
-no-install)
case $host in
*-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2*)
# The PATH hackery in wrapper scripts is required on Windows
# in order for the loader to find any dlls it needs.
$echo "$modename: warning: \`-no-install' is ignored for $host" 1>&2
$echo "$modename: warning: assuming \`-no-fast-install' instead" 1>&2
fast_install=no
;;
*) no_install=yes ;;
esac
continue
;;
-no-undefined)
allow_undefined=no
continue
;;
-objectlist)
prev=objectlist
continue
;;
-o) prev=output ;;
-precious-files-regex)
prev=precious_regex
continue
;;
-release)
prev=release
continue
;;
-rpath)
prev=rpath
continue
;;
-R)
prev=xrpath
continue
;;
-R*)
dir=`$echo "X$arg" | $Xsed -e 's/^-R//'`
# We need an absolute path.
case $dir in
[\\/]* | [A-Za-z]:[\\/]*) ;;
*)
$echo "$modename: only absolute run-paths are allowed" 1>&2
exit $EXIT_FAILURE
;;
esac
case "$xrpath " in
*" $dir "*) ;;
*) xrpath="$xrpath $dir" ;;
esac
continue
;;
-static)
# The effects of -static are defined in a previous loop.
# We used to do the same as -all-static on platforms that
# didn't have a PIC flag, but the assumption that the effects
# would be equivalent was wrong. It would break on at least
# Digital Unix and AIX.
continue
;;
-thread-safe)
thread_safe=yes
continue
;;
-version-info)
prev=vinfo
continue
;;
-version-number)
prev=vinfo
vinfo_number=yes
continue
;;
-Wc,*)
args=`$echo "X$arg" | $Xsed -e "$sed_quote_subst" -e 's/^-Wc,//'`
arg=
save_ifs="$IFS"; IFS=','
for flag in $args; do
IFS="$save_ifs"
case $flag in
*[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"")
flag="\"$flag\""
;;
esac
arg="$arg $wl$flag"
compiler_flags="$compiler_flags $flag"
done
IFS="$save_ifs"
arg=`$echo "X$arg" | $Xsed -e "s/^ //"`
;;
-Wl,*)
args=`$echo "X$arg" | $Xsed -e "$sed_quote_subst" -e 's/^-Wl,//'`
arg=
save_ifs="$IFS"; IFS=','
for flag in $args; do
IFS="$save_ifs"
case $flag in
*[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"")
flag="\"$flag\""
;;
esac
arg="$arg $wl$flag"
compiler_flags="$compiler_flags $wl$flag"
linker_flags="$linker_flags $flag"
done
IFS="$save_ifs"
arg=`$echo "X$arg" | $Xsed -e "s/^ //"`
;;
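# Example (hypothetical): "-Wl,-rpath,/opt/lib" is split on the commas
# above, so $linker_flags gains "-rpath /opt/lib" while $compiler_flags
# and the link commands carry the $wl-prefixed forms.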
-Xcompiler)
prev=xcompiler
continue
;;
-Xlinker)
prev=xlinker
continue
;;
-XCClinker)
prev=xcclinker
continue
;;
# Some other compiler flag.
-* | +*)
# Unknown arguments in both finalize_command and compile_command need
# to be aesthetically quoted because they are evaled later.
arg=`$echo "X$arg" | $Xsed -e "$sed_quote_subst"`
case $arg in
*[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"")
arg="\"$arg\""
;;
esac
;;
*.$objext)
# A standard object.
objs="$objs $arg"
;;
*.lo)
# A libtool-controlled object.
# Check to see that this really is a libtool object.
if (${SED} -e '2q' $arg | grep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then
pic_object=
non_pic_object=
# Read the .lo file
# If there is no directory component, then add one.
case $arg in
*/* | *\\*) . $arg ;;
*) . ./$arg ;;
esac
if test -z "$pic_object" || \
test -z "$non_pic_object" ||
test "$pic_object" = none && \
test "$non_pic_object" = none; then
$echo "$modename: cannot find name of object for \`$arg'" 1>&2
exit $EXIT_FAILURE
fi
# Extract subdirectory from the argument.
xdir=`$echo "X$arg" | $Xsed -e 's%/[^/]*$%%'`
if test "X$xdir" = "X$arg"; then
xdir=
else
xdir="$xdir/"
fi
if test "$pic_object" != none; then
# Prepend the subdirectory the object is found in.
pic_object="$xdir$pic_object"
if test "$prev" = dlfiles; then
if test "$build_libtool_libs" = yes && test "$dlopen_support" = yes; then
dlfiles="$dlfiles $pic_object"
prev=
continue
else
# If libtool objects are unsupported, then we need to preload.
prev=dlprefiles
fi
fi
# CHECK ME: I think I busted this. -Ossama
if test "$prev" = dlprefiles; then
# Preload the old-style object.
dlprefiles="$dlprefiles $pic_object"
prev=
fi
# A PIC object.
libobjs="$libobjs $pic_object"
arg="$pic_object"
fi
# Non-PIC object.
if test "$non_pic_object" != none; then
# Prepend the subdirectory the object is found in.
non_pic_object="$xdir$non_pic_object"
# A standard non-PIC object
non_pic_objects="$non_pic_objects $non_pic_object"
if test -z "$pic_object" || test "$pic_object" = none ; then
arg="$non_pic_object"
fi
fi
else
# Only an error if not doing a dry-run.
if test -z "$run"; then
$echo "$modename: \`$arg' is not a valid libtool object" 1>&2
exit $EXIT_FAILURE
else
# Dry-run case.
# Extract subdirectory from the argument.
xdir=`$echo "X$arg" | $Xsed -e 's%/[^/]*$%%'`
if test "X$xdir" = "X$arg"; then
xdir=
else
xdir="$xdir/"
fi
pic_object=`$echo "X${xdir}${objdir}/${arg}" | $Xsed -e "$lo2o"`
non_pic_object=`$echo "X${xdir}${arg}" | $Xsed -e "$lo2o"`
libobjs="$libobjs $pic_object"
non_pic_objects="$non_pic_objects $non_pic_object"
fi
fi
;;
*.$libext)
# An archive.
deplibs="$deplibs $arg"
old_deplibs="$old_deplibs $arg"
continue
;;
*.la)
# A libtool-controlled library.
if test "$prev" = dlfiles; then
# This library was specified with -dlopen.
dlfiles="$dlfiles $arg"
prev=
elif test "$prev" = dlprefiles; then
# The library was specified with -dlpreopen.
dlprefiles="$dlprefiles $arg"
prev=
else
deplibs="$deplibs $arg"
fi
continue
;;
# Some other compiler argument.
*)
# Unknown arguments in both finalize_command and compile_command need
# to be aesthetically quoted because they are evaled later.
arg=`$echo "X$arg" | $Xsed -e "$sed_quote_subst"`
case $arg in
*[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"")
arg="\"$arg\""
;;
esac
;;
esac # arg
# Now actually substitute the argument into the commands.
if test -n "$arg"; then
compile_command="$compile_command $arg"
finalize_command="$finalize_command $arg"
fi
done # argument parsing loop
if test -n "$prev"; then
$echo "$modename: the \`$prevarg' option requires an argument" 1>&2
$echo "$help" 1>&2
exit $EXIT_FAILURE
fi
if test "$export_dynamic" = yes && test -n "$export_dynamic_flag_spec"; then
eval arg=\"$export_dynamic_flag_spec\"
compile_command="$compile_command $arg"
finalize_command="$finalize_command $arg"
fi
oldlibs=
# calculate the name of the file, without its directory
outputname=`$echo "X$output" | $Xsed -e 's%^.*/%%'`
libobjs_save="$libobjs"
if test -n "$shlibpath_var"; then
# get the directories listed in $shlibpath_var
eval shlib_search_path=\`\$echo \"X\${$shlibpath_var}\" \| \$Xsed -e \'s/:/ /g\'\`
else
shlib_search_path=
fi
eval sys_lib_search_path=\"$sys_lib_search_path_spec\"
eval sys_lib_dlsearch_path=\"$sys_lib_dlsearch_path_spec\"
output_objdir=`$echo "X$output" | $Xsed -e 's%/[^/]*$%%'`
if test "X$output_objdir" = "X$output"; then
output_objdir="$objdir"
else
output_objdir="$output_objdir/$objdir"
fi
# Create the object directory.
if test ! -d "$output_objdir"; then
$show "$mkdir $output_objdir"
$run $mkdir $output_objdir
status=$?
if test "$status" -ne 0 && test ! -d "$output_objdir"; then
exit $status
fi
fi
# Determine the type of output
case $output in
"")
$echo "$modename: you must specify an output file" 1>&2
$echo "$help" 1>&2
exit $EXIT_FAILURE
;;
*.$libext) linkmode=oldlib ;;
*.lo | *.$objext) linkmode=obj ;;
*.la) linkmode=lib ;;
*) linkmode=prog ;; # Anything else should be a program.
esac
case $host in
*cygwin* | *mingw* | *pw32*)
# don't eliminate duplications in $postdeps and $predeps
duplicate_compiler_generated_deps=yes
;;
*)
duplicate_compiler_generated_deps=$duplicate_deps
;;
esac
specialdeplibs=
libs=
# Find all interdependent deplibs by searching for libraries
# that are linked more than once (e.g. -la -lb -la)
for deplib in $deplibs; do
if test "X$duplicate_deps" = "Xyes" ; then
case "$libs " in
*" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;;
esac
fi
libs="$libs $deplib"
done
if test "$linkmode" = lib; then
libs="$predeps $libs $compiler_lib_search_path $postdeps"
# Compute libraries that are listed more than once in $predeps
# $postdeps and mark them as special (i.e., whose duplicates are
# not to be eliminated).
pre_post_deps=
if test "X$duplicate_compiler_generated_deps" = "Xyes" ; then
for pre_post_dep in $predeps $postdeps; do
case "$pre_post_deps " in
*" $pre_post_dep "*) specialdeplibs="$specialdeplibs $pre_post_deps" ;;
esac
pre_post_deps="$pre_post_deps $pre_post_dep"
done
fi
pre_post_deps=
fi
deplibs=
newdependency_libs=
newlib_search_path=
need_relink=no # whether we're linking any uninstalled libtool libraries
notinst_deplibs= # not-installed libtool libraries
notinst_path= # paths that contain not-installed libtool libraries
case $linkmode in
lib)
passes="conv link"
for file in $dlfiles $dlprefiles; do
case $file in
*.la) ;;
*)
$echo "$modename: libraries can \`-dlopen' only libtool libraries: $file" 1>&2
exit $EXIT_FAILURE
;;
esac
done
;;
prog)
compile_deplibs=
finalize_deplibs=
alldeplibs=no
newdlfiles=
newdlprefiles=
passes="conv scan dlopen dlpreopen link"
;;
*) passes="conv"
;;
esac
for pass in $passes; do
if test "$linkmode,$pass" = "lib,link" ||
test "$linkmode,$pass" = "prog,scan"; then
libs="$deplibs"
deplibs=
fi
if test "$linkmode" = prog; then
case $pass in
dlopen) libs="$dlfiles" ;;
dlpreopen) libs="$dlprefiles" ;;
link)
libs="$deplibs %DEPLIBS%"
test "X$link_all_deplibs" != Xno && libs="$libs $dependency_libs"
;;
esac
fi
if test "$pass" = dlopen; then
# Collect dlpreopened libraries
save_deplibs="$deplibs"
deplibs=
fi
for deplib in $libs; do
lib=
found=no
case $deplib in
-mt|-mthreads|-kthread|-Kthread|-pthread|-pthreads|--thread-safe)
if test "$linkmode,$pass" = "prog,link"; then
compile_deplibs="$deplib $compile_deplibs"
finalize_deplibs="$deplib $finalize_deplibs"
else
deplibs="$deplib $deplibs"
fi
continue
;;
-l*)
if test "$linkmode" != lib && test "$linkmode" != prog; then
$echo "$modename: warning: \`-l' is ignored for archives/objects" 1>&2
continue
fi
if test "$pass" = conv; then
deplibs="$deplib $deplibs"
continue
fi
name=`$echo "X$deplib" | $Xsed -e 's/^-l//'`
for searchdir in $newlib_search_path $lib_search_path $sys_lib_search_path $shlib_search_path; do
for search_ext in .la $std_shrext .so .a; do
# Search the libtool library
lib="$searchdir/lib${name}${search_ext}"
if test -f "$lib"; then
if test "$search_ext" = ".la"; then
found=yes
else
found=no
fi
break 2
fi
done
done
if test "$found" != yes; then
# deplib doesn't seem to be a libtool library
if test "$linkmode,$pass" = "prog,link"; then
compile_deplibs="$deplib $compile_deplibs"
finalize_deplibs="$deplib $finalize_deplibs"
else
deplibs="$deplib $deplibs"
test "$linkmode" = lib && newdependency_libs="$deplib $newdependency_libs"
fi
continue
else # deplib is a libtool library
# If $allow_libtool_libs_with_static_runtimes is set and $deplib is one of
# the compiler's standard runtime libraries, it needs special handling
# here, not later.
if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then
case " $predeps $postdeps " in
*" $deplib "*)
if (${SED} -e '2q' $lib |
grep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then
library_names=
old_library=
case $lib in
*/* | *\\*) . $lib ;;
*) . ./$lib ;;
esac
for l in $old_library $library_names; do
ll="$l"
done
if test "X$ll" = "X$old_library" ; then # only static version available
found=no
ladir=`$echo "X$lib" | $Xsed -e 's%/[^/]*$%%'`
test "X$ladir" = "X$lib" && ladir="."
lib=$ladir/$old_library
if test "$linkmode,$pass" = "prog,link"; then
compile_deplibs="$deplib $compile_deplibs"
finalize_deplibs="$deplib $finalize_deplibs"
else
deplibs="$deplib $deplibs"
test "$linkmode" = lib && newdependency_libs="$deplib $newdependency_libs"
fi
continue
fi
fi
;;
*) ;;
esac
fi
fi
;; # -l
-L*)
case $linkmode in
lib)
deplibs="$deplib $deplibs"
test "$pass" = conv && continue
newdependency_libs="$deplib $newdependency_libs"
newlib_search_path="$newlib_search_path "`$echo "X$deplib" | $Xsed -e 's/^-L//'`
;;
prog)
if test "$pass" = conv; then
deplibs="$deplib $deplibs"
continue
fi
if test "$pass" = scan; then
deplibs="$deplib $deplibs"
else
compile_deplibs="$deplib $compile_deplibs"
finalize_deplibs="$deplib $finalize_deplibs"
fi
newlib_search_path="$newlib_search_path "`$echo "X$deplib" | $Xsed -e 's/^-L//'`
;;
*)
$echo "$modename: warning: \`-L' is ignored for archives/objects" 1>&2
;;
esac # linkmode
continue
;; # -L
-R*)
if test "$pass" = link; then
dir=`$echo "X$deplib" | $Xsed -e 's/^-R//'`
# Make sure the xrpath contains only unique directories.
case "$xrpath " in
*" $dir "*) ;;
*) xrpath="$xrpath $dir" ;;
esac
fi
deplibs="$deplib $deplibs"
continue
;;
*.la) lib="$deplib" ;;
*.$libext)
if test "$pass" = conv; then
deplibs="$deplib $deplibs"
continue
fi
case $linkmode in
lib)
if test "$deplibs_check_method" != pass_all; then
$echo
$echo "*** Warning: Trying to link with static lib archive $deplib."
$echo "*** I have the capability to make that library automatically link in when"
$echo "*** you link to this library. But I can only do this if you have a"
$echo "*** shared version of the library, which you do not appear to have"
$echo "*** because the file extensions .$libext of this argument makes me believe"
$echo "*** that it is just a static archive that I should not used here."
else
$echo
$echo "*** Warning: Linking the shared library $output against the"
$echo "*** static library $deplib is not portable!"
deplibs="$deplib $deplibs"
fi
continue
;;
prog)
if test "$pass" != link; then
deplibs="$deplib $deplibs"
else
compile_deplibs="$deplib $compile_deplibs"
finalize_deplibs="$deplib $finalize_deplibs"
fi
continue
;;
esac # linkmode
;; # *.$libext
*.lo | *.$objext)
if test "$pass" = conv; then
deplibs="$deplib $deplibs"
elif test "$linkmode" = prog; then
if test "$pass" = dlpreopen || test "$dlopen_support" != yes || test "$build_libtool_libs" = no; then
# If there is no dlopen support or we're linking statically,
# we need to preload.
newdlprefiles="$newdlprefiles $deplib"
compile_deplibs="$deplib $compile_deplibs"
finalize_deplibs="$deplib $finalize_deplibs"
else
newdlfiles="$newdlfiles $deplib"
fi
fi
continue
;;
%DEPLIBS%)
alldeplibs=yes
continue
;;
esac # case $deplib
if test "$found" = yes || test -f "$lib"; then :
else
$echo "$modename: cannot find the library \`$lib'" 1>&2
exit $EXIT_FAILURE
fi
# Check to see that this really is a libtool archive.
if (${SED} -e '2q' $lib | grep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then :
else
$echo "$modename: \`$lib' is not a valid libtool archive" 1>&2
exit $EXIT_FAILURE
fi
ladir=`$echo "X$lib" | $Xsed -e 's%/[^/]*$%%'`
test "X$ladir" = "X$lib" && ladir="."
dlname=
dlopen=
dlpreopen=
libdir=
library_names=
old_library=
# If the library was installed with an old release of libtool,
      # it will not redefine the variables installed, or shouldnotlink
installed=yes
shouldnotlink=no
# Read the .la file
case $lib in
*/* | *\\*) . $lib ;;
*) . ./$lib ;;
esac
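      # Sourcing the .la file above defines, among others, dlname,
      # library_names, old_library, libdir and dependency_libs.  A typical
      # installed .la file carries assignments roughly like this
      # (illustrative values, not generated here):
      #   dlname='libfoo.so.1'
      #   library_names='libfoo.so.1.2.3 libfoo.so.1 libfoo.so'
      #   old_library='libfoo.a'
      #   dependency_libs=' -L/usr/local/lib -lbar'
      #   libdir='/usr/local/lib'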
if test "$linkmode,$pass" = "lib,link" ||
test "$linkmode,$pass" = "prog,scan" ||
{ test "$linkmode" != prog && test "$linkmode" != lib; }; then
test -n "$dlopen" && dlfiles="$dlfiles $dlopen"
test -n "$dlpreopen" && dlprefiles="$dlprefiles $dlpreopen"
fi
if test "$pass" = conv; then
# Only check for convenience libraries
deplibs="$lib $deplibs"
if test -z "$libdir"; then
if test -z "$old_library"; then
$echo "$modename: cannot find name of link library for \`$lib'" 1>&2
exit $EXIT_FAILURE
fi
# It is a libtool convenience library, so add in its objects.
convenience="$convenience $ladir/$objdir/$old_library"
old_convenience="$old_convenience $ladir/$objdir/$old_library"
tmp_libs=
for deplib in $dependency_libs; do
deplibs="$deplib $deplibs"
if test "X$duplicate_deps" = "Xyes" ; then
case "$tmp_libs " in
*" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;;
esac
fi
tmp_libs="$tmp_libs $deplib"
done
elif test "$linkmode" != prog && test "$linkmode" != lib; then
$echo "$modename: \`$lib' is not a convenience library" 1>&2
exit $EXIT_FAILURE
fi
continue
fi # $pass = conv
# Get the name of the library we link against.
linklib=
for l in $old_library $library_names; do
linklib="$l"
done
if test -z "$linklib"; then
$echo "$modename: cannot find name of link library for \`$lib'" 1>&2
exit $EXIT_FAILURE
fi
# This library was specified with -dlopen.
if test "$pass" = dlopen; then
if test -z "$libdir"; then
$echo "$modename: cannot -dlopen a convenience library: \`$lib'" 1>&2
exit $EXIT_FAILURE
fi
if test -z "$dlname" ||
test "$dlopen_support" != yes ||
test "$build_libtool_libs" = no; then
# If there is no dlname, no dlopen support or we're linking
# statically, we need to preload. We also need to preload any
# dependent libraries so libltdl's deplib preloader doesn't
# bomb out in the load deplibs phase.
dlprefiles="$dlprefiles $lib $dependency_libs"
else
newdlfiles="$newdlfiles $lib"
fi
continue
fi # $pass = dlopen
# We need an absolute path.
case $ladir in
[\\/]* | [A-Za-z]:[\\/]*) abs_ladir="$ladir" ;;
*)
abs_ladir=`cd "$ladir" && pwd`
if test -z "$abs_ladir"; then
$echo "$modename: warning: cannot determine absolute directory name of \`$ladir'" 1>&2
$echo "$modename: passing it literally to the linker, although it might fail" 1>&2
abs_ladir="$ladir"
fi
;;
esac
laname=`$echo "X$lib" | $Xsed -e 's%^.*/%%'`
# Find the relevant object directory and library name.
if test "X$installed" = Xyes; then
if test ! -f "$libdir/$linklib" && test -f "$abs_ladir/$linklib"; then
$echo "$modename: warning: library \`$lib' was moved." 1>&2
dir="$ladir"
absdir="$abs_ladir"
libdir="$abs_ladir"
else
dir="$libdir"
absdir="$libdir"
fi
else
dir="$ladir/$objdir"
absdir="$abs_ladir/$objdir"
# Remove this search path later
notinst_path="$notinst_path $abs_ladir"
fi # $installed = yes
name=`$echo "X$laname" | $Xsed -e 's/\.la$//' -e 's/^lib//'`
# This library was specified with -dlpreopen.
if test "$pass" = dlpreopen; then
if test -z "$libdir"; then
$echo "$modename: cannot -dlpreopen a convenience library: \`$lib'" 1>&2
exit $EXIT_FAILURE
fi
# Prefer using a static library (so that no silly _DYNAMIC symbols
# are required to link).
if test -n "$old_library"; then
newdlprefiles="$newdlprefiles $dir/$old_library"
# Otherwise, use the dlname, so that lt_dlopen finds it.
elif test -n "$dlname"; then
newdlprefiles="$newdlprefiles $dir/$dlname"
else
newdlprefiles="$newdlprefiles $dir/$linklib"
fi
fi # $pass = dlpreopen
if test -z "$libdir"; then
# Link the convenience library
if test "$linkmode" = lib; then
deplibs="$dir/$old_library $deplibs"
elif test "$linkmode,$pass" = "prog,link"; then
compile_deplibs="$dir/$old_library $compile_deplibs"
finalize_deplibs="$dir/$old_library $finalize_deplibs"
else
deplibs="$lib $deplibs" # used for prog,scan pass
fi
continue
fi
if test "$linkmode" = prog && test "$pass" != link; then
newlib_search_path="$newlib_search_path $ladir"
deplibs="$lib $deplibs"
linkalldeplibs=no
if test "$link_all_deplibs" != no || test -z "$library_names" ||
test "$build_libtool_libs" = no; then
linkalldeplibs=yes
fi
tmp_libs=
for deplib in $dependency_libs; do
case $deplib in
-L*) newlib_search_path="$newlib_search_path "`$echo "X$deplib" | $Xsed -e 's/^-L//'`;; ### testsuite: skip nested quoting test
esac
# Need to link against all dependency_libs?
if test "$linkalldeplibs" = yes; then
deplibs="$deplib $deplibs"
else
# Need to hardcode shared library paths
	    # and/or link against static libraries
newdependency_libs="$deplib $newdependency_libs"
fi
if test "X$duplicate_deps" = "Xyes" ; then
case "$tmp_libs " in
*" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;;
esac
fi
tmp_libs="$tmp_libs $deplib"
done # for deplib
continue
fi # $linkmode = prog...
if test "$linkmode,$pass" = "prog,link"; then
if test -n "$library_names" &&
{ test "$prefer_static_libs" = no || test -z "$old_library"; }; then
# We need to hardcode the library path
if test -n "$shlibpath_var"; then
# Make sure the rpath contains only unique directories.
case "$temp_rpath " in
*" $dir "*) ;;
*" $absdir "*) ;;
*) temp_rpath="$temp_rpath $dir" ;;
esac
fi
# Hardcode the library path.
# Skip directories that are in the system default run-time
# search path.
case " $sys_lib_dlsearch_path " in
*" $absdir "*) ;;
*)
case "$compile_rpath " in
*" $absdir "*) ;;
*) compile_rpath="$compile_rpath $absdir"
esac
;;
esac
case " $sys_lib_dlsearch_path " in
*" $libdir "*) ;;
*)
case "$finalize_rpath " in
*" $libdir "*) ;;
*) finalize_rpath="$finalize_rpath $libdir"
esac
;;
esac
fi # $linkmode,$pass = prog,link...
if test "$alldeplibs" = yes &&
{ test "$deplibs_check_method" = pass_all ||
{ test "$build_libtool_libs" = yes &&
test -n "$library_names"; }; }; then
# We only need to search for static libraries
continue
fi
fi
link_static=no # Whether the deplib will be linked statically
if test -n "$library_names" &&
{ test "$prefer_static_libs" = no || test -z "$old_library"; }; then
if test "$installed" = no; then
notinst_deplibs="$notinst_deplibs $lib"
need_relink=yes
fi
# This is a shared library
	# Warn about portability: we can't link against a -module on
	# some systems (darwin)
if test "$shouldnotlink" = yes && test "$pass" = link ; then
$echo
if test "$linkmode" = prog; then
$echo "*** Warning: Linking the executable $output against the loadable module"
else
$echo "*** Warning: Linking the shared library $output against the loadable module"
fi
$echo "*** $linklib is not portable!"
fi
if test "$linkmode" = lib &&
test "$hardcode_into_libs" = yes; then
# Hardcode the library path.
# Skip directories that are in the system default run-time
# search path.
case " $sys_lib_dlsearch_path " in
*" $absdir "*) ;;
*)
case "$compile_rpath " in
*" $absdir "*) ;;
*) compile_rpath="$compile_rpath $absdir"
esac
;;
esac
case " $sys_lib_dlsearch_path " in
*" $libdir "*) ;;
*)
case "$finalize_rpath " in
*" $libdir "*) ;;
*) finalize_rpath="$finalize_rpath $libdir"
esac
;;
esac
fi
if test -n "$old_archive_from_expsyms_cmds"; then
# figure out the soname
set dummy $library_names
realname="$2"
shift; shift
libname=`eval \\$echo \"$libname_spec\"`
# use dlname if we got it. it's perfectly good, no?
if test -n "$dlname"; then
soname="$dlname"
elif test -n "$soname_spec"; then
# bleh windows
case $host in
*cygwin* | mingw*)
major=`expr $current - $age`
versuffix="-$major"
;;
esac
eval soname=\"$soname_spec\"
else
soname="$realname"
fi
# Make a new name for the extract_expsyms_cmds to use
soroot="$soname"
soname=`$echo $soroot | ${SED} -e 's/^.*\///'`
newlib="libimp-`$echo $soname | ${SED} 's/^lib//;s/\.dll$//'`.a"
# If the library has no export list, then create one now
if test -f "$output_objdir/$soname-def"; then :
else
$show "extracting exported symbol list from \`$soname'"
save_ifs="$IFS"; IFS='~'
cmds=$extract_expsyms_cmds
for cmd in $cmds; do
IFS="$save_ifs"
eval cmd=\"$cmd\"
$show "$cmd"
$run eval "$cmd" || exit $?
done
IFS="$save_ifs"
fi
# Create $newlib
if test -f "$output_objdir/$newlib"; then :; else
$show "generating import library for \`$soname'"
save_ifs="$IFS"; IFS='~'
cmds=$old_archive_from_expsyms_cmds
for cmd in $cmds; do
IFS="$save_ifs"
eval cmd=\"$cmd\"
$show "$cmd"
$run eval "$cmd" || exit $?
done
IFS="$save_ifs"
fi
# make sure the library variables are pointing to the new library
dir=$output_objdir
linklib=$newlib
fi # test -n "$old_archive_from_expsyms_cmds"
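	# Summary of the import-library block above (win32-style hosts): the
	# exported symbols are pulled out of the DLL named by $soname, and an
	# import archive libimp-<stem>.a is built from them.  For instance
	# (hypothetical names), soname 'libfoo-1.dll' yields
	# newlib 'libimp-foo-1.a'; dir and linklib are then redirected so the
	# link uses the generated import library.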
if test "$linkmode" = prog || test "$mode" != relink; then
add_shlibpath=
add_dir=
add=
lib_linked=yes
case $hardcode_action in
immediate | unsupported)
if test "$hardcode_direct" = no; then
add="$dir/$linklib"
case $host in
*-*-sco3.2v5* ) add_dir="-L$dir" ;;
*-*-darwin* )
 		# if the lib is a module then we cannot link against it;
		# someone is ignoring the new warnings I added
if /usr/bin/file -L $add 2> /dev/null | $EGREP "bundle" >/dev/null ; then
$echo "** Warning, lib $linklib is a module, not a shared library"
if test -z "$old_library" ; then
$echo
$echo "** And there doesn't seem to be a static archive available"
$echo "** The link will probably fail, sorry"
else
add="$dir/$old_library"
fi
fi
esac
elif test "$hardcode_minus_L" = no; then
case $host in
*-*-sunos*) add_shlibpath="$dir" ;;
esac
add_dir="-L$dir"
add="-l$name"
elif test "$hardcode_shlibpath_var" = no; then
add_shlibpath="$dir"
add="-l$name"
else
lib_linked=no
fi
;;
relink)
if test "$hardcode_direct" = yes; then
add="$dir/$linklib"
elif test "$hardcode_minus_L" = yes; then
add_dir="-L$dir"
# Try looking first in the location we're being installed to.
if test -n "$inst_prefix_dir"; then
case "$libdir" in
[\\/]*)
add_dir="$add_dir -L$inst_prefix_dir$libdir"
;;
esac
fi
add="-l$name"
elif test "$hardcode_shlibpath_var" = yes; then
add_shlibpath="$dir"
add="-l$name"
else
lib_linked=no
fi
;;
*) lib_linked=no ;;
esac
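	  # $hardcode_action chose above how this deplib is referenced:
	  # 'immediate'/'unsupported' link the uninstalled library directly
	  # (or fall back to -L/-l plus a shlibpath entry), while 'relink'
	  # prefers the final install locations so the relink step resolves
	  # against the installed copies.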
if test "$lib_linked" != yes; then
$echo "$modename: configuration error: unsupported hardcode properties"
exit $EXIT_FAILURE
fi
if test -n "$add_shlibpath"; then
case :$compile_shlibpath: in
*":$add_shlibpath:"*) ;;
*) compile_shlibpath="$compile_shlibpath$add_shlibpath:" ;;
esac
fi
if test "$linkmode" = prog; then
test -n "$add_dir" && compile_deplibs="$add_dir $compile_deplibs"
test -n "$add" && compile_deplibs="$add $compile_deplibs"
else
test -n "$add_dir" && deplibs="$add_dir $deplibs"
test -n "$add" && deplibs="$add $deplibs"
if test "$hardcode_direct" != yes && \
test "$hardcode_minus_L" != yes && \
test "$hardcode_shlibpath_var" = yes; then
case :$finalize_shlibpath: in
*":$libdir:"*) ;;
*) finalize_shlibpath="$finalize_shlibpath$libdir:" ;;
esac
fi
fi
fi
if test "$linkmode" = prog || test "$mode" = relink; then
add_shlibpath=
add_dir=
add=
# Finalize command for both is simple: just hardcode it.
if test "$hardcode_direct" = yes; then
add="$libdir/$linklib"
elif test "$hardcode_minus_L" = yes; then
add_dir="-L$libdir"
add="-l$name"
elif test "$hardcode_shlibpath_var" = yes; then
case :$finalize_shlibpath: in
*":$libdir:"*) ;;
*) finalize_shlibpath="$finalize_shlibpath$libdir:" ;;
esac
add="-l$name"
elif test "$hardcode_automatic" = yes; then
if test -n "$inst_prefix_dir" &&
test -f "$inst_prefix_dir$libdir/$linklib" ; then
add="$inst_prefix_dir$libdir/$linklib"
else
add="$libdir/$linklib"
fi
else
# We cannot seem to hardcode it, guess we'll fake it.
add_dir="-L$libdir"
# Try looking first in the location we're being installed to.
if test -n "$inst_prefix_dir"; then
case "$libdir" in
[\\/]*)
add_dir="$add_dir -L$inst_prefix_dir$libdir"
;;
esac
fi
add="-l$name"
fi
if test "$linkmode" = prog; then
test -n "$add_dir" && finalize_deplibs="$add_dir $finalize_deplibs"
test -n "$add" && finalize_deplibs="$add $finalize_deplibs"
else
test -n "$add_dir" && deplibs="$add_dir $deplibs"
test -n "$add" && deplibs="$add $deplibs"
fi
fi
elif test "$linkmode" = prog; then
# Here we assume that one of hardcode_direct or hardcode_minus_L
# is not unsupported. This is valid on all known static and
# shared platforms.
if test "$hardcode_direct" != unsupported; then
test -n "$old_library" && linklib="$old_library"
compile_deplibs="$dir/$linklib $compile_deplibs"
finalize_deplibs="$dir/$linklib $finalize_deplibs"
else
compile_deplibs="-l$name -L$dir $compile_deplibs"
finalize_deplibs="-l$name -L$dir $finalize_deplibs"
fi
elif test "$build_libtool_libs" = yes; then
# Not a shared library
if test "$deplibs_check_method" != pass_all; then
	  # We're trying to link a shared library against a static one
# but the system doesn't support it.
# Just print a warning and add the library to dependency_libs so
# that the program can be linked against the static library.
$echo
$echo "*** Warning: This system can not link to static lib archive $lib."
$echo "*** I have the capability to make that library automatically link in when"
$echo "*** you link to this library. But I can only do this if you have a"
$echo "*** shared version of the library, which you do not appear to have."
if test "$module" = yes; then
$echo "*** But as you try to build a module library, libtool will still create "
$echo "*** a static module, that should work as long as the dlopening application"
$echo "*** is linked with the -dlopen flag to resolve symbols at runtime."
if test -z "$global_symbol_pipe"; then
$echo
$echo "*** However, this would only work if libtool was able to extract symbol"
$echo "*** lists from a program, using \`nm' or equivalent, but libtool could"
$echo "*** not find such a program. So, this module is probably useless."
$echo "*** \`nm' from GNU binutils and a full rebuild may help."
fi
if test "$build_old_libs" = no; then
build_libtool_libs=module
build_old_libs=yes
else
build_libtool_libs=no
fi
fi
else
convenience="$convenience $dir/$old_library"
old_convenience="$old_convenience $dir/$old_library"
deplibs="$dir/$old_library $deplibs"
link_static=yes
fi
fi # link shared/static library?
if test "$linkmode" = lib; then
if test -n "$dependency_libs" &&
{ test "$hardcode_into_libs" != yes ||
test "$build_old_libs" = yes ||
test "$link_static" = yes; }; then
# Extract -R from dependency_libs
temp_deplibs=
for libdir in $dependency_libs; do
case $libdir in
-R*) temp_xrpath=`$echo "X$libdir" | $Xsed -e 's/^-R//'`
case " $xrpath " in
*" $temp_xrpath "*) ;;
*) xrpath="$xrpath $temp_xrpath";;
esac;;
*) temp_deplibs="$temp_deplibs $libdir";;
esac
done
dependency_libs="$temp_deplibs"
fi
newlib_search_path="$newlib_search_path $absdir"
# Link against this library
test "$link_static" = no && newdependency_libs="$abs_ladir/$laname $newdependency_libs"
# ... and its dependency_libs
tmp_libs=
for deplib in $dependency_libs; do
newdependency_libs="$deplib $newdependency_libs"
if test "X$duplicate_deps" = "Xyes" ; then
case "$tmp_libs " in
*" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;;
esac
fi
tmp_libs="$tmp_libs $deplib"
done
if test "$link_all_deplibs" != no; then
# Add the search paths of all dependency libraries
for deplib in $dependency_libs; do
case $deplib in
-L*) path="$deplib" ;;
*.la)
dir=`$echo "X$deplib" | $Xsed -e 's%/[^/]*$%%'`
test "X$dir" = "X$deplib" && dir="."
# We need an absolute path.
case $dir in
[\\/]* | [A-Za-z]:[\\/]*) absdir="$dir" ;;
*)
absdir=`cd "$dir" && pwd`
if test -z "$absdir"; then
$echo "$modename: warning: cannot determine absolute directory name of \`$dir'" 1>&2
absdir="$dir"
fi
;;
esac
if grep "^installed=no" $deplib > /dev/null; then
path="$absdir/$objdir"
else
eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $deplib`
if test -z "$libdir"; then
$echo "$modename: \`$deplib' is not a valid libtool archive" 1>&2
exit $EXIT_FAILURE
fi
if test "$absdir" != "$libdir"; then
$echo "$modename: warning: \`$deplib' seems to be moved" 1>&2
fi
path="$absdir"
fi
depdepl=
case $host in
*-*-darwin*)
# we do not want to link against static libs,
# but need to link against shared
eval deplibrary_names=`${SED} -n -e 's/^library_names=\(.*\)$/\1/p' $deplib`
if test -n "$deplibrary_names" ; then
for tmp in $deplibrary_names ; do
depdepl=$tmp
done
if test -f "$path/$depdepl" ; then
depdepl="$path/$depdepl"
fi
# do not add paths which are already there
case " $newlib_search_path " in
*" $path "*) ;;
*) newlib_search_path="$newlib_search_path $path";;
esac
fi
path=""
;;
*)
path="-L$path"
;;
esac
;;
-l*)
case $host in
*-*-darwin*)
# Again, we only want to link against shared libraries
eval tmp_libs=`$echo "X$deplib" | $Xsed -e "s,^\-l,,"`
for tmp in $newlib_search_path ; do
if test -f "$tmp/lib$tmp_libs.dylib" ; then
eval depdepl="$tmp/lib$tmp_libs.dylib"
break
fi
done
path=""
;;
*) continue ;;
esac
;;
*) continue ;;
esac
case " $deplibs " in
*" $depdepl "*) ;;
*) deplibs="$depdepl $deplibs" ;;
esac
case " $deplibs " in
*" $path "*) ;;
*) deplibs="$deplibs $path" ;;
esac
done
fi # link_all_deplibs != no
fi # linkmode = lib
done # for deplib in $libs
dependency_libs="$newdependency_libs"
if test "$pass" = dlpreopen; then
# Link the dlpreopened libraries before other libraries
for deplib in $save_deplibs; do
deplibs="$deplib $deplibs"
done
fi
if test "$pass" != dlopen; then
if test "$pass" != conv; then
# Make sure lib_search_path contains only unique directories.
lib_search_path=
for dir in $newlib_search_path; do
case "$lib_search_path " in
*" $dir "*) ;;
*) lib_search_path="$lib_search_path $dir" ;;
esac
done
newlib_search_path=
fi
if test "$linkmode,$pass" != "prog,link"; then
vars="deplibs"
else
vars="compile_deplibs finalize_deplibs"
fi
for var in $vars dependency_libs; do
# Add libraries to $var in reverse order
eval tmp_libs=\"\$$var\"
new_libs=
for deplib in $tmp_libs; do
# FIXME: Pedantically, this is the right thing to do, so
# that some nasty dependency loop isn't accidentally
# broken:
#new_libs="$deplib $new_libs"
# Pragmatically, this seems to cause very few problems in
# practice:
case $deplib in
-L*) new_libs="$deplib $new_libs" ;;
-R*) ;;
*)
# And here is the reason: when a library appears more
# than once as an explicit dependence of a library, or
# is implicitly linked in more than once by the
# compiler, it is considered special, and multiple
# occurrences thereof are not removed. Compare this
# with having the same library being listed as a
# dependency of multiple other libraries: in this case,
# we know (pedantically, we assume) the library does not
# need to be listed more than once, so we keep only the
# last copy. This is not always right, but it is rare
# enough that we require users that really mean to play
# such unportable linking tricks to link the library
# using -Wl,-lname, so that libtool does not consider it
# for duplicate removal.
case " $specialdeplibs " in
*" $deplib "*) new_libs="$deplib $new_libs" ;;
*)
case " $new_libs " in
*" $deplib "*) ;;
*) new_libs="$deplib $new_libs" ;;
esac
;;
esac
;;
esac
done
tmp_libs=
for deplib in $new_libs; do
case $deplib in
-L*)
case " $tmp_libs " in
*" $deplib "*) ;;
*) tmp_libs="$tmp_libs $deplib" ;;
esac
;;
*) tmp_libs="$tmp_libs $deplib" ;;
esac
done
eval $var=\"$tmp_libs\"
done # for var
fi
# Last step: remove runtime libs from dependency_libs
# (they stay in deplibs)
tmp_libs=
for i in $dependency_libs ; do
case " $predeps $postdeps $compiler_lib_search_path " in
*" $i "*)
i=""
;;
esac
if test -n "$i" ; then
tmp_libs="$tmp_libs $i"
fi
done
dependency_libs=$tmp_libs
done # for pass
if test "$linkmode" = prog; then
dlfiles="$newdlfiles"
dlprefiles="$newdlprefiles"
fi
case $linkmode in
oldlib)
if test -n "$deplibs"; then
$echo "$modename: warning: \`-l' and \`-L' are ignored for archives" 1>&2
fi
if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then
$echo "$modename: warning: \`-dlopen' is ignored for archives" 1>&2
fi
if test -n "$rpath"; then
$echo "$modename: warning: \`-rpath' is ignored for archives" 1>&2
fi
if test -n "$xrpath"; then
$echo "$modename: warning: \`-R' is ignored for archives" 1>&2
fi
if test -n "$vinfo"; then
$echo "$modename: warning: \`-version-info/-version-number' is ignored for archives" 1>&2
fi
if test -n "$release"; then
$echo "$modename: warning: \`-release' is ignored for archives" 1>&2
fi
if test -n "$export_symbols" || test -n "$export_symbols_regex"; then
$echo "$modename: warning: \`-export-symbols' is ignored for archives" 1>&2
fi
# Now set the variables for building old libraries.
build_libtool_libs=no
oldlibs="$output"
objs="$objs$old_deplibs"
;;
lib)
# Make sure we only generate libraries of the form `libNAME.la'.
case $outputname in
lib*)
name=`$echo "X$outputname" | $Xsed -e 's/\.la$//' -e 's/^lib//'`
eval shared_ext=\"$shrext_cmds\"
eval libname=\"$libname_spec\"
;;
*)
if test "$module" = no; then
$echo "$modename: libtool library \`$output' must begin with \`lib'" 1>&2
$echo "$help" 1>&2
exit $EXIT_FAILURE
fi
if test "$need_lib_prefix" != no; then
# Add the "lib" prefix for modules if required
name=`$echo "X$outputname" | $Xsed -e 's/\.la$//'`
eval shared_ext=\"$shrext_cmds\"
eval libname=\"$libname_spec\"
else
libname=`$echo "X$outputname" | $Xsed -e 's/\.la$//'`
fi
;;
esac
if test -n "$objs"; then
if test "$deplibs_check_method" != pass_all; then
$echo "$modename: cannot build libtool library \`$output' from non-libtool objects on this host:$objs" 2>&1
exit $EXIT_FAILURE
else
$echo
$echo "*** Warning: Linking the shared library $output against the non-libtool"
$echo "*** objects $objs is not portable!"
libobjs="$libobjs $objs"
fi
fi
if test "$dlself" != no; then
$echo "$modename: warning: \`-dlopen self' is ignored for libtool libraries" 1>&2
fi
set dummy $rpath
if test "$#" -gt 2; then
$echo "$modename: warning: ignoring multiple \`-rpath's for a libtool library" 1>&2
fi
install_libdir="$2"
oldlibs=
if test -z "$rpath"; then
if test "$build_libtool_libs" = yes; then
# Building a libtool convenience library.
	  # Some compilers have problems with a `.al' extension, so
	  # convenience libraries should have the same extension that an
	  # archive normally would.
oldlibs="$output_objdir/$libname.$libext $oldlibs"
build_libtool_libs=convenience
build_old_libs=yes
fi
if test -n "$vinfo"; then
$echo "$modename: warning: \`-version-info/-version-number' is ignored for convenience libraries" 1>&2
fi
if test -n "$release"; then
$echo "$modename: warning: \`-release' is ignored for convenience libraries" 1>&2
fi
else
# Parse the version information argument.
save_ifs="$IFS"; IFS=':'
set dummy $vinfo 0 0 0
IFS="$save_ifs"
if test -n "$8"; then
$echo "$modename: too many parameters to \`-version-info'" 1>&2
$echo "$help" 1>&2
exit $EXIT_FAILURE
fi
# convert absolute version numbers to libtool ages
# this retains compatibility with .la files and attempts
# to make the code below a bit more comprehensible
case $vinfo_number in
yes)
number_major="$2"
number_minor="$3"
number_revision="$4"
#
# There are really only two kinds -- those that
# use the current revision as the major version
# and those that subtract age and use age as
# a minor version. But, then there is irix
# which has an extra 1 added just for fun
#
case $version_type in
darwin|linux|osf|windows)
current=`expr $number_major + $number_minor`
age="$number_minor"
revision="$number_revision"
;;
freebsd-aout|freebsd-elf|sunos)
current="$number_major"
revision="$number_minor"
age="0"
;;
irix|nonstopux)
current=`expr $number_major + $number_minor - 1`
age="$number_minor"
revision="$number_minor"
;;
*)
$echo "$modename: unknown library version type \`$version_type'" 1>&2
$echo "Fatal configuration error. See the $PACKAGE docs for more information." 1>&2
exit $EXIT_FAILURE
;;
esac
;;
no)
current="$2"
revision="$3"
age="$4"
;;
esac
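	# Worked example of the conversion above (hypothetical values): with
	# -version-number 3:2:1 and a linux-style $version_type, current
	# becomes 3+2=5, age=2, revision=1; the version calculation below
	# then yields major=.3 and versuffix=.3.2.1, i.e. libfoo.so.3.2.1.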
	# Check that each of these is a valid number.
case $current in
0 | [1-9] | [1-9][0-9] | [1-9][0-9][0-9]) ;;
*)
$echo "$modename: CURRENT \`$current' is not a nonnegative integer" 1>&2
$echo "$modename: \`$vinfo' is not valid version information" 1>&2
exit $EXIT_FAILURE
;;
esac
case $revision in
0 | [1-9] | [1-9][0-9] | [1-9][0-9][0-9]) ;;
*)
$echo "$modename: REVISION \`$revision' is not a nonnegative integer" 1>&2
$echo "$modename: \`$vinfo' is not valid version information" 1>&2
exit $EXIT_FAILURE
;;
esac
case $age in
0 | [1-9] | [1-9][0-9] | [1-9][0-9][0-9]) ;;
*)
$echo "$modename: AGE \`$age' is not a nonnegative integer" 1>&2
$echo "$modename: \`$vinfo' is not valid version information" 1>&2
exit $EXIT_FAILURE
;;
esac
if test "$age" -gt "$current"; then
$echo "$modename: AGE \`$age' is greater than the current interface number \`$current'" 1>&2
$echo "$modename: \`$vinfo' is not valid version information" 1>&2
exit $EXIT_FAILURE
fi
# Calculate the version variables.
major=
versuffix=
verstring=
case $version_type in
none) ;;
darwin)
# Like Linux, but with the current version available in
# verstring for coding it into the library header
major=.`expr $current - $age`
versuffix="$major.$age.$revision"
# Darwin ld doesn't like 0 for these options...
minor_current=`expr $current + 1`
verstring="-compatibility_version $minor_current -current_version $minor_current.$revision"
;;
freebsd-aout)
major=".$current"
versuffix=".$current.$revision";
;;
freebsd-elf)
major=".$current"
versuffix=".$current";
;;
irix | nonstopux)
major=`expr $current - $age + 1`
case $version_type in
nonstopux) verstring_prefix=nonstopux ;;
*) verstring_prefix=sgi ;;
esac
verstring="$verstring_prefix$major.$revision"
# Add in all the interfaces that we are compatible with.
loop=$revision
while test "$loop" -ne 0; do
iface=`expr $revision - $loop`
loop=`expr $loop - 1`
verstring="$verstring_prefix$major.$iface:$verstring"
done
# Before this point, $major must not contain `.'.
major=.$major
versuffix="$major.$revision"
;;
linux)
major=.`expr $current - $age`
versuffix="$major.$age.$revision"
;;
osf)
major=.`expr $current - $age`
versuffix=".$current.$age.$revision"
verstring="$current.$age.$revision"
# Add in all the interfaces that we are compatible with.
loop=$age
while test "$loop" -ne 0; do
iface=`expr $current - $loop`
loop=`expr $loop - 1`
verstring="$verstring:${iface}.0"
done
# Make executables depend on our current version.
verstring="$verstring:${current}.0"
;;
sunos)
major=".$current"
versuffix=".$current.$revision"
;;
windows)
# Use '-' rather than '.', since we only want one
# extension on DOS 8.3 filesystems.
major=`expr $current - $age`
versuffix="-$major"
;;
*)
$echo "$modename: unknown library version type \`$version_type'" 1>&2
$echo "Fatal configuration error. See the $PACKAGE docs for more information." 1>&2
exit $EXIT_FAILURE
;;
esac
# Clear the version info if we defaulted, and they specified a release.
if test -z "$vinfo" && test -n "$release"; then
major=
case $version_type in
darwin)
# we can't check for "0.0" in archive_cmds due to quoting
# problems, so we reset it completely
verstring=
;;
*)
verstring="0.0"
;;
esac
if test "$need_version" = no; then
versuffix=
else
versuffix=".0.0"
fi
fi
# Remove version info from name if versioning should be avoided
if test "$avoid_version" = yes && test "$need_version" = no; then
major=
versuffix=
verstring=""
fi
# Check to see if the archive will have undefined symbols.
if test "$allow_undefined" = yes; then
if test "$allow_undefined_flag" = unsupported; then
$echo "$modename: warning: undefined symbols not allowed in $host shared libraries" 1>&2
build_libtool_libs=no
build_old_libs=yes
fi
else
# Don't allow undefined symbols.
allow_undefined_flag="$no_undefined_flag"
fi
fi
if test "$mode" != relink; then
# Remove our outputs, but don't remove object files since they
# may have been created when compiling PIC objects.
removelist=
tempremovelist=`$echo "$output_objdir/*"`
for p in $tempremovelist; do
case $p in
*.$objext)
;;
$output_objdir/$outputname | $output_objdir/$libname.* | $output_objdir/${libname}${release}.*)
if test "X$precious_files_regex" != "X"; then
if echo $p | $EGREP -e "$precious_files_regex" >/dev/null 2>&1
then
continue
fi
fi
removelist="$removelist $p"
;;
*) ;;
esac
done
if test -n "$removelist"; then
$show "${rm}r $removelist"
$run ${rm}r $removelist
fi
fi
# Now set the variables for building old libraries.
if test "$build_old_libs" = yes && test "$build_libtool_libs" != convenience ; then
oldlibs="$oldlibs $output_objdir/$libname.$libext"
# Transform .lo files to .o files.
oldobjs="$objs "`$echo "X$libobjs" | $SP2NL | $Xsed -e '/\.'${libext}'$/d' -e "$lo2o" | $NL2SP`
fi
# Eliminate all temporary directories.
for path in $notinst_path; do
	lib_search_path=`$echo "$lib_search_path " | ${SED} -e "s% $path % %g"`
	deplibs=`$echo "$deplibs " | ${SED} -e "s% -L$path % %g"`
	dependency_libs=`$echo "$dependency_libs " | ${SED} -e "s% -L$path % %g"`
done
if test -n "$xrpath"; then
# If the user specified any rpath flags, then add them.
temp_xrpath=
for libdir in $xrpath; do
temp_xrpath="$temp_xrpath -R$libdir"
case "$finalize_rpath " in
*" $libdir "*) ;;
*) finalize_rpath="$finalize_rpath $libdir" ;;
esac
done
if test "$hardcode_into_libs" != yes || test "$build_old_libs" = yes; then
dependency_libs="$temp_xrpath $dependency_libs"
fi
fi
# Make sure dlfiles contains only unique files that won't be dlpreopened
old_dlfiles="$dlfiles"
dlfiles=
for lib in $old_dlfiles; do
case " $dlprefiles $dlfiles " in
*" $lib "*) ;;
*) dlfiles="$dlfiles $lib" ;;
esac
done
# Make sure dlprefiles contains only unique files
old_dlprefiles="$dlprefiles"
dlprefiles=
for lib in $old_dlprefiles; do
case "$dlprefiles " in
*" $lib "*) ;;
*) dlprefiles="$dlprefiles $lib" ;;
esac
done
if test "$build_libtool_libs" = yes; then
if test -n "$rpath"; then
case $host in
*-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-*-beos*)
	  # these systems don't actually have a C library (as such)!
;;
*-*-rhapsody* | *-*-darwin1.[012])
# Rhapsody C library is in the System framework
deplibs="$deplibs -framework System"
;;
*-*-netbsd*)
# Don't link with libc until the a.out ld.so is fixed.
;;
*-*-openbsd* | *-*-freebsd*)
# Do not include libc due to us having libc/libc_r.
test "X$arg" = "X-lc" && continue
;;
*)
# Add libc to deplibs on all other systems if necessary.
if test "$build_libtool_need_lc" = "yes"; then
deplibs="$deplibs -lc"
fi
;;
esac
fi
# Transform deplibs into only deplibs that can be linked in shared.
name_save=$name
libname_save=$libname
release_save=$release
versuffix_save=$versuffix
major_save=$major
# I'm not sure if I'm treating the release correctly. I think
# release should show up in the -l (ie -lgmp5) so we don't want to
# add it in twice. Is that correct?
release=""
versuffix=""
major=""
newdeplibs=
droppeddeps=no
case $deplibs_check_method in
pass_all)
# Don't check for shared/static. Everything works.
# This might be a little naive. We might want to check
# whether the library exists or not. But this is on
# osf3 & osf4 and I'm not really sure... Just
# implementing what was already the behavior.
newdeplibs=$deplibs
;;
test_compile)
# This code stresses the "libraries are programs" paradigm to its
# limits. Maybe even breaks it. We compile a program, linking it
# against the deplibs as a proxy for the library. Then we can check
# whether they linked in statically or dynamically with ldd.
$rm conftest.c
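	# Probe technique: build a trivial program against all of $deplibs,
	# then parse the `ldd` output to see which -l libraries the dynamic
	# linker actually recorded as shared dependencies; any library that
	# does not show up there is dropped from $newdeplibs.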
	cat > conftest.c <<EOF
	int main() { return 0; }
EOF
$rm conftest
$LTCC -o conftest conftest.c $deplibs
if test "$?" -eq 0 ; then
ldd_output=`ldd conftest`
for i in $deplibs; do
name="`expr $i : '-l\(.*\)'`"
# If $name is empty we are operating on a -L argument.
if test "$name" != "" && test "$name" -ne "0"; then
if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then
case " $predeps $postdeps " in
*" $i "*)
newdeplibs="$newdeplibs $i"
i=""
;;
esac
fi
if test -n "$i" ; then
libname=`eval \\$echo \"$libname_spec\"`
deplib_matches=`eval \\$echo \"$library_names_spec\"`
set dummy $deplib_matches
deplib_match=$2
if test `expr "$ldd_output" : ".*$deplib_match"` -ne 0 ; then
newdeplibs="$newdeplibs $i"
else
droppeddeps=yes
$echo
$echo "*** Warning: dynamic linker does not accept needed library $i."
$echo "*** I have the capability to make that library automatically link in when"
$echo "*** you link to this library. But I can only do this if you have a"
$echo "*** shared version of the library, which I believe you do not have"
$echo "*** because a test_compile did reveal that the linker did not use it for"
$echo "*** its dynamic dependency list that programs get resolved with at runtime."
fi
fi
else
newdeplibs="$newdeplibs $i"
fi
done
else
# Error occurred in the first compile. Let's try to salvage
# the situation: Compile a separate program for each library.
for i in $deplibs; do
name="`expr $i : '-l\(.*\)'`"
# If $name is empty we are operating on a -L argument.
if test "$name" != "" && test "$name" != "0"; then
$rm conftest
$LTCC -o conftest conftest.c $i
# Did it work?
if test "$?" -eq 0 ; then
ldd_output=`ldd conftest`
if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then
case " $predeps $postdeps " in
*" $i "*)
newdeplibs="$newdeplibs $i"
i=""
;;
esac
fi
if test -n "$i" ; then
libname=`eval \\$echo \"$libname_spec\"`
deplib_matches=`eval \\$echo \"$library_names_spec\"`
set dummy $deplib_matches
deplib_match=$2
if test `expr "$ldd_output" : ".*$deplib_match"` -ne 0 ; then
newdeplibs="$newdeplibs $i"
else
droppeddeps=yes
$echo
$echo "*** Warning: dynamic linker does not accept needed library $i."
$echo "*** I have the capability to make that library automatically link in when"
$echo "*** you link to this library. But I can only do this if you have a"
$echo "*** shared version of the library, which you do not appear to have"
$echo "*** because a test_compile did reveal that the linker did not use this one"
$echo "*** as a dynamic dependency that programs can get resolved with at runtime."
fi
fi
else
droppeddeps=yes
$echo
$echo "*** Warning! Library $i is needed by this library but I was not able to"
$echo "*** make it link in! You will probably need to install it or some"
$echo "*** library that it depends on before this library will be fully"
$echo "*** functional. Installing it before continuing would be even better."
fi
else
newdeplibs="$newdeplibs $i"
fi
done
fi
;;
file_magic*)
set dummy $deplibs_check_method
file_magic_regex=`expr "$deplibs_check_method" : "$2 \(.*\)"`
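	# Example (hypothetical value): with deplibs_check_method set to
	#   'file_magic ELF [0-9][0-9]*-bit [LM]SB shared object'
	# the expr above strips the leading 'file_magic ' word, leaving the
	# regex that candidate files must match after being run through
	# $file_magic_cmd.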
for a_deplib in $deplibs; do
name="`expr $a_deplib : '-l\(.*\)'`"
# If $name is empty we are operating on a -L argument.
if test "$name" != "" && test "$name" != "0"; then
if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then
case " $predeps $postdeps " in
*" $a_deplib "*)
newdeplibs="$newdeplibs $a_deplib"
a_deplib=""
;;
esac
fi
if test -n "$a_deplib" ; then
libname=`eval \\$echo \"$libname_spec\"`
for i in $lib_search_path $sys_lib_search_path $shlib_search_path; do
potential_libs=`ls $i/$libname[.-]* 2>/dev/null`
for potent_lib in $potential_libs; do
# Follow soft links.
if ls -lLd "$potent_lib" 2>/dev/null \
| grep " -> " >/dev/null; then
continue
fi
# The statement above tries to avoid entering an
# endless loop below, in case of cyclic links.
# We might still enter an endless loop, since a link
# loop can be closed while we follow links,
# but so what?
potlib="$potent_lib"
while test -h "$potlib" 2>/dev/null; do
potliblink=`ls -ld $potlib | ${SED} 's/.* -> //'`
case $potliblink in
[\\/]* | [A-Za-z]:[\\/]*) potlib="$potliblink";;
*) potlib=`$echo "X$potlib" | $Xsed -e 's,[^/]*$,,'`"$potliblink";;
esac
done
if eval $file_magic_cmd \"\$potlib\" 2>/dev/null \
| ${SED} 10q \
| $EGREP "$file_magic_regex" > /dev/null; then
newdeplibs="$newdeplibs $a_deplib"
a_deplib=""
break 2
fi
done
done
fi
if test -n "$a_deplib" ; then
droppeddeps=yes
$echo
$echo "*** Warning: linker path does not have real file for library $a_deplib."
$echo "*** I have the capability to make that library automatically link in when"
$echo "*** you link to this library. But I can only do this if you have a"
$echo "*** shared version of the library, which you do not appear to have"
$echo "*** because I did check the linker path looking for a file starting"
if test -z "$potlib" ; then
$echo "*** with $libname but no candidates were found. (...for file magic test)"
else
$echo "*** with $libname and none of the candidates passed a file format test"
$echo "*** using a file magic. Last file checked: $potlib"
fi
fi
else
# Add a -L argument.
newdeplibs="$newdeplibs $a_deplib"
fi
done # Gone through all deplibs.
;;
match_pattern*)
set dummy $deplibs_check_method
match_pattern_regex=`expr "$deplibs_check_method" : "$2 \(.*\)"`
for a_deplib in $deplibs; do
name="`expr $a_deplib : '-l\(.*\)'`"
# If $name is empty we are operating on a -L argument.
if test -n "$name" && test "$name" != "0"; then
if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then
case " $predeps $postdeps " in
*" $a_deplib "*)
newdeplibs="$newdeplibs $a_deplib"
a_deplib=""
;;
esac
fi
if test -n "$a_deplib" ; then
libname=`eval \\$echo \"$libname_spec\"`
for i in $lib_search_path $sys_lib_search_path $shlib_search_path; do
potential_libs=`ls $i/$libname[.-]* 2>/dev/null`
for potent_lib in $potential_libs; do
potlib="$potent_lib" # see symlink-check above in file_magic test
if eval $echo \"$potent_lib\" 2>/dev/null \
| ${SED} 10q \
| $EGREP "$match_pattern_regex" > /dev/null; then
newdeplibs="$newdeplibs $a_deplib"
a_deplib=""
break 2
fi
done
done
fi
if test -n "$a_deplib" ; then
droppeddeps=yes
$echo
$echo "*** Warning: linker path does not have real file for library $a_deplib."
$echo "*** I have the capability to make that library automatically link in when"
$echo "*** you link to this library. But I can only do this if you have a"
$echo "*** shared version of the library, which you do not appear to have"
$echo "*** because I did check the linker path looking for a file starting"
if test -z "$potlib" ; then
$echo "*** with $libname but no candidates were found. (...for regex pattern test)"
else
$echo "*** with $libname and none of the candidates passed a file format test"
$echo "*** using a regex pattern. Last file checked: $potlib"
fi
fi
else
# Add a -L argument.
newdeplibs="$newdeplibs $a_deplib"
fi
done # Gone through all deplibs.
;;
none | unknown | *)
newdeplibs=""
tmp_deplibs=`$echo "X $deplibs" | $Xsed -e 's/ -lc$//' \
-e 's/ -[LR][^ ]*//g'`
if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then
for i in $predeps $postdeps ; do
# can't use Xsed below, because $i might contain '/'
tmp_deplibs=`$echo "X $tmp_deplibs" | ${SED} -e "1s,^X,," -e "s,$i,,"`
done
fi
if $echo "X $tmp_deplibs" | $Xsed -e 's/[ ]//g' \
| grep . >/dev/null; then
$echo
if test "X$deplibs_check_method" = "Xnone"; then
$echo "*** Warning: inter-library dependencies are not supported in this platform."
else
$echo "*** Warning: inter-library dependencies are not known to be supported."
fi
$echo "*** All declared inter-library dependencies are being dropped."
droppeddeps=yes
fi
;;
esac
versuffix=$versuffix_save
major=$major_save
release=$release_save
libname=$libname_save
name=$name_save
case $host in
*-*-rhapsody* | *-*-darwin1.[012])
	# On Rhapsody replace the C library with the System framework
newdeplibs=`$echo "X $newdeplibs" | $Xsed -e 's/ -lc / -framework System /'`
;;
esac
if test "$droppeddeps" = yes; then
if test "$module" = yes; then
$echo
$echo "*** Warning: libtool could not satisfy all declared inter-library"
$echo "*** dependencies of module $libname. Therefore, libtool will create"
$echo "*** a static module, that should work as long as the dlopening"
$echo "*** application is linked with the -dlopen flag."
if test -z "$global_symbol_pipe"; then
$echo
$echo "*** However, this would only work if libtool was able to extract symbol"
$echo "*** lists from a program, using \`nm' or equivalent, but libtool could"
$echo "*** not find such a program. So, this module is probably useless."
$echo "*** \`nm' from GNU binutils and a full rebuild may help."
fi
if test "$build_old_libs" = no; then
oldlibs="$output_objdir/$libname.$libext"
build_libtool_libs=module
build_old_libs=yes
else
build_libtool_libs=no
fi
else
$echo "*** The inter-library dependencies that have been dropped here will be"
$echo "*** automatically added whenever a program is linked with this library"
$echo "*** or is declared to -dlopen it."
if test "$allow_undefined" = no; then
$echo
$echo "*** Since this library must not contain undefined symbols,"
$echo "*** because either the platform does not support them or"
$echo "*** it was explicitly requested with -no-undefined,"
$echo "*** libtool will only create a static version of it."
if test "$build_old_libs" = no; then
oldlibs="$output_objdir/$libname.$libext"
build_libtool_libs=module
build_old_libs=yes
else
build_libtool_libs=no
fi
fi
fi
fi
# Done checking deplibs!
deplibs=$newdeplibs
fi
# All the library-specific variables (install_libdir is set above).
library_names=
old_library=
dlname=
# Test again, we may have decided not to build it any more
if test "$build_libtool_libs" = yes; then
if test "$hardcode_into_libs" = yes; then
# Hardcode the library paths
hardcode_libdirs=
dep_rpath=
rpath="$finalize_rpath"
test "$mode" != relink && rpath="$compile_rpath$rpath"
for libdir in $rpath; do
if test -n "$hardcode_libdir_flag_spec"; then
if test -n "$hardcode_libdir_separator"; then
if test -z "$hardcode_libdirs"; then
hardcode_libdirs="$libdir"
else
# Just accumulate the unique libdirs.
case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in
*"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*)
;;
*)
hardcode_libdirs="$hardcode_libdirs$hardcode_libdir_separator$libdir"
;;
esac
fi
else
eval flag=\"$hardcode_libdir_flag_spec\"
dep_rpath="$dep_rpath $flag"
fi
elif test -n "$runpath_var"; then
case "$perm_rpath " in
*" $libdir "*) ;;
*) perm_rpath="$perm_rpath $libdir" ;;
esac
fi
done
# Substitute the hardcoded libdirs into the rpath.
if test -n "$hardcode_libdir_separator" &&
test -n "$hardcode_libdirs"; then
libdir="$hardcode_libdirs"
if test -n "$hardcode_libdir_flag_spec_ld"; then
eval dep_rpath=\"$hardcode_libdir_flag_spec_ld\"
else
eval dep_rpath=\"$hardcode_libdir_flag_spec\"
fi
fi
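	  # Example of the accumulation above (hypothetical values): with
	  # hardcode_libdir_flag_spec='-R$libdir' and
	  # hardcode_libdir_separator=':', the directories /opt/lib and
	  # /usr/local/lib collapse into the single flag
	  # -R/opt/lib:/usr/local/lib rather than one -R flag per directory.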
if test -n "$runpath_var" && test -n "$perm_rpath"; then
# We should set the runpath_var.
rpath=
for dir in $perm_rpath; do
rpath="$rpath$dir:"
done
eval "$runpath_var='$rpath\$$runpath_var'; export $runpath_var"
fi
test -n "$dep_rpath" && deplibs="$dep_rpath $deplibs"
fi
shlibpath="$finalize_shlibpath"
test "$mode" != relink && shlibpath="$compile_shlibpath$shlibpath"
if test -n "$shlibpath"; then
eval "$shlibpath_var='$shlibpath\$$shlibpath_var'; export $shlibpath_var"
fi
# Get the real and link names of the library.
eval shared_ext=\"$shrext_cmds\"
eval library_names=\"$library_names_spec\"
set dummy $library_names
realname="$2"
shift; shift
if test -n "$soname_spec"; then
eval soname=\"$soname_spec\"
else
soname="$realname"
fi
if test -z "$dlname"; then
dlname=$soname
fi
lib="$output_objdir/$realname"
for link
do
linknames="$linknames $link"
done
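      # With library_names such as 'libfoo.so.1.2.3 libfoo.so.1 libfoo.so'
      # (illustrative), realname is the first entry and linknames collects
      # the remaining aliases; symlinks from each linkname to $realname are
      # created after the archive commands have run.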
# Use standard objects if they are pic
test -z "$pic_flag" && libobjs=`$echo "X$libobjs" | $SP2NL | $Xsed -e "$lo2o" | $NL2SP`
# Prepare the list of exported symbols
if test -z "$export_symbols"; then
if test "$always_export_symbols" = yes || test -n "$export_symbols_regex"; then
$show "generating symbol list for \`$libname.la'"
export_symbols="$output_objdir/$libname.exp"
$run $rm $export_symbols
cmds=$export_symbols_cmds
save_ifs="$IFS"; IFS='~'
for cmd in $cmds; do
IFS="$save_ifs"
eval cmd=\"$cmd\"
if len=`expr "X$cmd" : ".*"` &&
test "$len" -le "$max_cmd_len" || test "$max_cmd_len" -le -1; then
$show "$cmd"
$run eval "$cmd" || exit $?
skipped_export=false
else
# The command line is too long to execute in one step.
$show "using reloadable object file for export list..."
skipped_export=:
fi
done
IFS="$save_ifs"
if test -n "$export_symbols_regex"; then
$show "$EGREP -e \"$export_symbols_regex\" \"$export_symbols\" > \"${export_symbols}T\""
$run eval '$EGREP -e "$export_symbols_regex" "$export_symbols" > "${export_symbols}T"'
$show "$mv \"${export_symbols}T\" \"$export_symbols\""
$run eval '$mv "${export_symbols}T" "$export_symbols"'
fi
fi
fi
if test -n "$export_symbols" && test -n "$include_expsyms"; then
$run eval '$echo "X$include_expsyms" | $SP2NL >> "$export_symbols"'
fi
tmp_deplibs=
for test_deplib in $deplibs; do
case " $convenience " in
*" $test_deplib "*) ;;
*)
tmp_deplibs="$tmp_deplibs $test_deplib"
;;
esac
done
deplibs="$tmp_deplibs"
if test -n "$convenience"; then
if test -n "$whole_archive_flag_spec"; then
save_libobjs=$libobjs
eval libobjs=\"\$libobjs $whole_archive_flag_spec\"
else
gentop="$output_objdir/${outputname}x"
$show "${rm}r $gentop"
$run ${rm}r "$gentop"
$show "$mkdir $gentop"
$run $mkdir "$gentop"
status=$?
if test "$status" -ne 0 && test ! -d "$gentop"; then
exit $status
fi
generated="$generated $gentop"
for xlib in $convenience; do
# Extract the objects.
case $xlib in
[\\/]* | [A-Za-z]:[\\/]*) xabs="$xlib" ;;
*) xabs=`pwd`"/$xlib" ;;
esac
xlib=`$echo "X$xlib" | $Xsed -e 's%^.*/%%'`
xdir="$gentop/$xlib"
$show "${rm}r $xdir"
$run ${rm}r "$xdir"
$show "$mkdir $xdir"
$run $mkdir "$xdir"
status=$?
if test "$status" -ne 0 && test ! -d "$xdir"; then
exit $status
fi
	    # We extract just the conflicting names separately and leave the
	    # unique names alone: it is faster to let $AR extract those
	    # automatically in one run.
$show "(cd $xdir && $AR x $xabs)"
$run eval "(cd \$xdir && $AR x \$xabs)" || exit $?
if ($AR t "$xabs" | sort | sort -uc >/dev/null 2>&1); then
:
else
$echo "$modename: warning: object name conflicts; renaming object files" 1>&2
$echo "$modename: warning: to ensure that they will not overwrite" 1>&2
$AR t "$xabs" | sort | uniq -cd | while read -r count name
do
i=1
while test "$i" -le "$count"
do
# Put our $i before any first dot (extension)
# Never overwrite any file
name_to="$name"
while test "X$name_to" = "X$name" || test -f "$xdir/$name_to"
do
name_to=`$echo "X$name_to" | $Xsed -e "s/\([^.]*\)/\1-$i/"`
done
$show "(cd $xdir && $AR xN $i $xabs '$name' && $mv '$name' '$name_to')"
$run eval "(cd \$xdir && $AR xN $i \$xabs '$name' && $mv '$name' '$name_to')" || exit $?
i=`expr $i + 1`
done
done
fi
libobjs="$libobjs "`find $xdir -name \*.$objext -print -o -name \*.lo -print | $NL2SP`
done
fi
fi
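      # The extraction above guards against archives holding several members
      # with the same basename: a plain `$AR x` would silently overwrite
      # them, so conflicting names are re-extracted one instance at a time
      # with `$AR xN` and renamed, e.g. (hypothetically) a duplicate foo.o
      # becomes foo-1.o.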
if test "$thread_safe" = yes && test -n "$thread_safe_flag_spec"; then
eval flag=\"$thread_safe_flag_spec\"
linker_flags="$linker_flags $flag"
fi
# Make a backup of the uninstalled library when relinking
if test "$mode" = relink; then
$run eval '(cd $output_objdir && $rm ${realname}U && $mv $realname ${realname}U)' || exit $?
fi
# Do each of the archive commands.
if test "$module" = yes && test -n "$module_cmds" ; then
if test -n "$export_symbols" && test -n "$module_expsym_cmds"; then
eval test_cmds=\"$module_expsym_cmds\"
cmds=$module_expsym_cmds
else
eval test_cmds=\"$module_cmds\"
cmds=$module_cmds
fi
else
if test -n "$export_symbols" && test -n "$archive_expsym_cmds"; then
eval test_cmds=\"$archive_expsym_cmds\"
cmds=$archive_expsym_cmds
else
eval test_cmds=\"$archive_cmds\"
cmds=$archive_cmds
fi
fi
if test "X$skipped_export" != "X:" && len=`expr "X$test_cmds" : ".*"` &&
test "$len" -le "$max_cmd_len" || test "$max_cmd_len" -le -1; then
:
else
# The command line is too long to link in one step, link piecewise.
$echo "creating reloadable object files..."
# Save the value of $output and $libobjs because we want to
# use them later. If we have whole_archive_flag_spec, we
# want to use save_libobjs as it was before
# whole_archive_flag_spec was expanded, because we can't
# assume the linker understands whole_archive_flag_spec.
# This may have to be revisited, in case too many
# convenience libraries get linked in and end up exceeding
# the spec.
if test -z "$convenience" || test -z "$whole_archive_flag_spec"; then
save_libobjs=$libobjs
fi
save_output=$output
# Clear the reloadable object creation command queue and
# initialize k to one.
test_cmds=
concat_cmds=
objlist=
delfiles=
last_robj=
k=1
output=$output_objdir/$save_output-${k}.$objext
# Loop over the list of objects to be linked.
for obj in $save_libobjs
do
eval test_cmds=\"$reload_cmds $objlist $last_robj\"
if test "X$objlist" = X ||
{ len=`expr "X$test_cmds" : ".*"` &&
test "$len" -le "$max_cmd_len"; }; then
objlist="$objlist $obj"
else
# The command $test_cmds is almost too long, add a
# command to the queue.
if test "$k" -eq 1 ; then
# The first file doesn't have a previous command to add.
eval concat_cmds=\"$reload_cmds $objlist $last_robj\"
else
# All subsequent reloadable object files will link in
# the last one created.
eval concat_cmds=\"\$concat_cmds~$reload_cmds $objlist $last_robj\"
fi
last_robj=$output_objdir/$save_output-${k}.$objext
k=`expr $k + 1`
output=$output_objdir/$save_output-${k}.$objext
objlist=$obj
len=1
fi
done
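	  # The loop above greedily packs objects into chunks whose reload
	  # command stays under $max_cmd_len; each chunk becomes a reloadable
	  # object $save_output-<k>.$objext that links in its predecessor, so
	  # e.g. libfoo-2.o subsumes libfoo-1.o (names illustrative).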
# Handle the remaining objects by creating one last
# reloadable object file. All subsequent reloadable object
# files will link in the last one created.
test -z "$concat_cmds" || concat_cmds=$concat_cmds~
eval concat_cmds=\"\${concat_cmds}$reload_cmds $objlist $last_robj\"
if ${skipped_export-false}; then
$show "generating symbol list for \`$libname.la'"
export_symbols="$output_objdir/$libname.exp"
$run $rm $export_symbols
libobjs=$output
# Append the command to create the export file.
eval concat_cmds=\"\$concat_cmds~$export_symbols_cmds\"
fi
	  # Set up a command to remove the reloadable object files
# after they are used.
i=0
while test "$i" -lt "$k"
do
i=`expr $i + 1`
delfiles="$delfiles $output_objdir/$save_output-${i}.$objext"
done
$echo "creating a temporary reloadable object file: $output"
# Loop through the commands generated above and execute them.
save_ifs="$IFS"; IFS='~'
for cmd in $concat_cmds; do
IFS="$save_ifs"
$show "$cmd"
$run eval "$cmd" || exit $?
done
IFS="$save_ifs"
libobjs=$output
# Restore the value of output.
output=$save_output
if test -n "$convenience" && test -n "$whole_archive_flag_spec"; then
eval libobjs=\"\$libobjs $whole_archive_flag_spec\"
fi
# Expand the library linking commands again to reset the
# value of $libobjs for piecewise linking.
# Do each of the archive commands.
if test "$module" = yes && test -n "$module_cmds" ; then
if test -n "$export_symbols" && test -n "$module_expsym_cmds"; then
cmds=$module_expsym_cmds
else
cmds=$module_cmds
fi
else
if test -n "$export_symbols" && test -n "$archive_expsym_cmds"; then
cmds=$archive_expsym_cmds
else
cmds=$archive_cmds
fi
fi
# Append the command to remove the reloadable object files
# to the just-reset $cmds.
eval cmds=\"\$cmds~\$rm $delfiles\"
fi
save_ifs="$IFS"; IFS='~'
for cmd in $cmds; do
IFS="$save_ifs"
eval cmd=\"$cmd\"
$show "$cmd"
$run eval "$cmd" || exit $?
done
IFS="$save_ifs"
# Restore the uninstalled library and exit
if test "$mode" = relink; then
$run eval '(cd $output_objdir && $rm ${realname}T && $mv $realname ${realname}T && $mv "$realname"U $realname)' || exit $?
exit $EXIT_SUCCESS
fi
# Create links to the real library.
for linkname in $linknames; do
if test "$realname" != "$linkname"; then
$show "(cd $output_objdir && $rm $linkname && $LN_S $realname $linkname)"
$run eval '(cd $output_objdir && $rm $linkname && $LN_S $realname $linkname)' || exit $?
fi
done
# If -module or -export-dynamic was specified, set the dlname.
if test "$module" = yes || test "$export_dynamic" = yes; then
# On all known operating systems, these are identical.
dlname="$soname"
fi
fi
;;
obj)
if test -n "$deplibs"; then
$echo "$modename: warning: \`-l' and \`-L' are ignored for objects" 1>&2
fi
if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then
$echo "$modename: warning: \`-dlopen' is ignored for objects" 1>&2
fi
if test -n "$rpath"; then
$echo "$modename: warning: \`-rpath' is ignored for objects" 1>&2
fi
if test -n "$xrpath"; then
$echo "$modename: warning: \`-R' is ignored for objects" 1>&2
fi
if test -n "$vinfo"; then
$echo "$modename: warning: \`-version-info' is ignored for objects" 1>&2
fi
if test -n "$release"; then
$echo "$modename: warning: \`-release' is ignored for objects" 1>&2
fi
case $output in
*.lo)
if test -n "$objs$old_deplibs"; then
$echo "$modename: cannot build library object \`$output' from non-libtool objects" 1>&2
exit $EXIT_FAILURE
fi
libobj="$output"
obj=`$echo "X$output" | $Xsed -e "$lo2o"`
;;
*)
libobj=
obj="$output"
;;
esac
# Delete the old objects.
$run $rm $obj $libobj
# Objects from convenience libraries. This assumes
# single-version convenience libraries. Whenever we create
      # different ones for PIC/non-PIC, we'll have to duplicate
# the extraction.
reload_conv_objs=
gentop=
# reload_cmds runs $LD directly, so let us get rid of
# -Wl from whole_archive_flag_spec
wl=
if test -n "$convenience"; then
if test -n "$whole_archive_flag_spec"; then
eval reload_conv_objs=\"\$reload_objs $whole_archive_flag_spec\"
else
gentop="$output_objdir/${obj}x"
$show "${rm}r $gentop"
$run ${rm}r "$gentop"
$show "$mkdir $gentop"
$run $mkdir "$gentop"
status=$?
if test "$status" -ne 0 && test ! -d "$gentop"; then
exit $status
fi
generated="$generated $gentop"
for xlib in $convenience; do
# Extract the objects.
case $xlib in
[\\/]* | [A-Za-z]:[\\/]*) xabs="$xlib" ;;
*) xabs=`pwd`"/$xlib" ;;
esac
xlib=`$echo "X$xlib" | $Xsed -e 's%^.*/%%'`
xdir="$gentop/$xlib"
$show "${rm}r $xdir"
$run ${rm}r "$xdir"
$show "$mkdir $xdir"
$run $mkdir "$xdir"
status=$?
if test "$status" -ne 0 && test ! -d "$xdir"; then
exit $status
fi
	    # We extract just the conflicting names separately and leave the
	    # unique names alone: it is faster to let $AR extract those
	    # automatically in one run.
$show "(cd $xdir && $AR x $xabs)"
$run eval "(cd \$xdir && $AR x \$xabs)" || exit $?
if ($AR t "$xabs" | sort | sort -uc >/dev/null 2>&1); then
:
else
$echo "$modename: warning: object name conflicts; renaming object files" 1>&2
$echo "$modename: warning: to ensure that they will not overwrite" 1>&2
$AR t "$xabs" | sort | uniq -cd | while read -r count name
do
i=1
while test "$i" -le "$count"
do
# Put our $i before any first dot (extension)
# Never overwrite any file
name_to="$name"
while test "X$name_to" = "X$name" || test -f "$xdir/$name_to"
do
name_to=`$echo "X$name_to" | $Xsed -e "s/\([^.]*\)/\1-$i/"`
done
$show "(cd $xdir && $AR xN $i $xabs '$name' && $mv '$name' '$name_to')"
$run eval "(cd \$xdir && $AR xN $i \$xabs '$name' && $mv '$name' '$name_to')" || exit $?
i=`expr $i + 1`
done
done
fi
reload_conv_objs="$reload_objs "`find $xdir -name \*.$objext -print -o -name \*.lo -print | $NL2SP`
done
fi
fi
# Create the old-style object.
reload_objs="$objs$old_deplibs "`$echo "X$libobjs" | $SP2NL | $Xsed -e '/\.'${libext}$'/d' -e '/\.lib$/d' -e "$lo2o" | $NL2SP`" $reload_conv_objs" ### testsuite: skip nested quoting test
output="$obj"
cmds=$reload_cmds
save_ifs="$IFS"; IFS='~'
for cmd in $cmds; do
IFS="$save_ifs"
eval cmd=\"$cmd\"
$show "$cmd"
$run eval "$cmd" || exit $?
done
IFS="$save_ifs"
# Exit if we aren't doing a library object file.
if test -z "$libobj"; then
if test -n "$gentop"; then
$show "${rm}r $gentop"
$run ${rm}r $gentop
fi
exit $EXIT_SUCCESS
fi
if test "$build_libtool_libs" != yes; then
if test -n "$gentop"; then
$show "${rm}r $gentop"
$run ${rm}r $gentop
fi
# Create an invalid libtool object if no PIC, so that we don't
# accidentally link it into a program.
# $show "echo timestamp > $libobj"
# $run eval "echo timestamp > $libobj" || exit $?
exit $EXIT_SUCCESS
fi
if test -n "$pic_flag" || test "$pic_mode" != default; then
# Only do commands if we really have different PIC objects.
reload_objs="$libobjs $reload_conv_objs"
output="$libobj"
cmds=$reload_cmds
save_ifs="$IFS"; IFS='~'
for cmd in $cmds; do
IFS="$save_ifs"
eval cmd=\"$cmd\"
$show "$cmd"
$run eval "$cmd" || exit $?
done
IFS="$save_ifs"
fi
if test -n "$gentop"; then
$show "${rm}r $gentop"
$run ${rm}r $gentop
fi
exit $EXIT_SUCCESS
;;
prog)
case $host in
*cygwin*) output=`$echo $output | ${SED} -e 's,.exe$,,;s,$,.exe,'` ;;
esac
if test -n "$vinfo"; then
$echo "$modename: warning: \`-version-info' is ignored for programs" 1>&2
fi
if test -n "$release"; then
$echo "$modename: warning: \`-release' is ignored for programs" 1>&2
fi
if test "$preload" = yes; then
if test "$dlopen_support" = unknown && test "$dlopen_self" = unknown &&
test "$dlopen_self_static" = unknown; then
$echo "$modename: warning: \`AC_LIBTOOL_DLOPEN' not used. Assuming no dlopen support."
fi
fi
case $host in
*-*-rhapsody* | *-*-darwin1.[012])
# On Rhapsody, replace the C library with the System framework
compile_deplibs=`$echo "X $compile_deplibs" | $Xsed -e 's/ -lc / -framework System /'`
finalize_deplibs=`$echo "X $finalize_deplibs" | $Xsed -e 's/ -lc / -framework System /'`
;;
esac
case $host in
*darwin*)
# Don't allow lazy linking, it breaks C++ global constructors
if test "$tagname" = CXX ; then
compile_command="$compile_command ${wl}-bind_at_load"
finalize_command="$finalize_command ${wl}-bind_at_load"
fi
;;
esac
compile_command="$compile_command $compile_deplibs"
finalize_command="$finalize_command $finalize_deplibs"
if test -n "$rpath$xrpath"; then
# If the user specified any rpath flags, then add them.
for libdir in $rpath $xrpath; do
# This is the magic to use -rpath.
case "$finalize_rpath " in
*" $libdir "*) ;;
*) finalize_rpath="$finalize_rpath $libdir" ;;
esac
done
fi
# Now hardcode the library paths
rpath=
hardcode_libdirs=
for libdir in $compile_rpath $finalize_rpath; do
if test -n "$hardcode_libdir_flag_spec"; then
if test -n "$hardcode_libdir_separator"; then
if test -z "$hardcode_libdirs"; then
hardcode_libdirs="$libdir"
else
# Just accumulate the unique libdirs.
case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in
*"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*)
;;
*)
hardcode_libdirs="$hardcode_libdirs$hardcode_libdir_separator$libdir"
;;
esac
fi
else
eval flag=\"$hardcode_libdir_flag_spec\"
rpath="$rpath $flag"
fi
elif test -n "$runpath_var"; then
case "$perm_rpath " in
*" $libdir "*) ;;
*) perm_rpath="$perm_rpath $libdir" ;;
esac
fi
case $host in
*-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2*)
case :$dllsearchpath: in
*":$libdir:"*) ;;
*) dllsearchpath="$dllsearchpath:$libdir";;
esac
;;
esac
done
# Substitute the hardcoded libdirs into the rpath.
if test -n "$hardcode_libdir_separator" &&
test -n "$hardcode_libdirs"; then
libdir="$hardcode_libdirs"
eval rpath=\" $hardcode_libdir_flag_spec\"
fi
compile_rpath="$rpath"
rpath=
hardcode_libdirs=
for libdir in $finalize_rpath; do
if test -n "$hardcode_libdir_flag_spec"; then
if test -n "$hardcode_libdir_separator"; then
if test -z "$hardcode_libdirs"; then
hardcode_libdirs="$libdir"
else
# Just accumulate the unique libdirs.
case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in
*"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*)
;;
*)
hardcode_libdirs="$hardcode_libdirs$hardcode_libdir_separator$libdir"
;;
esac
fi
else
eval flag=\"$hardcode_libdir_flag_spec\"
rpath="$rpath $flag"
fi
elif test -n "$runpath_var"; then
case "$finalize_perm_rpath " in
*" $libdir "*) ;;
*) finalize_perm_rpath="$finalize_perm_rpath $libdir" ;;
esac
fi
done
# Substitute the hardcoded libdirs into the rpath.
if test -n "$hardcode_libdir_separator" &&
test -n "$hardcode_libdirs"; then
libdir="$hardcode_libdirs"
eval rpath=\" $hardcode_libdir_flag_spec\"
fi
finalize_rpath="$rpath"
if test -n "$libobjs" && test "$build_old_libs" = yes; then
# Transform all the library objects into standard objects.
compile_command=`$echo "X$compile_command" | $SP2NL | $Xsed -e "$lo2o" | $NL2SP`
finalize_command=`$echo "X$finalize_command" | $SP2NL | $Xsed -e "$lo2o" | $NL2SP`
fi
dlsyms=
if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then
if test -n "$NM" && test -n "$global_symbol_pipe"; then
dlsyms="${outputname}S.c"
else
$echo "$modename: not configured to extract global symbols from dlpreopened files" 1>&2
fi
fi
if test -n "$dlsyms"; then
case $dlsyms in
"") ;;
*.c)
# Discover the nlist of each of the dlfiles.
nlist="$output_objdir/${outputname}.nm"
$show "$rm $nlist ${nlist}S ${nlist}T"
$run $rm "$nlist" "${nlist}S" "${nlist}T"
# Parse the name list into a source file.
$show "creating $output_objdir/$dlsyms"
test -z "$run" && $echo > "$output_objdir/$dlsyms" "\
/* $dlsyms - symbol resolution table for \`$outputname' dlsym emulation. */
/* Generated by $PROGRAM - GNU $PACKAGE $VERSION$TIMESTAMP */
#ifdef __cplusplus
extern \"C\" {
#endif
/* Prevent the only kind of declaration conflicts we can make. */
#define lt_preloaded_symbols some_other_symbol
/* External symbol declarations for the compiler. */\
"
if test "$dlself" = yes; then
$show "generating symbol list for \`$output'"
test -z "$run" && $echo ': @PROGRAM@ ' > "$nlist"
# Add our own program objects to the symbol list.
progfiles=`$echo "X$objs$old_deplibs" | $SP2NL | $Xsed -e "$lo2o" | $NL2SP`
for arg in $progfiles; do
$show "extracting global C symbols from \`$arg'"
$run eval "$NM $arg | $global_symbol_pipe >> '$nlist'"
done
if test -n "$exclude_expsyms"; then
$run eval '$EGREP -v " ($exclude_expsyms)$" "$nlist" > "$nlist"T'
$run eval '$mv "$nlist"T "$nlist"'
fi
if test -n "$export_symbols_regex"; then
$run eval '$EGREP -e "$export_symbols_regex" "$nlist" > "$nlist"T'
$run eval '$mv "$nlist"T "$nlist"'
fi
# Prepare the list of exported symbols
if test -z "$export_symbols"; then
export_symbols="$output_objdir/$output.exp"
$run $rm $export_symbols
$run eval "${SED} -n -e '/^: @PROGRAM@$/d' -e 's/^.* \(.*\)$/\1/p' "'< "$nlist" > "$export_symbols"'
else
$run eval "${SED} -e 's/\([][.*^$]\)/\\\1/g' -e 's/^/ /' -e 's/$/$/'"' < "$export_symbols" > "$output_objdir/$output.exp"'
$run eval 'grep -f "$output_objdir/$output.exp" < "$nlist" > "$nlist"T'
$run eval 'mv "$nlist"T "$nlist"'
fi
fi
for arg in $dlprefiles; do
$show "extracting global C symbols from \`$arg'"
name=`$echo "$arg" | ${SED} -e 's%^.*/%%'`
$run eval '$echo ": $name " >> "$nlist"'
$run eval "$NM $arg | $global_symbol_pipe >> '$nlist'"
done
if test -z "$run"; then
# Make sure we have at least an empty file.
test -f "$nlist" || : > "$nlist"
if test -n "$exclude_expsyms"; then
$EGREP -v " ($exclude_expsyms)$" "$nlist" > "$nlist"T
$mv "$nlist"T "$nlist"
fi
# Try sorting and uniquifying the output.
if grep -v "^: " < "$nlist" |
if sort -k 3 </dev/null >/dev/null 2>&1; then
sort -k 3
else
sort +2
fi |
uniq > "$nlist"S; then
:
else
grep -v "^: " < "$nlist" > "$nlist"S
fi
if test -f "$nlist"S; then
eval "$global_symbol_to_cdecl"' < "$nlist"S >> "$output_objdir/$dlsyms"'
else
$echo '/* NONE */' >> "$output_objdir/$dlsyms"
fi
$echo >> "$output_objdir/$dlsyms" "\
#undef lt_preloaded_symbols
#if defined (__STDC__) && __STDC__
# define lt_ptr void *
#else
# define lt_ptr char *
# define const
#endif
/* The mapping between symbol names and symbols. */
const struct {
const char *name;
lt_ptr address;
}
lt_preloaded_symbols[] =
{\
"
eval "$global_symbol_to_c_name_address" < "$nlist" >> "$output_objdir/$dlsyms"
$echo >> "$output_objdir/$dlsyms" "\
{0, (lt_ptr) 0}
};
/* This works around a problem in the FreeBSD linker */
#ifdef FREEBSD_WORKAROUND
static const void *lt_preloaded_setup() {
return lt_preloaded_symbols;
}
#endif
#ifdef __cplusplus
}
#endif\
"
fi
pic_flag_for_symtable=
case $host in
# compiling the symbol table file with pic_flag works around
# a FreeBSD bug that causes programs to crash when -lm is
# linked before any other PIC object. But we must not use
# pic_flag when linking with -static. The problem exists in
# FreeBSD 2.2.6 and is fixed in FreeBSD 3.1.
*-*-freebsd2*|*-*-freebsd3.0*|*-*-freebsdelf3.0*)
case "$compile_command " in
*" -static "*) ;;
*) pic_flag_for_symtable=" $pic_flag -DFREEBSD_WORKAROUND";;
esac;;
*-*-hpux*)
case "$compile_command " in
*" -static "*) ;;
*) pic_flag_for_symtable=" $pic_flag";;
esac
esac
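# Illustrative example (flags assumed, not taken from a real config):
# on FreeBSD 2.2.x a non-static link would compile the symbol table as
#   cc -c -fno-builtin -fPIC -DFREEBSD_WORKAROUND myprogS.c
# whereas with -static the plain non-PIC compile is used.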
# Now compile the dynamic symbol file.
$show "(cd $output_objdir && $LTCC -c$no_builtin_flag$pic_flag_for_symtable \"$dlsyms\")"
$run eval '(cd $output_objdir && $LTCC -c$no_builtin_flag$pic_flag_for_symtable "$dlsyms")' || exit $?
# Clean up the generated files.
$show "$rm $output_objdir/$dlsyms $nlist ${nlist}S ${nlist}T"
$run $rm "$output_objdir/$dlsyms" "$nlist" "${nlist}S" "${nlist}T"
# Transform the symbol file into the correct name.
compile_command=`$echo "X$compile_command" | $Xsed -e "s%@SYMFILE@%$output_objdir/${outputname}S.${objext}%"`
finalize_command=`$echo "X$finalize_command" | $Xsed -e "s%@SYMFILE@%$output_objdir/${outputname}S.${objext}%"`
;;
*)
$echo "$modename: unknown suffix for \`$dlsyms'" 1>&2
exit $EXIT_FAILURE
;;
esac
else
# We keep going just in case the user didn't refer to
# lt_preloaded_symbols. The linker will fail if global_symbol_pipe
# really was required.
# Nullify the symbol file.
compile_command=`$echo "X$compile_command" | $Xsed -e "s% @SYMFILE@%%"`
finalize_command=`$echo "X$finalize_command" | $Xsed -e "s% @SYMFILE@%%"`
fi
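# For reference, a generated ${outputname}S.c ends up looking roughly
# like this (symbol names hypothetical):
#   const struct { const char *name; lt_ptr address; }
#   lt_preloaded_symbols[] =
#   {
#     {": @PROGRAM@", (lt_ptr) 0},
#     {"plugin_init", (lt_ptr) &plugin_init},
#     {0, (lt_ptr) 0}
#   };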
if test "$need_relink" = no || test "$build_libtool_libs" != yes; then
# Replace the output file specification.
compile_command=`$echo "X$compile_command" | $Xsed -e 's%@OUTPUT@%'"$output"'%g'`
link_command="$compile_command$compile_rpath"
# We have no uninstalled library dependencies, so finalize right now.
$show "$link_command"
$run eval "$link_command"
status=$?
# Delete the generated files.
if test -n "$dlsyms"; then
$show "$rm $output_objdir/${outputname}S.${objext}"
$run $rm "$output_objdir/${outputname}S.${objext}"
fi
exit $status
fi
if test -n "$shlibpath_var"; then
# We should set the shlibpath_var
rpath=
for dir in $temp_rpath; do
case $dir in
[\\/]* | [A-Za-z]:[\\/]*)
# Absolute path.
rpath="$rpath$dir:"
;;
*)
# Relative path: add a thisdir entry.
rpath="$rpath\$thisdir/$dir:"
;;
esac
done
temp_rpath="$rpath"
fi
if test -n "$compile_shlibpath$finalize_shlibpath"; then
compile_command="$shlibpath_var=\"$compile_shlibpath$finalize_shlibpath\$$shlibpath_var\" $compile_command"
fi
if test -n "$finalize_shlibpath"; then
finalize_command="$shlibpath_var=\"$finalize_shlibpath\$$shlibpath_var\" $finalize_command"
fi
compile_var=
finalize_var=
if test -n "$runpath_var"; then
if test -n "$perm_rpath"; then
# We should set the runpath_var.
rpath=
for dir in $perm_rpath; do
rpath="$rpath$dir:"
done
compile_var="$runpath_var=\"$rpath\$$runpath_var\" "
fi
if test -n "$finalize_perm_rpath"; then
# We should set the runpath_var.
rpath=
for dir in $finalize_perm_rpath; do
rpath="$rpath$dir:"
done
finalize_var="$runpath_var=\"$rpath\$$runpath_var\" "
fi
fi
if test "$no_install" = yes; then
# We don't need to create a wrapper script.
link_command="$compile_var$compile_command$compile_rpath"
# Replace the output file specification.
link_command=`$echo "X$link_command" | $Xsed -e 's%@OUTPUT@%'"$output"'%g'`
# Delete the old output file.
$run $rm $output
# Link the executable and exit
$show "$link_command"
$run eval "$link_command" || exit $?
exit $EXIT_SUCCESS
fi
if test "$hardcode_action" = relink; then
# Fast installation is not supported
link_command="$compile_var$compile_command$compile_rpath"
relink_command="$finalize_var$finalize_command$finalize_rpath"
$echo "$modename: warning: this platform does not like uninstalled shared libraries" 1>&2
$echo "$modename: \`$output' will be relinked during installation" 1>&2
else
if test "$fast_install" != no; then
link_command="$finalize_var$compile_command$finalize_rpath"
if test "$fast_install" = yes; then
relink_command=`$echo "X$compile_var$compile_command$compile_rpath" | $Xsed -e 's%@OUTPUT@%\$progdir/\$file%g'`
else
# fast_install is set to needless
relink_command=
fi
else
link_command="$compile_var$compile_command$compile_rpath"
relink_command="$finalize_var$finalize_command$finalize_rpath"
fi
fi
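# Summary of the choice just made (informal):
#   hardcode_action=relink -> link against the build tree now, relink
#                             with the install rpaths at install time
#   fast_install=yes       -> link with the final rpaths immediately and
#                             keep a relink command the wrapper can use
#   fast_install=no        -> behave like the relink case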
# Replace the output file specification.
link_command=`$echo "X$link_command" | $Xsed -e 's%@OUTPUT@%'"$output_objdir/$outputname"'%g'`
# Delete the old output files.
$run $rm $output $output_objdir/$outputname $output_objdir/lt-$outputname
$show "$link_command"
$run eval "$link_command" || exit $?
# Now create the wrapper script.
$show "creating $output"
# Quote the relink command for shipping.
if test -n "$relink_command"; then
# Preserve any variables that may affect compiler behavior
for var in $variables_saved_for_relink; do
if eval test -z \"\${$var+set}\"; then
relink_command="{ test -z \"\${$var+set}\" || unset $var || { $var=; export $var; }; }; $relink_command"
elif eval var_value=\$$var; test -z "$var_value"; then
relink_command="$var=; export $var; $relink_command"
else
var_value=`$echo "X$var_value" | $Xsed -e "$sed_quote_subst"`
relink_command="$var=\"$var_value\"; export $var; $relink_command"
fi
done
relink_command="(cd `pwd`; $relink_command)"
relink_command=`$echo "X$relink_command" | $Xsed -e "$sed_quote_subst"`
fi
# Quote $echo for shipping.
if test "X$echo" = "X$SHELL $progpath --fallback-echo"; then
case $progpath in
[\\/]* | [A-Za-z]:[\\/]*) qecho="$SHELL $progpath --fallback-echo";;
*) qecho="$SHELL `pwd`/$progpath --fallback-echo";;
esac
qecho=`$echo "X$qecho" | $Xsed -e "$sed_quote_subst"`
else
qecho=`$echo "X$echo" | $Xsed -e "$sed_quote_subst"`
fi
# Only actually do things if our run command is non-null.
if test -z "$run"; then
# win32 will think the script is a binary if it has
# a .exe suffix, so we strip it off here.
case $output in
*.exe) output=`$echo $output|${SED} 's,.exe$,,'` ;;
esac
# test for cygwin because mv fails without .exe extensions
case $host in
*cygwin*)
exeext=.exe
outputname=`$echo $outputname|${SED} 's,.exe$,,'` ;;
*) exeext= ;;
esac
case $host in
*cygwin* | *mingw* )
cwrappersource=`$echo ${objdir}/lt-${output}.c`
cwrapper=`$echo ${output}.exe`
$rm $cwrappersource $cwrapper
trap "$rm $cwrappersource $cwrapper; exit $EXIT_FAILURE" 1 2 15
cat > $cwrappersource <<EOF
/* $cwrappersource - temporary wrapper executable for $objdir/$outputname
Generated by $PROGRAM - GNU $PACKAGE $VERSION$TIMESTAMP
The $output program cannot be directly executed until all the libtool
libraries that it depends on are installed.
This wrapper executable should never be moved out of the build directory.
If it is, it will not operate correctly.
Currently, it simply execs the wrapper *script* "/bin/sh $output",
but could eventually absorb all of the script's functionality and
exec $objdir/$outputname directly.
*/
EOF
cat >> $cwrappersource<<"EOF"
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <malloc.h>
#include <stdarg.h>
#include <assert.h>
#if defined(PATH_MAX)
# define LT_PATHMAX PATH_MAX
#elif defined(MAXPATHLEN)
# define LT_PATHMAX MAXPATHLEN
#else
# define LT_PATHMAX 1024
#endif
#ifndef DIR_SEPARATOR
#define DIR_SEPARATOR '/'
#endif
#if defined (_WIN32) || defined (__MSDOS__) || defined (__DJGPP__) || \
defined (__OS2__)
#define HAVE_DOS_BASED_FILE_SYSTEM
#ifndef DIR_SEPARATOR_2
#define DIR_SEPARATOR_2 '\\'
#endif
#endif
#ifndef DIR_SEPARATOR_2
# define IS_DIR_SEPARATOR(ch) ((ch) == DIR_SEPARATOR)
#else /* DIR_SEPARATOR_2 */
# define IS_DIR_SEPARATOR(ch) \
(((ch) == DIR_SEPARATOR) || ((ch) == DIR_SEPARATOR_2))
#endif /* DIR_SEPARATOR_2 */
#define XMALLOC(type, num) ((type *) xmalloc ((num) * sizeof(type)))
#define XFREE(stale) do { \
if (stale) { free ((void *) stale); stale = 0; } \
} while (0)
const char *program_name = NULL;
void * xmalloc (size_t num);
char * xstrdup (const char *string);
char * basename (const char *name);
char * fnqualify(const char *path);
char * strendzap(char *str, const char *pat);
void lt_fatal (const char *message, ...);
int
main (int argc, char *argv[])
{
char **newargz;
int i;
program_name = (char *) xstrdup ((char *) basename (argv[0]));
newargz = XMALLOC(char *, argc+2);
EOF
cat >> $cwrappersource <<EOF
newargz[0] = "$SHELL";
EOF
cat >> $cwrappersource <<"EOF"
newargz[1] = fnqualify(argv[0]);
/* we know the script has the same name, without the .exe */
/* so make sure newargz[1] doesn't end in .exe */
strendzap(newargz[1],".exe");
for (i = 1; i < argc; i++)
newargz[i+1] = xstrdup(argv[i]);
newargz[argc+1] = NULL;
EOF
cat >> $cwrappersource <<EOF
execv("$SHELL",newargz);
EOF
cat >> $cwrappersource <<"EOF"
}
void *
xmalloc (size_t num)
{
void * p = (void *) malloc (num);
if (!p)
lt_fatal ("Memory exhausted");
return p;
}
char *
xstrdup (const char *string)
{
return string ? strcpy ((char *) xmalloc (strlen (string) + 1), string) : NULL;
}
char *
basename (const char *name)
{
const char *base;
#if defined (HAVE_DOS_BASED_FILE_SYSTEM)
/* Skip over the disk name in MSDOS pathnames. */
if (isalpha (name[0]) && name[1] == ':')
name += 2;
#endif
for (base = name; *name; name++)
if (IS_DIR_SEPARATOR (*name))
base = name + 1;
return (char *) base;
}
char *
fnqualify(const char *path)
{
size_t size;
char *p;
char tmp[LT_PATHMAX + 1];
assert(path != NULL);
/* Is it qualified already? */
#if defined (HAVE_DOS_BASED_FILE_SYSTEM)
if (isalpha (path[0]) && path[1] == ':')
return xstrdup (path);
#endif
if (IS_DIR_SEPARATOR (path[0]))
return xstrdup (path);
/* prepend the current directory */
/* doesn't handle '~' */
if (getcwd (tmp, LT_PATHMAX) == NULL)
lt_fatal ("getcwd failed");
size = strlen(tmp) + 1 + strlen(path) + 1; /* +2 for '/' and '\0' */
p = XMALLOC(char, size);
sprintf(p, "%s%c%s", tmp, DIR_SEPARATOR, path);
return p;
}
char *
strendzap(char *str, const char *pat)
{
size_t len, patlen;
assert(str != NULL);
assert(pat != NULL);
len = strlen(str);
patlen = strlen(pat);
if (patlen <= len)
{
str += len - patlen;
if (strcmp(str, pat) == 0)
*str = '\0';
}
return str;
}
static void
lt_error_core (int exit_status, const char * mode,
const char * message, va_list ap)
{
fprintf (stderr, "%s: %s: ", program_name, mode);
vfprintf (stderr, message, ap);
fprintf (stderr, ".\n");
if (exit_status >= 0)
exit (exit_status);
}
void
lt_fatal (const char *message, ...)
{
va_list ap;
va_start (ap, message);
lt_error_core (EXIT_FAILURE, "FATAL", message, ap);
va_end (ap);
}
EOF
# we should really use a build-platform specific compiler
# here, but OTOH, the wrappers (shell script and this C one)
# are only useful if you want to execute the "real" binary.
# Since the "real" binary is built for $host, then this
# wrapper might as well be built for $host, too.
$run $LTCC -s -o $cwrapper $cwrappersource
;;
esac
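# Net effect on cygwin/mingw (sketch): running the compiled wrapper
#   $ ./myprog.exe --foo
# simply does execv("$SHELL", {"$SHELL", "/build/path/myprog", "--foo"}),
# handing control to the shell wrapper script written below.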
$rm $output
trap "$rm $output; exit $EXIT_FAILURE" 1 2 15
$echo > $output "\
#! $SHELL
# $output - temporary wrapper script for $objdir/$outputname
# Generated by $PROGRAM - GNU $PACKAGE $VERSION$TIMESTAMP
#
# The $output program cannot be directly executed until all the libtool
# libraries that it depends on are installed.
#
# This wrapper script should never be moved out of the build directory.
# If it is, it will not operate correctly.
# Sed substitution that helps us do robust quoting. It backslashifies
# metacharacters that are still active within double-quoted strings.
Xsed='${SED} -e 1s/^X//'
sed_quote_subst='$sed_quote_subst'
# The HP-UX ksh and POSIX shell print the target directory to stdout
# if CDPATH is set.
if test \"\${CDPATH+set}\" = set; then CDPATH=:; export CDPATH; fi
relink_command=\"$relink_command\"
# This environment variable determines our operation mode.
if test \"\$libtool_install_magic\" = \"$magic\"; then
# install mode needs the following variable:
notinst_deplibs='$notinst_deplibs'
else
# When we are sourced in execute mode, \$file and \$echo are already set.
if test \"\$libtool_execute_magic\" != \"$magic\"; then
echo=\"$qecho\"
file=\"\$0\"
# Make sure echo works.
if test \"X\$1\" = X--no-reexec; then
# Discard the --no-reexec flag, and continue.
shift
elif test \"X\`(\$echo '\t') 2>/dev/null\`\" = 'X\t'; then
# Yippee, \$echo works!
:
else
# Restart under the correct shell, and then maybe \$echo will work.
exec $SHELL \"\$0\" --no-reexec \${1+\"\$@\"}
fi
fi\
"
$echo >> $output "\
# Find the directory that this script lives in.
thisdir=\`\$echo \"X\$file\" | \$Xsed -e 's%/[^/]*$%%'\`
test \"x\$thisdir\" = \"x\$file\" && thisdir=.
# Follow symbolic links until we get to the real thisdir.
file=\`ls -ld \"\$file\" | ${SED} -n 's/.*-> //p'\`
while test -n \"\$file\"; do
destdir=\`\$echo \"X\$file\" | \$Xsed -e 's%/[^/]*\$%%'\`
# If there was a directory component, then change thisdir.
if test \"x\$destdir\" != \"x\$file\"; then
case \"\$destdir\" in
[\\\\/]* | [A-Za-z]:[\\\\/]*) thisdir=\"\$destdir\" ;;
*) thisdir=\"\$thisdir/\$destdir\" ;;
esac
fi
file=\`\$echo \"X\$file\" | \$Xsed -e 's%^.*/%%'\`
file=\`ls -ld \"\$thisdir/\$file\" | ${SED} -n 's/.*-> //p'\`
done
# Try to get the absolute directory name.
absdir=\`cd \"\$thisdir\" && pwd\`
test -n \"\$absdir\" && thisdir=\"\$absdir\"
"
if test "$fast_install" = yes; then
$echo >> $output "\
program=lt-'$outputname'$exeext
progdir=\"\$thisdir/$objdir\"
if test ! -f \"\$progdir/\$program\" || \\
{ file=\`ls -1dt \"\$progdir/\$program\" \"\$progdir/../\$program\" 2>/dev/null | ${SED} 1q\`; \\
test \"X\$file\" != \"X\$progdir/\$program\"; }; then
file=\"\$\$-\$program\"
if test ! -d \"\$progdir\"; then
$mkdir \"\$progdir\"
else
$rm \"\$progdir/\$file\"
fi"
$echo >> $output "\
# relink executable if necessary
if test -n \"\$relink_command\"; then
if relink_command_output=\`eval \$relink_command 2>&1\`; then :
else
$echo \"\$relink_command_output\" >&2
$rm \"\$progdir/\$file\"
exit $EXIT_FAILURE
fi
fi
$mv \"\$progdir/\$file\" \"\$progdir/\$program\" 2>/dev/null ||
{ $rm \"\$progdir/\$program\";
$mv \"\$progdir/\$file\" \"\$progdir/\$program\"; }
$rm \"\$progdir/\$file\"
fi"
else
$echo >> $output "\
program='$outputname'
progdir=\"\$thisdir/$objdir\"
"
fi
$echo >> $output "\
if test -f \"\$progdir/\$program\"; then"
# Export our shlibpath_var if we have one.
if test "$shlibpath_overrides_runpath" = yes && test -n "$shlibpath_var" && test -n "$temp_rpath"; then
$echo >> $output "\
# Add our own library path to $shlibpath_var
$shlibpath_var=\"$temp_rpath\$$shlibpath_var\"
# Some systems cannot cope with colon-terminated $shlibpath_var
# The second colon is a workaround for a bug in BeOS R4 sed
$shlibpath_var=\`\$echo \"X\$$shlibpath_var\" | \$Xsed -e 's/::*\$//'\`
export $shlibpath_var
"
fi
# fixup the dll searchpath if we need to.
if test -n "$dllsearchpath"; then
$echo >> $output "\
# Add the dll search path components to the executable PATH
PATH=$dllsearchpath:\$PATH
"
fi
$echo >> $output "\
if test \"\$libtool_execute_magic\" != \"$magic\"; then
# Run the actual program with our arguments.
"
case $host in
# Backslashes separate directories on plain windows
*-*-mingw | *-*-os2*)
$echo >> $output "\
exec \$progdir\\\\\$program \${1+\"\$@\"}
"
;;
*)
$echo >> $output "\
exec \$progdir/\$program \${1+\"\$@\"}
"
;;
esac
$echo >> $output "\
\$echo \"\$0: cannot exec \$program \${1+\"\$@\"}\"
exit $EXIT_FAILURE
fi
else
# The program doesn't exist.
\$echo \"\$0: error: \$progdir/\$program does not exist\" 1>&2
\$echo \"This script is just a wrapper for \$program.\" 1>&2
$echo \"See the $PACKAGE documentation for more information.\" 1>&2
exit $EXIT_FAILURE
fi
fi\
"
chmod +x $output
fi
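# At run time the wrapper behaves roughly like this (sketch; variable
# name assumes shlibpath_var=LD_LIBRARY_PATH):
#   $ ./myprog args...
#   -> LD_LIBRARY_PATH="/build/.libs:$LD_LIBRARY_PATH" exec /build/.libs/myprog args...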
exit $EXIT_SUCCESS
;;
esac
# See if we need to build an old-fashioned archive.
for oldlib in $oldlibs; do
if test "$build_libtool_libs" = convenience; then
oldobjs="$libobjs_save"
addlibs="$convenience"
build_libtool_libs=no
else
if test "$build_libtool_libs" = module; then
oldobjs="$libobjs_save"
build_libtool_libs=no
else
oldobjs="$old_deplibs $non_pic_objects"
fi
addlibs="$old_convenience"
fi
if test -n "$addlibs"; then
gentop="$output_objdir/${outputname}x"
$show "${rm}r $gentop"
$run ${rm}r "$gentop"
$show "$mkdir $gentop"
$run $mkdir "$gentop"
status=$?
if test "$status" -ne 0 && test ! -d "$gentop"; then
exit $status
fi
generated="$generated $gentop"
# Add in members from convenience archives.
for xlib in $addlibs; do
# Extract the objects.
case $xlib in
[\\/]* | [A-Za-z]:[\\/]*) xabs="$xlib" ;;
*) xabs=`pwd`"/$xlib" ;;
esac
xlib=`$echo "X$xlib" | $Xsed -e 's%^.*/%%'`
xdir="$gentop/$xlib"
$show "${rm}r $xdir"
$run ${rm}r "$xdir"
$show "$mkdir $xdir"
$run $mkdir "$xdir"
status=$?
if test "$status" -ne 0 && test ! -d "$xdir"; then
exit $status
fi
# We will extract only the conflicting member names separately and
# leave the unique names untouched; it is faster to let $AR extract
# those automatically in one run.
$show "(cd $xdir && $AR x $xabs)"
$run eval "(cd \$xdir && $AR x \$xabs)" || exit $?
if ($AR t "$xabs" | sort | sort -uc >/dev/null 2>&1); then
:
else
$echo "$modename: warning: object name conflicts; renaming object files" 1>&2
$echo "$modename: warning: to ensure that they will not overwrite" 1>&2
$AR t "$xabs" | sort | uniq -cd | while read -r count name
do
i=1
while test "$i" -le "$count"
do
# Insert -$i before the first dot (the extension).
# Never overwrite an existing file.
name_to="$name"
while test "X$name_to" = "X$name" || test -f "$xdir/$name_to"
do
name_to=`$echo "X$name_to" | $Xsed -e "s/\([^.]*\)/\1-$i/"`
done
$show "(cd $xdir && $AR xN $i $xabs '$name' && $mv '$name' '$name_to')"
$run eval "(cd \$xdir && $AR xN $i \$xabs '$name' && $mv '$name' '$name_to')" || exit $?
i=`expr $i + 1`
done
done
fi
oldobjs="$oldobjs "`find $xdir -name \*.${objext} -print -o -name \*.lo -print | $NL2SP`
done
fi
# Do each command in the archive commands.
if test -n "$old_archive_from_new_cmds" && test "$build_libtool_libs" = yes; then
cmds=$old_archive_from_new_cmds
else
eval cmds=\"$old_archive_cmds\"
if len=`expr "X$cmds" : ".*"` &&
test "$len" -le "$max_cmd_len" || test "$max_cmd_len" -le -1; then
cmds=$old_archive_cmds
else
# the command line is too long to link in one step, link in parts
$echo "using piecewise archive linking..."
save_RANLIB=$RANLIB
RANLIB=:
objlist=
concat_cmds=
save_oldobjs=$oldobjs
# GNU ar 2.10+ was changed to match POSIX; thus no paths are
# encoded into archives. This makes 'ar r' malfunction in
# this piecewise linking case whenever conflicting object
# names appear in distinct ar calls; check, warn and compensate.
if (for obj in $save_oldobjs
do
$echo "X$obj" | $Xsed -e 's%^.*/%%'
done | sort | sort -uc >/dev/null 2>&1); then
:
else
$echo "$modename: warning: object name conflicts; overriding AR_FLAGS to 'cq'" 1>&2
$echo "$modename: warning: to ensure that POSIX-compatible ar will work" 1>&2
AR_FLAGS=cq
fi
# Is there a better way of finding the last object in the list?
for obj in $save_oldobjs
do
last_oldobj=$obj
done
for obj in $save_oldobjs
do
oldobjs="$objlist $obj"
objlist="$objlist $obj"
eval test_cmds=\"$old_archive_cmds\"
if len=`expr "X$test_cmds" : ".*"` &&
test "$len" -le "$max_cmd_len"; then
:
else
# the above command should be used before it gets too long
oldobjs=$objlist
if test "$obj" = "$last_oldobj" ; then
RANLIB=$save_RANLIB
fi
test -z "$concat_cmds" || concat_cmds=$concat_cmds~
eval concat_cmds=\"\${concat_cmds}$old_archive_cmds\"
objlist=
fi
done
RANLIB=$save_RANLIB
oldobjs=$objlist
if test "X$oldobjs" = "X" ; then
eval cmds=\"\$concat_cmds\"
else
eval cmds=\"\$concat_cmds~\$old_archive_cmds\"
fi
fi
fi
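# Piecewise example (illustrative): when the member list exceeds
# $max_cmd_len, one `ar cru libfoo.a a.o ... z.o' becomes a chain like
#   ar cru libfoo.a a.o ... m.o~ar cru libfoo.a n.o ... z.o~ranlib libfoo.a
# where `~' is the separator split by the IFS loop below.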
save_ifs="$IFS"; IFS='~'
for cmd in $cmds; do
eval cmd=\"$cmd\"
IFS="$save_ifs"
$show "$cmd"
$run eval "$cmd" || exit $?
done
IFS="$save_ifs"
done
if test -n "$generated"; then
$show "${rm}r$generated"
$run ${rm}r$generated
fi
# Now create the libtool archive.
case $output in
*.la)
old_library=
test "$build_old_libs" = yes && old_library="$libname.$libext"
$show "creating $output"
# Preserve any variables that may affect compiler behavior
for var in $variables_saved_for_relink; do
if eval test -z \"\${$var+set}\"; then
relink_command="{ test -z \"\${$var+set}\" || unset $var || { $var=; export $var; }; }; $relink_command"
elif eval var_value=\$$var; test -z "$var_value"; then
relink_command="$var=; export $var; $relink_command"
else
var_value=`$echo "X$var_value" | $Xsed -e "$sed_quote_subst"`
relink_command="$var=\"$var_value\"; export $var; $relink_command"
fi
done
# Quote the link command for shipping.
relink_command="(cd `pwd`; $SHELL $progpath $preserve_args --mode=relink $libtool_args @inst_prefix_dir@)"
relink_command=`$echo "X$relink_command" | $Xsed -e "$sed_quote_subst"`
if test "$hardcode_automatic" = yes ; then
relink_command=
fi
# Only create the output if not a dry run.
if test -z "$run"; then
for installed in no yes; do
if test "$installed" = yes; then
if test -z "$install_libdir"; then
break
fi
output="$output_objdir/$outputname"i
# Replace all uninstalled libtool libraries with the installed ones
newdependency_libs=
for deplib in $dependency_libs; do
case $deplib in
*.la)
name=`$echo "X$deplib" | $Xsed -e 's%^.*/%%'`
eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $deplib`
if test -z "$libdir"; then
$echo "$modename: \`$deplib' is not a valid libtool archive" 1>&2
exit $EXIT_FAILURE
fi
newdependency_libs="$newdependency_libs $libdir/$name"
;;
*) newdependency_libs="$newdependency_libs $deplib" ;;
esac
done
dependency_libs="$newdependency_libs"
newdlfiles=
for lib in $dlfiles; do
name=`$echo "X$lib" | $Xsed -e 's%^.*/%%'`
eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $lib`
if test -z "$libdir"; then
$echo "$modename: \`$lib' is not a valid libtool archive" 1>&2
exit $EXIT_FAILURE
fi
newdlfiles="$newdlfiles $libdir/$name"
done
dlfiles="$newdlfiles"
newdlprefiles=
for lib in $dlprefiles; do
name=`$echo "X$lib" | $Xsed -e 's%^.*/%%'`
eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $lib`
if test -z "$libdir"; then
$echo "$modename: \`$lib' is not a valid libtool archive" 1>&2
exit $EXIT_FAILURE
fi
newdlprefiles="$newdlprefiles $libdir/$name"
done
dlprefiles="$newdlprefiles"
else
newdlfiles=
for lib in $dlfiles; do
case $lib in
[\\/]* | [A-Za-z]:[\\/]*) abs="$lib" ;;
*) abs=`pwd`"/$lib" ;;
esac
newdlfiles="$newdlfiles $abs"
done
dlfiles="$newdlfiles"
newdlprefiles=
for lib in $dlprefiles; do
case $lib in
[\\/]* | [A-Za-z]:[\\/]*) abs="$lib" ;;
*) abs=`pwd`"/$lib" ;;
esac
newdlprefiles="$newdlprefiles $abs"
done
dlprefiles="$newdlprefiles"
fi
$rm $output
# place dlname in correct position for cygwin
tdlname=$dlname
case $host,$output,$installed,$module,$dlname in
*cygwin*,*lai,yes,no,*.dll | *mingw*,*lai,yes,no,*.dll) tdlname=../bin/$dlname ;;
esac
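# e.g. host=i686-pc-cygwin, installed .lai, dlname=cygfoo-1.dll
#   -> tdlname=../bin/cygfoo-1.dll (DLLs live next to the binaries)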
$echo > $output "\
# $outputname - a libtool library file
# Generated by $PROGRAM - GNU $PACKAGE $VERSION$TIMESTAMP
#
# Please DO NOT delete this file!
# It is necessary for linking the library.
# The name that we can dlopen(3).
dlname='$tdlname'
# Names of this library.
library_names='$library_names'
# The name of the static archive.
old_library='$old_library'
# Libraries that this one depends upon.
dependency_libs='$dependency_libs'
# Version information for $libname.
current=$current
age=$age
revision=$revision
# Is this an already installed library?
installed=$installed
# Should we warn about portability when linking against -modules?
shouldnotlink=$module
# Files to dlopen/dlpreopen
dlopen='$dlfiles'
dlpreopen='$dlprefiles'
# Directory that this library needs to be installed in:
libdir='$install_libdir'"
if test "$installed" = no && test "$need_relink" = yes; then
$echo >> $output "\
relink_command=\"$relink_command\""
fi
done
fi
# Do a symbolic link so that the libtool archive can be found in
# LD_LIBRARY_PATH before the program is installed.
$show "(cd $output_objdir && $rm $outputname && $LN_S ../$outputname $outputname)"
$run eval '(cd $output_objdir && $rm $outputname && $LN_S ../$outputname $outputname)' || exit $?
;;
esac
exit $EXIT_SUCCESS
;;
# libtool install mode
install)
modename="$modename: install"
# There may be an optional sh(1) argument at the beginning of
# install_prog (especially on Windows NT).
if test "$nonopt" = "$SHELL" || test "$nonopt" = /bin/sh ||
# Allow the use of GNU shtool's install command.
$echo "X$nonopt" | $Xsed | grep shtool > /dev/null; then
# Aesthetically quote it.
arg=`$echo "X$nonopt" | $Xsed -e "$sed_quote_subst"`
case $arg in
*[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*)
arg="\"$arg\""
;;
esac
install_prog="$arg "
arg="$1"
shift
else
install_prog=
arg="$nonopt"
fi
# The real first argument should be the name of the installation program.
# Aesthetically quote it.
arg=`$echo "X$arg" | $Xsed -e "$sed_quote_subst"`
case $arg in
*[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*)
arg="\"$arg\""
;;
esac
install_prog="$install_prog$arg"
# We need to accept at least all the BSD install flags.
dest=
files=
opts=
prev=
install_type=
isdir=no
stripme=
for arg
do
if test -n "$dest"; then
files="$files $dest"
dest="$arg"
continue
fi
case $arg in
-d) isdir=yes ;;
-f) prev="-f" ;;
-g) prev="-g" ;;
-m) prev="-m" ;;
-o) prev="-o" ;;
-s)
stripme=" -s"
continue
;;
-*) ;;
*)
# If the previous option needed an argument, then skip it.
if test -n "$prev"; then
prev=
else
dest="$arg"
continue
fi
;;
esac
# Aesthetically quote the argument.
arg=`$echo "X$arg" | $Xsed -e "$sed_quote_subst"`
case $arg in
*[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*)
arg="\"$arg\""
;;
esac
install_prog="$install_prog $arg"
done
if test -z "$install_prog"; then
$echo "$modename: you must specify an install program" 1>&2
$echo "$help" 1>&2
exit $EXIT_FAILURE
fi
if test -n "$prev"; then
$echo "$modename: the \`$prev' option requires an argument" 1>&2
$echo "$help" 1>&2
exit $EXIT_FAILURE
fi
if test -z "$files"; then
if test -z "$dest"; then
$echo "$modename: no file or destination specified" 1>&2
else
$echo "$modename: you must specify a destination" 1>&2
fi
$echo "$help" 1>&2
exit $EXIT_FAILURE
fi
# Strip any trailing slash from the destination.
dest=`$echo "X$dest" | $Xsed -e 's%/$%%'`
# Check to see that the destination is a directory.
test -d "$dest" && isdir=yes
if test "$isdir" = yes; then
destdir="$dest"
destname=
else
destdir=`$echo "X$dest" | $Xsed -e 's%/[^/]*$%%'`
test "X$destdir" = "X$dest" && destdir=.
destname=`$echo "X$dest" | $Xsed -e 's%^.*/%%'`
# Not a directory, so check to see that there is only one file specified.
set dummy $files
if test "$#" -gt 2; then
$echo "$modename: \`$dest' is not a directory" 1>&2
$echo "$help" 1>&2
exit $EXIT_FAILURE
fi
fi
case $destdir in
[\\/]* | [A-Za-z]:[\\/]*) ;;
*)
for file in $files; do
case $file in
*.lo) ;;
*)
$echo "$modename: \`$destdir' must be an absolute directory name" 1>&2
$echo "$help" 1>&2
exit $EXIT_FAILURE
;;
esac
done
;;
esac
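# Example of the rule above (paths hypothetical):
#   libtool --mode=install install -c libfoo.la /usr/local/lib/libfoo.la  # ok
#   libtool --mode=install install -c libfoo.la ../lib/libfoo.la          # rejected
# Relative destinations are only tolerated when installing bare .lo objects.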
# This variable tells wrapper scripts just to set variables rather
# than running their programs.
libtool_install_magic="$magic"
staticlibs=
future_libdirs=
current_libdirs=
for file in $files; do
# Do each installation.
case $file in
*.$libext)
# Do the static libraries later.
staticlibs="$staticlibs $file"
;;
*.la)
# Check to see that this really is a libtool archive.
if (${SED} -e '2q' $file | grep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then :
else
$echo "$modename: \`$file' is not a valid libtool archive" 1>&2
$echo "$help" 1>&2
exit $EXIT_FAILURE
fi
library_names=
old_library=
relink_command=
# If there is no directory component, then add one.
case $file in
*/* | *\\*) . $file ;;
*) . ./$file ;;
esac
# Add the libdir to current_libdirs if it is the destination.
if test "X$destdir" = "X$libdir"; then
case "$current_libdirs " in
*" $libdir "*) ;;
*) current_libdirs="$current_libdirs $libdir" ;;
esac
else
# Note the libdir as a future libdir.
case "$future_libdirs " in
*" $libdir "*) ;;
*) future_libdirs="$future_libdirs $libdir" ;;
esac
fi
dir=`$echo "X$file" | $Xsed -e 's%/[^/]*$%%'`/
test "X$dir" = "X$file/" && dir=
dir="$dir$objdir"
if test -n "$relink_command"; then
# Determine the prefix the user has applied to our future dir.
inst_prefix_dir=`$echo "$destdir" | $SED "s%$libdir\$%%"`
# Don't allow the user to place us outside of our expected
# location because this prevents finding dependent libraries that
# are installed to the same prefix.
# At present, this check doesn't affect windows .dll's that
# are installed into $libdir/../bin (currently, that works fine)
# but it's something to keep an eye on.
if test "$inst_prefix_dir" = "$destdir"; then
$echo "$modename: error: cannot install \`$file' to a directory not ending in $libdir" 1>&2
exit $EXIT_FAILURE
fi
if test -n "$inst_prefix_dir"; then
# Stick the inst_prefix_dir data into the link command.
relink_command=`$echo "$relink_command" | $SED "s%@inst_prefix_dir@%-inst-prefix-dir $inst_prefix_dir%"`
else
relink_command=`$echo "$relink_command" | $SED "s%@inst_prefix_dir@%%"`
fi
$echo "$modename: warning: relinking \`$file'" 1>&2
$show "$relink_command"
if $run eval "$relink_command"; then :
else
$echo "$modename: error: relink \`$file' with the above command before installing it" 1>&2
exit $EXIT_FAILURE
fi
fi
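# Staged-install sketch (paths hypothetical): with libdir=/usr/lib and
# destdir=/tmp/stage/usr/lib, inst_prefix_dir becomes /tmp/stage and the
# library is relinked with `-inst-prefix-dir /tmp/stage' so dependent
# libraries are resolved inside the staging tree.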
# See the names of the shared library.
set dummy $library_names
if test -n "$2"; then
realname="$2"
shift
shift
srcname="$realname"
test -n "$relink_command" && srcname="$realname"T
# Install the shared library and build the symlinks.
$show "$install_prog $dir/$srcname $destdir/$realname"
$run eval "$install_prog $dir/$srcname $destdir/$realname" || exit $?
if test -n "$stripme" && test -n "$striplib"; then
$show "$striplib $destdir/$realname"
$run eval "$striplib $destdir/$realname" || exit $?
fi
if test "$#" -gt 0; then
# Delete the old symlinks, and create new ones.
for linkname
do
if test "$linkname" != "$realname"; then
$show "(cd $destdir && $rm $linkname && $LN_S $realname $linkname)"
$run eval "(cd $destdir && $rm $linkname && $LN_S $realname $linkname)"
fi
done
fi
# Do each command in the postinstall commands.
lib="$destdir/$realname"
cmds=$postinstall_cmds
save_ifs="$IFS"; IFS='~'
for cmd in $cmds; do
IFS="$save_ifs"
eval cmd=\"$cmd\"
$show "$cmd"
$run eval "$cmd" || exit $?
done
IFS="$save_ifs"
fi
# Install the pseudo-library for information purposes.
name=`$echo "X$file" | $Xsed -e 's%^.*/%%'`
instname="$dir/$name"i
$show "$install_prog $instname $destdir/$name"
$run eval "$install_prog $instname $destdir/$name" || exit $?
# Maybe install the static library, too.
test -n "$old_library" && staticlibs="$staticlibs $dir/$old_library"
;;
*.lo)
# Install (i.e. copy) a libtool object.
# Figure out destination file name, if it wasn't already specified.
if test -n "$destname"; then
destfile="$destdir/$destname"
else
destfile=`$echo "X$file" | $Xsed -e 's%^.*/%%'`
destfile="$destdir/$destfile"
fi
# Deduce the name of the destination old-style object file.
case $destfile in
*.lo)
staticdest=`$echo "X$destfile" | $Xsed -e "$lo2o"`
;;
*.$objext)
staticdest="$destfile"
destfile=
;;
*)
$echo "$modename: cannot copy a libtool object to \`$destfile'" 1>&2
$echo "$help" 1>&2
exit $EXIT_FAILURE
;;
esac
# Install the libtool object if requested.
if test -n "$destfile"; then
$show "$install_prog $file $destfile"
$run eval "$install_prog $file $destfile" || exit $?
fi
# Install the old object if enabled.
if test "$build_old_libs" = yes; then
# Deduce the name of the old-style object file.
staticobj=`$echo "X$file" | $Xsed -e "$lo2o"`
$show "$install_prog $staticobj $staticdest"
$run eval "$install_prog \$staticobj \$staticdest" || exit $?
fi
exit $EXIT_SUCCESS
;;
*)
# Figure out destination file name, if it wasn't already specified.
if test -n "$destname"; then
destfile="$destdir/$destname"
else
destfile=`$echo "X$file" | $Xsed -e 's%^.*/%%'`
destfile="$destdir/$destfile"
fi
# If the file is missing, and there is a .exe on the end, strip it
# because it is most likely a libtool script we actually want to
# install
stripped_ext=""
case $file in
*.exe)
if test ! -f "$file"; then
file=`$echo $file|${SED} 's,.exe$,,'`
stripped_ext=".exe"
fi
;;
esac
# Do a test to see if this is really a libtool program.
case $host in
*cygwin*|*mingw*)
wrapper=`$echo $file | ${SED} -e 's,.exe$,,'`
;;
*)
wrapper=$file
;;
esac
if (${SED} -e '4q' $wrapper | grep "^# Generated by .*$PACKAGE")>/dev/null 2>&1; then
notinst_deplibs=
relink_command=
# To ensure that "foo" is sourced, and not "foo.exe",
# finesse the cygwin/MSYS system by explicitly sourcing "foo.",
# which disables the automatic .exe-appending behavior.
case $build in
*cygwin* | *mingw*) wrapperdot=${wrapper}. ;;
*) wrapperdot=${wrapper} ;;
esac
# If there is no directory component, then add one.
case $file in
*/* | *\\*) . ${wrapperdot} ;;
*) . ./${wrapperdot} ;;
esac
# Check the variables that should have been set.
if test -z "$notinst_deplibs"; then
$echo "$modename: invalid libtool wrapper script \`$wrapper'" 1>&2
exit $EXIT_FAILURE
fi
finalize=yes
for lib in $notinst_deplibs; do
# Check to see that each library is installed.
libdir=
if test -f "$lib"; then
# If there is no directory component, then add one.
case $lib in
*/* | *\\*) . $lib ;;
*) . ./$lib ;;
esac
fi
libfile="$libdir/"`$echo "X$lib" | $Xsed -e 's%^.*/%%g'` ### testsuite: skip nested quoting test
if test -n "$libdir" && test ! -f "$libfile"; then
$echo "$modename: warning: \`$lib' has not been installed in \`$libdir'" 1>&2
finalize=no
fi
done
relink_command=
# To ensure that "foo" is sourced, and not "foo.exe",
# finesse the cygwin/MSYS system by explicitly sourcing "foo.",
# which disables the automatic .exe-appending behavior.
case $build in
*cygwin* | *mingw*) wrapperdot=${wrapper}. ;;
*) wrapperdot=${wrapper} ;;
esac
# If there is no directory component, then add one.
case $file in
*/* | *\\*) . ${wrapperdot} ;;
*) . ./${wrapperdot} ;;
esac
outputname=
if test "$fast_install" = no && test -n "$relink_command"; then
if test "$finalize" = yes && test -z "$run"; then
tmpdir="/tmp"
test -n "$TMPDIR" && tmpdir="$TMPDIR"
tmpdir="$tmpdir/libtool-$$"
save_umask=`umask`
umask 0077
if $mkdir "$tmpdir"; then
umask $save_umask
else
umask $save_umask
$echo "$modename: error: cannot create temporary directory \`$tmpdir'" 1>&2
continue
fi
file=`$echo "X$file$stripped_ext" | $Xsed -e 's%^.*/%%'`
outputname="$tmpdir/$file"
# Replace the output file specification.
relink_command=`$echo "X$relink_command" | $Xsed -e 's%@OUTPUT@%'"$outputname"'%g'`
$show "$relink_command"
if $run eval "$relink_command"; then :
else
$echo "$modename: error: relink \`$file' with the above command before installing it" 1>&2
${rm}r "$tmpdir"
continue
fi
file="$outputname"
else
$echo "$modename: warning: cannot relink \`$file'" 1>&2
fi
else
# Install the binary that we compiled earlier.
file=`$echo "X$file$stripped_ext" | $Xsed -e "s%\([^/]*\)$%$objdir/\1%"`
fi
fi
# remove .exe since cygwin /usr/bin/install will append another
# one anyway
case $install_prog,$host in
*/usr/bin/install*,*cygwin*)
case $file:$destfile in
*.exe:*.exe)
# this is ok
;;
*.exe:*)
destfile=$destfile.exe
;;
*:*.exe)
destfile=`$echo $destfile | ${SED} -e 's,.exe$,,'`
;;
esac
;;
esac
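# e.g. (cygwin, /usr/bin/install):
#   file=myprog.exe destfile=/usr/bin/myprog      -> destfile gains `.exe'
#   file=myprog     destfile=/usr/bin/myprog.exe  -> `.exe' stripped, since
#                                                    install re-appends it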
$show "$install_prog$stripme $file $destfile"
$run eval "$install_prog\$stripme \$file \$destfile" || exit $?
test -n "$outputname" && ${rm}r "$tmpdir"
;;
esac
done
for file in $staticlibs; do
name=`$echo "X$file" | $Xsed -e 's%^.*/%%'`
# Set up the ranlib parameters.
oldlib="$destdir/$name"
$show "$install_prog $file $oldlib"
$run eval "$install_prog \$file \$oldlib" || exit $?
if test -n "$stripme" && test -n "$old_striplib"; then
$show "$old_striplib $oldlib"
$run eval "$old_striplib $oldlib" || exit $?
fi
# Do each command in the postinstall commands.
cmds=$old_postinstall_cmds
save_ifs="$IFS"; IFS='~'
for cmd in $cmds; do
IFS="$save_ifs"
eval cmd=\"$cmd\"
$show "$cmd"
$run eval "$cmd" || exit $?
done
IFS="$save_ifs"
done
if test -n "$future_libdirs"; then
$echo "$modename: warning: remember to run \`$progname --finish$future_libdirs'" 1>&2
fi
if test -n "$current_libdirs"; then
# Maybe just do a dry run.
test -n "$run" && current_libdirs=" -n$current_libdirs"
exec_cmd='$SHELL $progpath $preserve_args --finish$current_libdirs'
else
exit $EXIT_SUCCESS
fi
;;
# libtool finish mode
finish)
modename="$modename: finish"
libdirs="$nonopt"
admincmds=
if test -n "$finish_cmds$finish_eval" && test -n "$libdirs"; then
for dir
do
libdirs="$libdirs $dir"
done
for libdir in $libdirs; do
if test -n "$finish_cmds"; then
# Do each command in the finish commands.
cmds=$finish_cmds
save_ifs="$IFS"; IFS='~'
for cmd in $cmds; do
IFS="$save_ifs"
eval cmd=\"$cmd\"
$show "$cmd"
$run eval "$cmd" || admincmds="$admincmds
$cmd"
done
IFS="$save_ifs"
fi
if test -n "$finish_eval"; then
# Do the single finish_eval.
eval cmds=\"$finish_eval\"
$run eval "$cmds" || admincmds="$admincmds
$cmds"
fi
done
fi
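# For example, on GNU/Linux $finish_cmds is typically the
# configure-generated command
#   PATH="$PATH:/sbin" ldconfig -n $libdir
# which refreshes the shared-library links in each LIBDIR.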
# Exit here if they wanted silent mode.
test "$show" = : && exit $EXIT_SUCCESS
$echo "----------------------------------------------------------------------"
$echo "Libraries have been installed in:"
for libdir in $libdirs; do
$echo " $libdir"
done
$echo
$echo "If you ever happen to want to link against installed libraries"
$echo "in a given directory, LIBDIR, you must either use libtool, and"
$echo "specify the full pathname of the library, or use the \`-LLIBDIR'"
$echo "flag during linking and do at least one of the following:"
if test -n "$shlibpath_var"; then
$echo " - add LIBDIR to the \`$shlibpath_var' environment variable"
$echo " during execution"
fi
if test -n "$runpath_var"; then
$echo " - add LIBDIR to the \`$runpath_var' environment variable"
$echo " during linking"
fi
if test -n "$hardcode_libdir_flag_spec"; then
libdir=LIBDIR
eval flag=\"$hardcode_libdir_flag_spec\"
$echo " - use the \`$flag' linker flag"
fi
if test -n "$admincmds"; then
$echo " - have your system administrator run these commands:$admincmds"
fi
if test -f /etc/ld.so.conf; then
$echo " - have your system administrator add LIBDIR to \`/etc/ld.so.conf'"
fi
$echo
$echo "See any operating system documentation about shared libraries for"
$echo "more information, such as the ld(1) and ld.so(8) manual pages."
$echo "----------------------------------------------------------------------"
exit $EXIT_SUCCESS
;;
# libtool execute mode
execute)
modename="$modename: execute"
# The first argument is the command name.
cmd="$nonopt"
if test -z "$cmd"; then
$echo "$modename: you must specify a COMMAND" 1>&2
$echo "$help"
exit $EXIT_FAILURE
fi
# Handle -dlopen flags immediately.
for file in $execute_dlfiles; do
if test ! -f "$file"; then
$echo "$modename: \`$file' is not a file" 1>&2
$echo "$help" 1>&2
exit $EXIT_FAILURE
fi
dir=
case $file in
*.la)
# Check to see that this really is a libtool archive.
if (${SED} -e '2q' $file | grep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then :
else
$echo "$modename: \`$lib' is not a valid libtool archive" 1>&2
$echo "$help" 1>&2
exit $EXIT_FAILURE
fi
# Read the libtool library.
dlname=
library_names=
# If there is no directory component, then add one.
case $file in
*/* | *\\*) . $file ;;
*) . ./$file ;;
esac
# Skip this library if it cannot be dlopened.
if test -z "$dlname"; then
# Warn if it was a shared library.
test -n "$library_names" && $echo "$modename: warning: \`$file' was not linked with \`-export-dynamic'"
continue
fi
dir=`$echo "X$file" | $Xsed -e 's%/[^/]*$%%'`
test "X$dir" = "X$file" && dir=.
if test -f "$dir/$objdir/$dlname"; then
dir="$dir/$objdir"
else
$echo "$modename: cannot find \`$dlname' in \`$dir' or \`$dir/$objdir'" 1>&2
exit $EXIT_FAILURE
fi
;;
*.lo)
# Just add the directory containing the .lo file.
dir=`$echo "X$file" | $Xsed -e 's%/[^/]*$%%'`
test "X$dir" = "X$file" && dir=.
;;
*)
$echo "$modename: warning \`-dlopen' is ignored for non-libtool libraries and objects" 1>&2
continue
;;
esac
# Get the absolute pathname.
absdir=`cd "$dir" && pwd`
test -n "$absdir" && dir="$absdir"
# Now add the directory to shlibpath_var.
if eval "test -z \"\$$shlibpath_var\""; then
eval "$shlibpath_var=\"\$dir\""
else
eval "$shlibpath_var=\"\$dir:\$$shlibpath_var\""
fi
done
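# Typical use (sketch; library path variable assumed LD_LIBRARY_PATH):
#   libtool --mode=execute -dlopen libfoo.la gdb myprog
# prepends the uninstalled .libs directory of libfoo.la to
# LD_LIBRARY_PATH before the command is exec'd below.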
# This variable tells wrapper scripts just to set shlibpath_var
# rather than running their programs.
libtool_execute_magic="$magic"
# Check if any of the arguments is a wrapper script.
args=
for file
do
case $file in
-*) ;;
*)
# Do a test to see if this is really a libtool program.
if (${SED} -e '4q' $file | grep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then
# If there is no directory component, then add one.
case $file in
*/* | *\\*) . $file ;;
*) . ./$file ;;
esac
# Transform arg to wrapped name.
file="$progdir/$program"
fi
;;
esac
# Quote arguments (to preserve shell metacharacters).
file=`$echo "X$file" | $Xsed -e "$sed_quote_subst"`
args="$args \"$file\""
done
if test -z "$run"; then
if test -n "$shlibpath_var"; then
# Export the shlibpath_var.
eval "export $shlibpath_var"
fi
# Restore saved environment variables
if test "${save_LC_ALL+set}" = set; then
LC_ALL="$save_LC_ALL"; export LC_ALL
fi
if test "${save_LANG+set}" = set; then
LANG="$save_LANG"; export LANG
fi
# Now prepare to actually exec the command.
exec_cmd="\$cmd$args"
else
# Display what would be done.
if test -n "$shlibpath_var"; then
eval "\$echo \"\$shlibpath_var=\$$shlibpath_var\""
$echo "export $shlibpath_var"
fi
$echo "$cmd$args"
exit $EXIT_SUCCESS
fi
;;
# libtool clean and uninstall mode
clean | uninstall)
modename="$modename: $mode"
rm="$nonopt"
files=
rmforce=
exit_status=0
# This variable tells wrapper scripts just to set variables rather
# than running their programs.
libtool_install_magic="$magic"
for arg
do
case $arg in
-f) rm="$rm $arg"; rmforce=yes ;;
-*) rm="$rm $arg" ;;
*) files="$files $arg" ;;
esac
done
if test -z "$rm"; then
$echo "$modename: you must specify an RM program" 1>&2
$echo "$help" 1>&2
exit $EXIT_FAILURE
fi
rmdirs=
origobjdir="$objdir"
for file in $files; do
dir=`$echo "X$file" | $Xsed -e 's%/[^/]*$%%'`
if test "X$dir" = "X$file"; then
dir=.
objdir="$origobjdir"
else
objdir="$dir/$origobjdir"
fi
name=`$echo "X$file" | $Xsed -e 's%^.*/%%'`
test "$mode" = uninstall && objdir="$dir"
# Remember objdir for removal later, being careful to avoid duplicates
if test "$mode" = clean; then
case " $rmdirs " in
*" $objdir "*) ;;
*) rmdirs="$rmdirs $objdir" ;;
esac
fi
# Don't error if the file doesn't exist and rm -f was used.
if (test -L "$file") >/dev/null 2>&1 \
|| (test -h "$file") >/dev/null 2>&1 \
|| test -f "$file"; then
:
elif test -d "$file"; then
exit_status=1
continue
elif test "$rmforce" = yes; then
continue
fi
rmfiles="$file"
case $name in
*.la)
# Possibly a libtool archive, so verify it.
if (${SED} -e '2q' $file | grep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then
. $dir/$name
# Delete the libtool libraries and symlinks.
for n in $library_names; do
rmfiles="$rmfiles $objdir/$n"
done
test -n "$old_library" && rmfiles="$rmfiles $objdir/$old_library"
test "$mode" = clean && rmfiles="$rmfiles $objdir/$name $objdir/${name}i"
if test "$mode" = uninstall; then
if test -n "$library_names"; then
# Do each command in the postuninstall commands.
cmds=$postuninstall_cmds
save_ifs="$IFS"; IFS='~'
for cmd in $cmds; do
IFS="$save_ifs"
eval cmd=\"$cmd\"
$show "$cmd"
$run eval "$cmd"
if test "$?" -ne 0 && test "$rmforce" != yes; then
exit_status=1
fi
done
IFS="$save_ifs"
fi
if test -n "$old_library"; then
# Do each command in the old_postuninstall commands.
cmds=$old_postuninstall_cmds
save_ifs="$IFS"; IFS='~'
for cmd in $cmds; do
IFS="$save_ifs"
eval cmd=\"$cmd\"
$show "$cmd"
$run eval "$cmd"
if test "$?" -ne 0 && test "$rmforce" != yes; then
exit_status=1
fi
done
IFS="$save_ifs"
fi
# FIXME: should reinstall the best remaining shared library.
fi
fi
;;
*.lo)
# Possibly a libtool object, so verify it.
if (${SED} -e '2q' $file | grep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then
# Read the .lo file
. $dir/$name
# Add PIC object to the list of files to remove.
if test -n "$pic_object" \
&& test "$pic_object" != none; then
rmfiles="$rmfiles $dir/$pic_object"
fi
# Add non-PIC object to the list of files to remove.
if test -n "$non_pic_object" \
&& test "$non_pic_object" != none; then
rmfiles="$rmfiles $dir/$non_pic_object"
fi
fi
;;
*)
if test "$mode" = clean ; then
noexename=$name
case $file in
*.exe)
file=`$echo $file|${SED} 's,.exe$,,'`
noexename=`$echo $name|${SED} 's,.exe$,,'`
# $file with .exe has already been added to rmfiles,
# add $file without .exe
rmfiles="$rmfiles $file"
;;
esac
# Do a test to see if this is a libtool program.
if (${SED} -e '4q' $file | grep "^# Generated by .*$PACKAGE") >/dev/null 2>&1; then
relink_command=
. $dir/$noexename
# note $name still contains .exe if it was in $file originally
# as does the version of $file that was added into $rmfiles
rmfiles="$rmfiles $objdir/$name $objdir/${name}S.${objext}"
if test "$fast_install" = yes && test -n "$relink_command"; then
rmfiles="$rmfiles $objdir/lt-$name"
fi
if test "X$noexename" != "X$name" ; then
rmfiles="$rmfiles $objdir/lt-${noexename}.c"
fi
fi
fi
;;
esac
$show "$rm $rmfiles"
$run $rm $rmfiles || exit_status=1
done
objdir="$origobjdir"
# Try to remove the ${objdir}s in the directories where we deleted files
for dir in $rmdirs; do
if test -d "$dir"; then
$show "rmdir $dir"
$run rmdir $dir >/dev/null 2>&1
fi
done
exit $exit_status
;;
"")
$echo "$modename: you must specify a MODE" 1>&2
$echo "$generic_help" 1>&2
exit $EXIT_FAILURE
;;
esac
if test -z "$exec_cmd"; then
$echo "$modename: invalid operation mode \`$mode'" 1>&2
$echo "$generic_help" 1>&2
exit $EXIT_FAILURE
fi
fi # test -z "$show_help"
if test -n "$exec_cmd"; then
eval exec $exec_cmd
exit $EXIT_FAILURE
fi
# We need to display help for each of the modes.
case $mode in
"") $echo \
"Usage: $modename [OPTION]... [MODE-ARG]...
Provide generalized library-building support services.
--config show all configuration variables
--debug enable verbose shell tracing
-n, --dry-run display commands without modifying any files
--features display basic configuration information and exit
--finish same as \`--mode=finish'
--help display this help message and exit
--mode=MODE use operation mode MODE [default=inferred from MODE-ARGS]
--quiet same as \`--silent'
--silent don't print informational messages
--tag=TAG use configuration variables from tag TAG
--version print version information
MODE must be one of the following:
clean remove files from the build directory
compile compile a source file into a libtool object
execute automatically set library path, then run a program
finish complete the installation of libtool libraries
install install libraries or executables
link create a library or an executable
uninstall remove libraries from an installed directory
MODE-ARGS vary depending on the MODE. Try \`$modename --help --mode=MODE' for
a more detailed description of MODE.
Report bugs to <[email protected]>."
exit $EXIT_SUCCESS
;;
clean)
$echo \
"Usage: $modename [OPTION]... --mode=clean RM [RM-OPTION]... FILE...
Remove files from the build directory.
RM is the name of the program to use to delete files associated with each FILE
(typically \`/bin/rm'). RM-OPTIONS are options (such as \`-f') to be passed
to RM.
If FILE is a libtool library, object or program, all the files associated
with it are deleted. Otherwise, only FILE itself is deleted using RM."
;;
compile)
$echo \
"Usage: $modename [OPTION]... --mode=compile COMPILE-COMMAND... SOURCEFILE
Compile a source file into a libtool library object.
This mode accepts the following additional options:
-o OUTPUT-FILE set the output file name to OUTPUT-FILE
-prefer-pic try to build PIC objects only
-prefer-non-pic try to build non-PIC objects only
-static always build a \`.o' file suitable for static linking
COMPILE-COMMAND is a command to be used in creating a \`standard' object file
from the given SOURCEFILE.
The output file name is determined by removing the directory component from
SOURCEFILE, then substituting the C source code suffix \`.c' with the
library object suffix, \`.lo'."
;;
execute)
$echo \
"Usage: $modename [OPTION]... --mode=execute COMMAND [ARGS]...
Automatically set library path, then run a program.
This mode accepts the following additional options:
-dlopen FILE add the directory containing FILE to the library path
This mode sets the library path environment variable according to \`-dlopen'
flags.
If any of the ARGS are libtool executable wrappers, then they are translated
into their corresponding uninstalled binary, and any of their required library
directories are added to the library path.
Then, COMMAND is executed, with ARGS as arguments."
;;
finish)
$echo \
"Usage: $modename [OPTION]... --mode=finish [LIBDIR]...
Complete the installation of libtool libraries.
Each LIBDIR is a directory that contains libtool libraries.
The commands that this mode executes may require superuser privileges. Use
the \`--dry-run' option if you just want to see what would be executed."
;;
install)
$echo \
"Usage: $modename [OPTION]... --mode=install INSTALL-COMMAND...
Install executables or libraries.
INSTALL-COMMAND is the installation command. The first component should be
either the \`install' or \`cp' program.
The rest of the components are interpreted as arguments to that command (only
BSD-compatible install options are recognized)."
;;
link)
$echo \
"Usage: $modename [OPTION]... --mode=link LINK-COMMAND...
Link object files or libraries together to form another library, or to
create an executable program.
LINK-COMMAND is a command using the C compiler that you would use to create
a program from several object files.
The following components of LINK-COMMAND are treated specially:
-all-static do not do any dynamic linking at all
-avoid-version do not add a version suffix if possible
-dlopen FILE \`-dlpreopen' FILE if it cannot be dlopened at runtime
-dlpreopen FILE link in FILE and add its symbols to lt_preloaded_symbols
-export-dynamic allow symbols from OUTPUT-FILE to be resolved with dlsym(3)
-export-symbols SYMFILE
try to export only the symbols listed in SYMFILE
-export-symbols-regex REGEX
try to export only the symbols matching REGEX
-LLIBDIR search LIBDIR for required installed libraries
-lNAME OUTPUT-FILE requires the installed library libNAME
-module build a library that can be dlopened
-no-fast-install disable the fast-install mode
-no-install link a not-installable executable
-no-undefined declare that a library does not refer to external symbols
-o OUTPUT-FILE create OUTPUT-FILE from the specified objects
  -objectlist FILE  use a list of object files found in FILE to specify objects
-precious-files-regex REGEX
don't remove output files matching REGEX
-release RELEASE specify package release information
-rpath LIBDIR the created library will eventually be installed in LIBDIR
-R[ ]LIBDIR add LIBDIR to the runtime path of programs and libraries
-static do not do any dynamic linking of libtool libraries
-version-info CURRENT[:REVISION[:AGE]]
specify library version info [each variable defaults to 0]
All other options (arguments beginning with \`-') are ignored.
Every other argument is treated as a filename. Files ending in \`.la' are
treated as uninstalled libtool libraries; other files are standard or library
object files.
If the OUTPUT-FILE ends in \`.la', then a libtool library is created,
only library objects (\`.lo' files) may be specified, and \`-rpath' is
required, except when creating a convenience library.
If OUTPUT-FILE ends in \`.a' or \`.lib', then a standard library is created
using \`ar' and \`ranlib', or on Windows using \`lib'.
If OUTPUT-FILE ends in \`.lo' or \`.${objext}', then a reloadable object file
is created, otherwise an executable program is created."
;;
uninstall)
$echo \
"Usage: $modename [OPTION]... --mode=uninstall RM [RM-OPTION]... FILE...
Remove libraries from an installation directory.
RM is the name of the program to use to delete files associated with each FILE
(typically \`/bin/rm'). RM-OPTIONS are options (such as \`-f') to be passed
to RM.
If FILE is a libtool library, all the files associated with it are deleted.
Otherwise, only FILE itself is deleted using RM."
;;
*)
$echo "$modename: invalid operation mode \`$mode'" 1>&2
$echo "$help" 1>&2
exit $EXIT_FAILURE
;;
esac
$echo
$echo "Try \`$modename --help' for more information about other modes."
exit $EXIT_SUCCESS
# The TAGs below are defined such that we never get into a situation
# in which we disable both kinds of libraries. Given conflicting
# choices, we go for a static library, which is the most portable,
# since we can't tell whether shared libraries were disabled because
# the user asked for that or because the platform doesn't support
# them. This is particularly important on AIX, because we don't
# support having both static and shared libraries enabled at the same
# time on that platform, so we default to a shared-only configuration.
# If a disable-shared tag is given, we'll fallback to a static-only
# configuration. But we'll never go from static-only to shared-only.
# ### BEGIN LIBTOOL TAG CONFIG: disable-shared
build_libtool_libs=no
build_old_libs=yes
# ### END LIBTOOL TAG CONFIG: disable-shared
# ### BEGIN LIBTOOL TAG CONFIG: disable-static
build_old_libs=`case $build_libtool_libs in yes) $echo no;; *) $echo yes;; esac`
# ### END LIBTOOL TAG CONFIG: disable-static
# Local Variables:
# mode:shell-script
# sh-indentation:2
# End:
|
bougyman/sfs
|
uvfs/ltmain.sh
|
Shell
|
gpl-2.0
| 184,019 |
go install
echo "// The goi18n command formats and merges translation files." > doc.go
echo "//" >> doc.go
echo "// go get -u github.com/nicksnyder/go-i18n/goi18n" >> doc.go
echo "// goi18n -help" >> doc.go
echo "//" >> doc.go
echo "// Help documentation:" >> doc.go
echo "//" >> doc.go
goi18n | sed -e 's/^/\/\/ /' >> doc.go
goi18n merge -help | sed -e 's/^/\/\/ /' >> doc.go
goi18n constants -help | sed -e 's/^/\/\/ /' >> doc.go
echo "package main" >> doc.go
|
jabley/cf-metrics
|
vendor/src/github.com/nicksnyder/go-i18n/goi18n/gendoc.sh
|
Shell
|
mit
| 482 |
#!/usr/bin/env bash
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../../..
source "${KUBE_ROOT}/test/kubemark/common/util.sh"
# Wrapper for gcloud compute, running it $RETRIES times in case of failures.
# Args:
# $@: all stuff that goes after 'gcloud compute'
function run-gcloud-compute-with-retries {
run-cmd-with-retries gcloud compute "$@"
}
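# Example (hypothetical resource names): a retried describe call, honoring $RETRIES:
#   run-gcloud-compute-with-retries instances describe "my-master" --zone "${ZONE}"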
function authenticate-docker {
echo "Configuring registry authentication"
mkdir -p "${HOME}/.docker"
gcloud beta auth configure-docker -q
}
function create-master-instance-with-resources {
GCLOUD_COMMON_ARGS="--project ${PROJECT} --zone ${ZONE}"
run-gcloud-compute-with-retries disks create "${MASTER_NAME}-pd" \
${GCLOUD_COMMON_ARGS} \
--type "${MASTER_DISK_TYPE}" \
--size "${MASTER_DISK_SIZE}"
if [ "${EVENT_PD:-}" == "true" ]; then
run-gcloud-compute-with-retries disks create "${MASTER_NAME}-event-pd" \
${GCLOUD_COMMON_ARGS} \
--type "${MASTER_DISK_TYPE}" \
--size "${MASTER_DISK_SIZE}"
fi
run-gcloud-compute-with-retries addresses create "${MASTER_NAME}-ip" \
--project "${PROJECT}" \
--region "${REGION}" -q
MASTER_IP=$(gcloud compute addresses describe "${MASTER_NAME}-ip" \
--project "${PROJECT}" --region "${REGION}" -q --format='value(address)')
  # Override the master image project to cos-cloud for COS images starting with the `cos` prefix.
DEFAULT_GCI_PROJECT=google-containers
if [[ "${GCI_VERSION}" == "cos"* ]]; then
DEFAULT_GCI_PROJECT=cos-cloud
fi
MASTER_IMAGE_PROJECT=${KUBE_GCE_MASTER_PROJECT:-${DEFAULT_GCI_PROJECT}}
run-gcloud-compute-with-retries instances create "${MASTER_NAME}" \
${GCLOUD_COMMON_ARGS} \
--address "${MASTER_IP}" \
--machine-type "${MASTER_SIZE}" \
--image-project="${MASTER_IMAGE_PROJECT}" \
--image "${MASTER_IMAGE}" \
--tags "${MASTER_TAG}" \
--subnet "${SUBNETWORK:-${NETWORK}}" \
--scopes "storage-ro,logging-write" \
--boot-disk-size "${MASTER_ROOT_DISK_SIZE}" \
--disk "name=${MASTER_NAME}-pd,device-name=master-pd,mode=rw,boot=no,auto-delete=no"
run-gcloud-compute-with-retries instances add-metadata "${MASTER_NAME}" \
${GCLOUD_COMMON_ARGS} \
--metadata-from-file startup-script="${KUBE_ROOT}/test/kubemark/resources/start-kubemark-master.sh"
if [ "${EVENT_PD:-}" == "true" ]; then
echo "Attaching ${MASTER_NAME}-event-pd to ${MASTER_NAME}"
run-gcloud-compute-with-retries instances attach-disk "${MASTER_NAME}" \
${GCLOUD_COMMON_ARGS} \
--disk "${MASTER_NAME}-event-pd" \
--device-name="master-event-pd"
fi
run-gcloud-compute-with-retries firewall-rules create "${MASTER_NAME}-https" \
--project "${PROJECT}" \
--network "${NETWORK}" \
--source-ranges "0.0.0.0/0" \
--target-tags "${MASTER_TAG}" \
--allow "tcp:443"
}
# Command to be executed is '$1'.
# No. of retries is '$2' (if provided) or 1 (default).
function execute-cmd-on-master-with-retries() {
RETRIES="${2:-1}" run-gcloud-compute-with-retries ssh "${MASTER_NAME}" --zone="${ZONE}" --project="${PROJECT}" --command="$1"
}
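# Example (hypothetical command): run a command on the master with up to 3 retries:
#   execute-cmd-on-master-with-retries "sudo systemctl restart kubelet" 3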
function copy-files() {
  run-gcloud-compute-with-retries scp --recurse --zone="${ZONE}" --project="${PROJECT}" "$@"
}
function delete-master-instance-and-resources {
GCLOUD_COMMON_ARGS="--project ${PROJECT} --zone ${ZONE} --quiet"
gcloud compute instances delete "${MASTER_NAME}" \
${GCLOUD_COMMON_ARGS} || true
gcloud compute disks delete "${MASTER_NAME}-pd" \
${GCLOUD_COMMON_ARGS} || true
gcloud compute disks delete "${MASTER_NAME}-event-pd" \
${GCLOUD_COMMON_ARGS} &> /dev/null || true
gcloud compute addresses delete "${MASTER_NAME}-ip" \
--project "${PROJECT}" \
--region "${REGION}" \
--quiet || true
gcloud compute firewall-rules delete "${MASTER_NAME}-https" \
--project "${PROJECT}" \
--quiet || true
if [ "${SEPARATE_EVENT_MACHINE:-false}" == "true" ]; then
gcloud compute instances delete "${EVENT_STORE_NAME}" \
${GCLOUD_COMMON_ARGS} || true
gcloud compute disks delete "${EVENT_STORE_NAME}-pd" \
${GCLOUD_COMMON_ARGS} || true
fi
}
|
tizhou86/kubernetes
|
test/kubemark/gce/util.sh
|
Shell
|
apache-2.0
| 4,714 |
##############################################################################
# A descriptive listing of core Gradle commands
##############################################################################
gradle-or-gradlew() {
if [ -f ./gradlew ] ; then
echo "executing gradlew instead of gradle";
./gradlew "$@";
else
gradle "$@";
fi
}
alias gradle=gradle-or-gradlew;
function _gradle_core_commands() {
local ret=1 state
_arguments ':subcommand:->subcommand' && ret=0
case $state in
subcommand)
subcommands=(
"properties:Display all project properties"
"tasks:Calculate and display all tasks"
"dependencies:Calculate and display all dependencies"
"projects:Discover and display all sub-projects"
"build:Build the project"
"help:Display help"
)
_describe -t subcommands 'gradle subcommands' subcommands && ret=0
esac
return ret
}
function _gradle_arguments() {
_arguments -C \
'-a[Do not rebuild project dependencies]' \
'-b[Specifies the build file]' \
'-c[Specifies the settings file]' \
'-d[Log at the debug level]' \
'-g[Specifies the Gradle user home directory]' \
'-h[Shows a help message]' \
'-i[Set log level to INFO]' \
'-m[Runs the build with all task actions disabled]' \
'-p[Specifies the start directory for Gradle]' \
'-q[Log errors only]' \
'-s[Print out the stacktrace also for user exceptions]' \
'-t[Continuous mode. Automatically re-run build after changes]' \
'-u[Don''t search in parent directories for a settings.gradle file]' \
'-v[Prints Gradle version info]' \
'-x[Specify a task to be excluded]' \
'-D[Set a system property]' \
'-I[Specifies an initialization script]' \
'-P[Sets a project property of the root project]' \
'-S[Print out the full (very verbose) stacktrace]' \
'--build-file[Specifies the build file]' \
'--configure-on-demand[Only relevant projects are configured]' \
'--console[Type of console output to generate (plain, auto, or rich)]' \
'--continue[Continues task execution after a task failure]' \
'--continuous[Continuous mode. Automatically re-run build after changes]' \
'--daemon[Use the Gradle Daemon]' \
'--debug[Log at the debug level]' \
'--dry-run[Runs the build with all task actions disabled]' \
'--exclude-task[Specify a task to be excluded]' \
'--full-stacktrace[Print out the full (very verbose) stacktrace]' \
'--gradle-user-home[Specifies the Gradle user home directory]' \
'--gui[Launches the Gradle GUI app (Deprecated)]' \
'--help[Shows a help message]' \
'--include-build[Run the build as a composite, including the specified build]' \
'--info[Set log level to INFO]' \
'--init-script[Specifies an initialization script]' \
'--max-workers[Set the maximum number of workers that Gradle may use]' \
'--no-daemon[Do not use the Gradle Daemon]' \
'--no-rebuild[Do not rebuild project dependencies]' \
'--no-search-upwards[Don''t search in parent directories for a settings.gradle file]' \
'--offline[Build without accessing network resources]' \
'--parallel[Build projects in parallel]' \
'--profile[Profile build time and create report]' \
'--project-cache-dir[Specifies the project-specific cache directory]' \
'--project-dir[Specifies the start directory for Gradle]' \
'--project-prop[Sets a project property of the root project]' \
'--quiet[Log errors only]' \
'--recompile-scripts[Forces scripts to be recompiled, bypassing caching]' \
'--refresh-dependencies[Refresh the state of dependencies]' \
'--rerun-task[Specifies that any task optimization is ignored]' \
'--settings-file[Specifies the settings file]' \
'--stacktrace[Print out the stacktrace also for user exceptions]' \
'--status[Print Gradle Daemon status]' \
'--stop[Stop all Gradle Daemons]' \
'--system-prop[Set a system property]' \
'--version[Prints Gradle version info]' \
'*::command:->command' \
&& return 0
}
##############################################################################
# Examine the build.gradle file to see if its timestamp has changed;
# and if so, regenerate the .gradle_tasks cache file
##############################################################################
_gradle_does_task_list_need_generating () {
[[ ! -f .gradletasknamecache ]] || [[ build.gradle -nt .gradletasknamecache || build.gradle.kts -nt .gradletasknamecache ]]
}
##############
# Parse the tasks from `gradle(w) tasks --all` and return them to the calling function.
# All lines in the output from gradle(w) that are between /^-+$/ and /^\s*$/
# are considered to be tasks. If and when gradle adds support for listing tasks
# for programmatic parsing, this method can be deprecated.
##############
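# Illustrative fragment (hypothetical tasks) of the `gradle tasks --all` output
# the parser below consumes; the dashed line opens a task block and the blank
# line closes it:
#   ------------------------------------------------------------
#   assemble - Assembles the outputs of this project.
#   build - Assembles and tests this project.
#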
_gradle_parse_tasks () {
lines_might_be_tasks=false
task_name_buffer=""
while read -r line; do
if [[ $line =~ ^-+$ ]]; then
lines_might_be_tasks=true
# Empty buffer, because it contains items that are not tasks
task_name_buffer=""
elif [[ $line =~ ^\s*$ ]]; then
if [[ "$lines_might_be_tasks" = true ]]; then
# If a newline is found, echo the buffer to the calling function
while read -r task; do
echo $task | awk '/[a-zA-Z0-9:-]+/ {print $1}'
done <<< "$task_name_buffer"
# Empty buffer, because we are done with the tasks
task_name_buffer=""
fi
lines_might_be_tasks=false
elif [[ "$lines_might_be_tasks" = true ]]; then
task_name_buffer="${task_name_buffer}\n${line}"
fi
done <<< "$1"
}
##############
# Gradle tasks from subprojects are allowed to be executed without specifying
# the subproject; that task will then be called on all subprojects.
# gradle(w) tasks --all only lists tasks per subproject, but when autocompleting
# we often want to be able to run a specific task on all subprojects, e.g.
# "gradle clean".
# This function uses the list of tasks from "gradle tasks --all", and for each
# line grabs everything after the last ":" and combines that output with the original
# output. The combined list is returned as the result of this function.
##############
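# Illustrative case (hypothetical task): the list entry "app:clean" also yields
# the bare name "clean", so completion can offer the task for all subprojects.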
_gradle_parse_and_extract_tasks () {
# All tasks
tasks=$(_gradle_parse_tasks "$1")
# Task name without sub project(s) prefix
simple_tasks=$(echo $tasks | awk 'BEGIN { FS = ":" } { print $NF }')
echo "$tasks\n$simple_tasks"
}
##############################################################################
# Discover the gradle tasks by running "gradle tasks --all"
##############################################################################
_gradle_tasks () {
if [[ -f build.gradle || -f build.gradle.kts || -f settings.gradle || -f settings.gradle.kts ]]; then
_gradle_arguments
if _gradle_does_task_list_need_generating; then
_gradle_parse_and_extract_tasks "$(gradle tasks --all)" > .gradletasknamecache
fi
compadd -X "==== Gradle Tasks ====" $(cat .gradletasknamecache)
fi
}
_gradlew_tasks () {
if [[ -f build.gradle || -f build.gradle.kts || -f settings.gradle || -f settings.gradle.kts ]]; then
_gradle_arguments
if _gradle_does_task_list_need_generating; then
_gradle_parse_and_extract_tasks "$(./gradlew tasks --all)" > .gradletasknamecache
fi
compadd -X "==== Gradlew Tasks ====" $(cat .gradletasknamecache)
fi
}
##############################################################################
# Register the completions against the gradle and gradlew commands
##############################################################################
compdef _gradle_tasks gradle
compdef _gradlew_tasks gradlew
compdef _gradlew_tasks gw
|
twleung/oh-my-zsh
|
plugins/gradle/gradle.plugin.zsh
|
Shell
|
mit
| 7,783 |
#!/bin/sh
. "${TEST_SCRIPTS_DIR}/unit.sh"
define_test "3 nodes, delete middle, add 2 nodes, less debug"
CTDB_DEBUGLEVEL=0
setup_nodes <<EOF
192.168.20.41
#192.168.20.42
192.168.20.43
192.168.20.44
192.168.20.45
EOF
ok_null
simple_test <<EOF
NODEMAP
0 192.168.20.41 0x0 CURRENT RECMASTER
1 192.168.20.42 0x1
2 192.168.20.43 0x0
VNNMAP
654321
0
2
EOF
|
SpectraLogic/samba
|
ctdb/tests/tool/stubby.reloadnodes.022.sh
|
Shell
|
gpl-3.0
| 384 |
#!/bin/sh
FLOPS='
*.c
'
. ../../tools/flint_skel.sh
|
gquintard/Varnish-Cache
|
lib/libvcc/flint.sh
|
Shell
|
bsd-2-clause
| 55 |
#!/bin/tcsh
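# Kill all running collectroute processes; the perl one-liner scrapes their
# PIDs out of the `ps -ax` listing.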
sudo kill `ps -ax | grep collectroute | perl -ne '/^\s*(\d+)\s+/; print "$1 ";'`
sudo rm -f /tmp/clicksocket
|
olanb7/final-proj
|
conf/ron/stop-traceroute.sh
|
Shell
|
gpl-2.0
| 121 |
#!/bin/sh
test_description='test basic hash implementation'
. ./test-lib.sh
test_expect_success 'test basic SHA-1 hash values' '
test-tool sha1 </dev/null >actual &&
grep da39a3ee5e6b4b0d3255bfef95601890afd80709 actual &&
printf "a" | test-tool sha1 >actual &&
grep 86f7e437faa5a7fce15d1ddcb9eaeaea377667b8 actual &&
printf "abc" | test-tool sha1 >actual &&
grep a9993e364706816aba3e25717850c26c9cd0d89d actual &&
printf "message digest" | test-tool sha1 >actual &&
grep c12252ceda8be8994d5fa0290a47231c1d16aae3 actual &&
printf "abcdefghijklmnopqrstuvwxyz" | test-tool sha1 >actual &&
grep 32d10c7b8cf96570ca04ce37f2a19d84240d3a89 actual &&
perl -e "$| = 1; print q{aaaaaaaaaa} for 1..100000;" | \
test-tool sha1 >actual &&
grep 34aa973cd4c4daa4f61eeb2bdbad27316534016f actual &&
printf "blob 0\0" | test-tool sha1 >actual &&
grep e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 actual &&
printf "blob 3\0abc" | test-tool sha1 >actual &&
grep f2ba8f84ab5c1bce84a7b441cb1959cfc7093b7f actual &&
printf "tree 0\0" | test-tool sha1 >actual &&
grep 4b825dc642cb6eb9a060e54bf8d69288fbee4904 actual
'
test_expect_success 'test basic SHA-256 hash values' '
test-tool sha256 </dev/null >actual &&
grep e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 actual &&
printf "a" | test-tool sha256 >actual &&
grep ca978112ca1bbdcafac231b39a23dc4da786eff8147c4e72b9807785afee48bb actual &&
printf "abc" | test-tool sha256 >actual &&
grep ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad actual &&
printf "message digest" | test-tool sha256 >actual &&
grep f7846f55cf23e14eebeab5b4e1550cad5b509e3348fbc4efa3a1413d393cb650 actual &&
printf "abcdefghijklmnopqrstuvwxyz" | test-tool sha256 >actual &&
grep 71c480df93d6ae2f1efad1447c66c9525e316218cf51fc8d9ed832f2daf18b73 actual &&
# Try to exercise the chunking code by turning autoflush on.
perl -e "$| = 1; print q{aaaaaaaaaa} for 1..100000;" | \
test-tool sha256 >actual &&
grep cdc76e5c9914fb9281a1c7e284d73e67f1809a48a497200e046d39ccc7112cd0 actual &&
perl -e "$| = 1; print q{abcdefghijklmnopqrstuvwxyz} for 1..100000;" | \
test-tool sha256 >actual &&
grep e406ba321ca712ad35a698bf0af8d61fc4dc40eca6bdcea4697962724ccbde35 actual &&
printf "blob 0\0" | test-tool sha256 >actual &&
grep 473a0f4c3be8a93681a267e3b1e9a7dcda1185436fe141f7749120a303721813 actual &&
printf "blob 3\0abc" | test-tool sha256 >actual &&
grep c1cf6e465077930e88dc5136641d402f72a229ddd996f627d60e9639eaba35a6 actual &&
printf "tree 0\0" | test-tool sha256 >actual &&
grep 6ef19b41225c5369f1c104d45d8d85efa9b057b53b14b4b9b939dd74decc5321 actual
'
test_done
|
devzero2000/git-core
|
t/t0015-hash.sh
|
Shell
|
gpl-2.0
| 2,640 |
#!/bin/bash
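# Point t/date.t at the env-resolved perl instead of the hard-coded
# "perl -w" shebang (the -w switch is dropped in the process).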
sed -i.bak 's|perl -w|/usr/bin/env perl|' t/date.t
head t/date.t
# If it has Build.PL use that, otherwise use Makefile.PL
if [ -f Build.PL ]; then
perl Build.PL
perl ./Build
perl ./Build test
# Make sure this goes in site
perl ./Build install --installdirs site
elif [ -f Makefile.PL ]; then
# Make sure this goes in site
perl Makefile.PL INSTALLDIRS=site
make
make test
make install
else
echo 'Unable to find Build.PL or Makefile.PL. You need to modify build.sh.'
exit 1
fi
|
joachimwolff/bioconda-recipes
|
recipes/perl-http-date/build.sh
|
Shell
|
mit
| 539 |
#
# Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
#
# @bug 4088176
# @summary Test reading an evolved class serialization into the original class
rm *.class tmp.ser
javac WriteAddedField.java
java ${TESTVMOPTS} WriteAddedField
rm *.class
javac ReadAddedField.java
java ${TESTVMOPTS} ReadAddedField
|
stain/jdk8u
|
test/java/io/Serializable/evolution/AddedExternField/run.sh
|
Shell
|
gpl-2.0
| 1,285 |
#!/usr/bin/env bash
# Copyright 2009 The Go Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
# Generate Go code listing errors and other #defined constant
# values (ENAMETOOLONG etc.), by asking the preprocessor
# about the definitions.
unset LANG
export LC_ALL=C
export LC_CTYPE=C
if test -z "$GOARCH" -o -z "$GOOS"; then
echo 1>&2 "GOARCH or GOOS not defined in environment"
exit 1
fi
# Check that we are using the new build system if we should
if [[ "$GOOS" = "linux" ]] && [[ "$GOLANG_SYS_BUILD" != "docker" ]]; then
echo 1>&2 "In the Docker based build system, mkerrors should not be called directly."
echo 1>&2 "See README.md"
exit 1
fi
if [[ "$GOOS" = "aix" ]]; then
CC=${CC:-gcc}
else
CC=${CC:-cc}
fi
if [[ "$GOOS" = "solaris" ]]; then
# Assumes GNU versions of utilities in PATH.
export PATH=/usr/gnu/bin:$PATH
fi
uname=$(uname)
includes_AIX='
#include <net/if.h>
#include <net/netopt.h>
#include <netinet/ip_mroute.h>
#include <sys/protosw.h>
#include <sys/stropts.h>
#include <sys/mman.h>
#include <sys/poll.h>
#include <sys/termio.h>
#include <termios.h>
#include <fcntl.h>
#define AF_LOCAL AF_UNIX
'
includes_Darwin='
#define _DARWIN_C_SOURCE
#define KERNEL
#define _DARWIN_USE_64_BIT_INODE
#include <stdint.h>
#include <sys/attr.h>
#include <sys/types.h>
#include <sys/event.h>
#include <sys/ptrace.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/utsname.h>
#include <sys/wait.h>
#include <sys/xattr.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_types.h>
#include <net/route.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <termios.h>
'
includes_DragonFly='
#include <sys/types.h>
#include <sys/event.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/wait.h>
#include <sys/ioctl.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_types.h>
#include <net/route.h>
#include <netinet/in.h>
#include <termios.h>
#include <netinet/ip.h>
#include <net/ip_mroute/ip_mroute.h>
'
includes_FreeBSD='
#include <sys/capsicum.h>
#include <sys/param.h>
#include <sys/types.h>
#include <sys/event.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/wait.h>
#include <sys/ioctl.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_types.h>
#include <net/route.h>
#include <netinet/in.h>
#include <termios.h>
#include <netinet/ip.h>
#include <netinet/ip_mroute.h>
#include <sys/extattr.h>
#if __FreeBSD__ >= 10
#define IFT_CARP 0xf8 // IFT_CARP is deprecated in FreeBSD 10
#undef SIOCAIFADDR
#define SIOCAIFADDR _IOW(105, 26, struct oifaliasreq) // ifaliasreq contains if_data
#undef SIOCSIFPHYADDR
#define SIOCSIFPHYADDR _IOW(105, 70, struct oifaliasreq) // ifaliasreq contains if_data
#endif
'
includes_Linux='
#define _LARGEFILE_SOURCE
#define _LARGEFILE64_SOURCE
#ifndef __LP64__
#define _FILE_OFFSET_BITS 64
#endif
#define _GNU_SOURCE
// <sys/ioctl.h> is broken on powerpc64, as it fails to include definitions of
// these structures. We just include them copied from <bits/termios.h>.
#if defined(__powerpc__)
struct sgttyb {
char sg_ispeed;
char sg_ospeed;
char sg_erase;
char sg_kill;
short sg_flags;
};
struct tchars {
char t_intrc;
char t_quitc;
char t_startc;
char t_stopc;
char t_eofc;
char t_brkc;
};
struct ltchars {
char t_suspc;
char t_dsuspc;
char t_rprntc;
char t_flushc;
char t_werasc;
char t_lnextc;
};
#endif
#include <bits/sockaddr.h>
#include <sys/epoll.h>
#include <sys/eventfd.h>
#include <sys/inotify.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/prctl.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/time.h>
#include <sys/signalfd.h>
#include <sys/socket.h>
#include <sys/xattr.h>
#include <linux/errqueue.h>
#include <linux/if.h>
#include <linux/if_alg.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/if_ppp.h>
#include <linux/if_tun.h>
#include <linux/if_packet.h>
#include <linux/if_addr.h>
#include <linux/falloc.h>
#include <linux/filter.h>
#include <linux/fs.h>
#include <linux/kexec.h>
#include <linux/keyctl.h>
#include <linux/magic.h>
#include <linux/memfd.h>
#include <linux/module.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/netlink.h>
#include <linux/net_namespace.h>
#include <linux/perf_event.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/rtnetlink.h>
#include <linux/ptrace.h>
#include <linux/sched.h>
#include <linux/seccomp.h>
#include <linux/sockios.h>
#include <linux/wait.h>
#include <linux/icmpv6.h>
#include <linux/serial.h>
#include <linux/can.h>
#include <linux/vm_sockets.h>
#include <linux/taskstats.h>
#include <linux/genetlink.h>
#include <linux/watchdog.h>
#include <linux/hdreg.h>
#include <linux/rtc.h>
#include <linux/if_xdp.h>
#include <mtd/ubi-user.h>
#include <net/route.h>
#if defined(__sparc__)
// On sparc{,64}, the kernel defines struct termios2 itself which clashes with the
// definition in glibc. As only the error constants are needed here, include the
// generic termbits.h (which is included by termbits.h on sparc).
#include <asm-generic/termbits.h>
#else
#include <asm/termbits.h>
#endif
#ifndef MSG_FASTOPEN
#define MSG_FASTOPEN 0x20000000
#endif
#ifndef PTRACE_GETREGS
#define PTRACE_GETREGS 0xc
#endif
#ifndef PTRACE_SETREGS
#define PTRACE_SETREGS 0xd
#endif
#ifndef SOL_NETLINK
#define SOL_NETLINK 270
#endif
#ifdef SOL_BLUETOOTH
// SPARC includes this in /usr/include/sparc64-linux-gnu/bits/socket.h
// but it is already in bluetooth_linux.go
#undef SOL_BLUETOOTH
#endif
// Certain constants are missing from the fs/crypto UAPI
#define FS_KEY_DESC_PREFIX "fscrypt:"
#define FS_KEY_DESC_PREFIX_SIZE 8
#define FS_MAX_KEY_SIZE 64
'
includes_NetBSD='
#include <sys/types.h>
#include <sys/param.h>
#include <sys/event.h>
#include <sys/extattr.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/termios.h>
#include <sys/ttycom.h>
#include <sys/wait.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_types.h>
#include <net/route.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip_mroute.h>
#include <netinet/if_ether.h>
// Needed since <sys/param.h> refers to it...
#define schedppq 1
'
includes_OpenBSD='
#include <sys/types.h>
#include <sys/param.h>
#include <sys/event.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/termios.h>
#include <sys/ttycom.h>
#include <sys/unistd.h>
#include <sys/wait.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <net/route.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip_mroute.h>
#include <netinet/if_ether.h>
#include <net/if_bridge.h>
// We keep some constants not supported in OpenBSD 5.5 and beyond for
// the promise of compatibility.
#define EMUL_ENABLED 0x1
#define EMUL_NATIVE 0x2
#define IPV6_FAITH 0x1d
#define IPV6_OPTIONS 0x1
#define IPV6_RTHDR_STRICT 0x1
#define IPV6_SOCKOPT_RESERVED1 0x3
#define SIOCGIFGENERIC 0xc020693a
#define SIOCSIFGENERIC 0x80206939
#define WALTSIG 0x4
'
includes_SunOS='
#include <limits.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <sys/ioctl.h>
#include <sys/mkdev.h>
#include <net/bpf.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_types.h>
#include <net/route.h>
#include <netinet/in.h>
#include <termios.h>
#include <netinet/ip.h>
#include <netinet/ip_mroute.h>
'
includes='
#include <sys/types.h>
#include <sys/file.h>
#include <fcntl.h>
#include <dirent.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>
#include <errno.h>
#include <sys/signal.h>
#include <signal.h>
#include <sys/resource.h>
#include <time.h>
'
ccflags="$@"
# Write go tool cgo -godefs input.
(
echo package unix
echo
echo '/*'
indirect="includes_$(uname)"
echo "${!indirect} $includes"
echo '*/'
echo 'import "C"'
echo 'import "syscall"'
echo
echo 'const ('
# The gcc command line prints all the #defines
# it encounters while processing the input
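	# e.g. a hypothetical "#define EPERM 1" that survives the awk filters
	# below is emitted as the Go constant line "EPERM = C.EPERM".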
echo "${!indirect} $includes" | $CC -x c - -E -dM $ccflags |
awk '
$1 != "#define" || $2 ~ /\(/ || $3 == "" {next}
$2 ~ /^E([ABCD]X|[BIS]P|[SD]I|S|FL)$/ {next} # 386 registers
$2 ~ /^(SIGEV_|SIGSTKSZ|SIGRT(MIN|MAX))/ {next}
$2 ~ /^(SCM_SRCRT)$/ {next}
$2 ~ /^(MAP_FAILED)$/ {next}
$2 ~ /^ELF_.*$/ {next}# <asm/elf.h> contains ELF_ARCH, etc.
$2 ~ /^EXTATTR_NAMESPACE_NAMES/ ||
$2 ~ /^EXTATTR_NAMESPACE_[A-Z]+_STRING/ {next}
$2 !~ /^ECCAPBITS/ &&
$2 !~ /^ETH_/ &&
$2 !~ /^EPROC_/ &&
$2 !~ /^EQUIV_/ &&
$2 !~ /^EXPR_/ &&
$2 ~ /^E[A-Z0-9_]+$/ ||
$2 ~ /^B[0-9_]+$/ ||
$2 ~ /^(OLD|NEW)DEV$/ ||
$2 == "BOTHER" ||
$2 ~ /^CI?BAUD(EX)?$/ ||
$2 == "IBSHIFT" ||
$2 ~ /^V[A-Z0-9]+$/ ||
$2 ~ /^CS[A-Z0-9]/ ||
$2 ~ /^I(SIG|CANON|CRNL|UCLC|EXTEN|MAXBEL|STRIP|UTF8)$/ ||
$2 ~ /^IGN/ ||
$2 ~ /^IX(ON|ANY|OFF)$/ ||
$2 ~ /^IN(LCR|PCK)$/ ||
$2 !~ "X86_CR3_PCID_NOFLUSH" &&
$2 ~ /(^FLU?SH)|(FLU?SH$)/ ||
$2 ~ /^C(LOCAL|READ|MSPAR|RTSCTS)$/ ||
$2 == "BRKINT" ||
$2 == "HUPCL" ||
$2 == "PENDIN" ||
$2 == "TOSTOP" ||
$2 == "XCASE" ||
$2 == "ALTWERASE" ||
$2 == "NOKERNINFO" ||
$2 ~ /^PAR/ ||
$2 ~ /^SIG[^_]/ ||
$2 ~ /^O[CNPFPL][A-Z]+[^_][A-Z]+$/ ||
$2 ~ /^(NL|CR|TAB|BS|VT|FF)DLY$/ ||
$2 ~ /^(NL|CR|TAB|BS|VT|FF)[0-9]$/ ||
$2 ~ /^O?XTABS$/ ||
$2 ~ /^TC[IO](ON|OFF)$/ ||
$2 ~ /^IN_/ ||
$2 ~ /^LOCK_(SH|EX|NB|UN)$/ ||
$2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|ICMP6|TCP|EVFILT|NOTE|EV|SHUT|PROT|MAP|MFD|T?PACKET|MSG|SCM|MCL|DT|MADV|PR)_/ ||
$2 ~ /^TP_STATUS_/ ||
$2 ~ /^FALLOC_/ ||
$2 == "ICMPV6_FILTER" ||
$2 == "SOMAXCONN" ||
$2 == "NAME_MAX" ||
$2 == "IFNAMSIZ" ||
$2 ~ /^CTL_(HW|KERN|MAXNAME|NET|QUERY)$/ ||
$2 ~ /^KERN_(HOSTNAME|OS(RELEASE|TYPE)|VERSION)$/ ||
$2 ~ /^HW_MACHINE$/ ||
$2 ~ /^SYSCTL_VERS/ ||
$2 !~ "MNT_BITS" &&
$2 ~ /^(MS|MNT|UMOUNT)_/ ||
$2 ~ /^TUN(SET|GET|ATTACH|DETACH)/ ||
$2 ~ /^(O|F|[ES]?FD|NAME|S|PTRACE|PT)_/ ||
$2 ~ /^KEXEC_/ ||
$2 ~ /^LINUX_REBOOT_CMD_/ ||
$2 ~ /^LINUX_REBOOT_MAGIC[12]$/ ||
$2 ~ /^MODULE_INIT_/ ||
$2 !~ "NLA_TYPE_MASK" &&
$2 ~ /^(NETLINK|NLM|NLMSG|NLA|IFA|IFAN|RT|RTC|RTCF|RTN|RTPROT|RTNH|ARPHRD|ETH_P|NETNSA)_/ ||
$2 ~ /^SIOC/ ||
$2 ~ /^TIOC/ ||
$2 ~ /^TCGET/ ||
$2 ~ /^TCSET/ ||
$2 ~ /^TC(FLSH|SBRKP?|XONC)$/ ||
$2 !~ "RTF_BITS" &&
$2 ~ /^(IFF|IFT|NET_RT|RTM|RTF|RTV|RTA|RTAX)_/ ||
$2 ~ /^BIOC/ ||
$2 ~ /^RUSAGE_(SELF|CHILDREN|THREAD)/ ||
$2 ~ /^RLIMIT_(AS|CORE|CPU|DATA|FSIZE|LOCKS|MEMLOCK|MSGQUEUE|NICE|NOFILE|NPROC|RSS|RTPRIO|RTTIME|SIGPENDING|STACK)|RLIM_INFINITY/ ||
$2 ~ /^PRIO_(PROCESS|PGRP|USER)/ ||
$2 ~ /^CLONE_[A-Z_]+/ ||
$2 !~ /^(BPF_TIMEVAL)$/ &&
$2 ~ /^(BPF|DLT)_/ ||
$2 ~ /^(CLOCK|TIMER)_/ ||
$2 ~ /^CAN_/ ||
$2 ~ /^CAP_/ ||
$2 ~ /^ALG_/ ||
$2 ~ /^FS_(POLICY_FLAGS|KEY_DESC|ENCRYPTION_MODE|[A-Z0-9_]+_KEY_SIZE|IOC_(GET|SET)_ENCRYPTION)/ ||
$2 ~ /^GRND_/ ||
$2 ~ /^RND/ ||
$2 ~ /^KEY_(SPEC|REQKEY_DEFL)_/ ||
$2 ~ /^KEYCTL_/ ||
$2 ~ /^PERF_EVENT_IOC_/ ||
$2 ~ /^SECCOMP_MODE_/ ||
$2 ~ /^SPLICE_/ ||
$2 ~ /^SYNC_FILE_RANGE_/ ||
$2 !~ /^AUDIT_RECORD_MAGIC/ &&
$2 !~ /IOC_MAGIC/ &&
$2 ~ /^[A-Z][A-Z0-9_]+_MAGIC2?$/ ||
$2 ~ /^(VM|VMADDR)_/ ||
$2 ~ /^IOCTL_VM_SOCKETS_/ ||
$2 ~ /^(TASKSTATS|TS)_/ ||
$2 ~ /^CGROUPSTATS_/ ||
$2 ~ /^GENL_/ ||
$2 ~ /^STATX_/ ||
$2 ~ /^RENAME/ ||
$2 ~ /^UBI_IOC[A-Z]/ ||
$2 ~ /^UTIME_/ ||
$2 ~ /^XATTR_(CREATE|REPLACE|NO(DEFAULT|FOLLOW|SECURITY)|SHOWCOMPRESSION)/ ||
$2 ~ /^ATTR_(BIT_MAP_COUNT|(CMN|VOL|FILE)_)/ ||
$2 ~ /^FSOPT_/ ||
$2 ~ /^WDIOC_/ ||
$2 ~ /^NFN/ ||
$2 ~ /^XDP_/ ||
$2 ~ /^(HDIO|WIN|SMART)_/ ||
$2 !~ "WMESGLEN" &&
$2 ~ /^W[A-Z0-9]+$/ ||
$2 ~/^PPPIOC/ ||
$2 ~ /^BLK[A-Z]*(GET$|SET$|BUF$|PART$|SIZE)/ {printf("\t%s = C.%s\n", $2, $2)}
$2 ~ /^__WCOREFLAG$/ {next}
$2 ~ /^__W[A-Z0-9]+$/ {printf("\t%s = C.%s\n", substr($2,3), $2)}
{next}
' | sort
echo ')'
) >_const.go
# Pull out the error names for later.
errors=$(
echo '#include <errno.h>' | $CC -x c - -E -dM $ccflags |
awk '$1=="#define" && $2 ~ /^E[A-Z0-9_]+$/ { print $2 }' |
sort
)
# Pull out the signal names for later.
signals=$(
echo '#include <signal.h>' | $CC -x c - -E -dM $ccflags |
awk '$1=="#define" && $2 ~ /^SIG[A-Z0-9]+$/ { print $2 }' |
egrep -v '(SIGSTKSIZE|SIGSTKSZ|SIGRT|SIGMAX64)' |
sort
)
# Again, write regexps to files, this time matching the generated error and
# signal constant lines for the filtering below.
echo '#include <errno.h>' | $CC -x c - -E -dM $ccflags |
awk '$1=="#define" && $2 ~ /^E[A-Z0-9_]+$/ { print "^\t" $2 "[ \t]*=" }' |
sort >_error.grep
echo '#include <signal.h>' | $CC -x c - -E -dM $ccflags |
awk '$1=="#define" && $2 ~ /^SIG[A-Z0-9]+$/ { print "^\t" $2 "[ \t]*=" }' |
egrep -v '(SIGSTKSIZE|SIGSTKSZ|SIGRT|SIGMAX64)' |
sort >_signal.grep
echo '// mkerrors.sh' "$@"
echo '// Code generated by the command above; see README.md. DO NOT EDIT.'
echo
echo "// +build ${GOARCH},${GOOS}"
echo
go tool cgo -godefs -- "$@" _const.go >_error.out
cat _error.out | grep -vf _error.grep | grep -vf _signal.grep
echo
echo '// Errors'
echo 'const ('
cat _error.out | grep -f _error.grep | sed 's/=\(.*\)/= syscall.Errno(\1)/'
echo ')'
echo
echo '// Signals'
echo 'const ('
cat _error.out | grep -f _signal.grep | sed 's/=\(.*\)/= syscall.Signal(\1)/'
echo ')'
# Run C program to print error and syscall strings.
(
echo -E "
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <ctype.h>
#include <string.h>
#include <signal.h>
#define nelem(x) (sizeof(x)/sizeof((x)[0]))
enum { A = 'A', Z = 'Z', a = 'a', z = 'z' }; // avoid need for single quotes below
struct tuple {
int num;
const char *name;
};
struct tuple errors[] = {
"
for i in $errors
do
echo -E ' {'$i', "'$i'" },'
done
echo -E "
};
struct tuple signals[] = {
"
for i in $signals
do
echo -E ' {'$i', "'$i'" },'
done
# Use -E because on some systems bash builtin interprets \n itself.
echo -E '
};
static int
tuplecmp(const void *a, const void *b)
{
return ((struct tuple *)a)->num - ((struct tuple *)b)->num;
}
int
main(void)
{
int i, e;
char buf[1024], *p;
printf("\n\n// Error table\n");
printf("var errorList = [...]struct {\n");
printf("\tnum syscall.Errno\n");
printf("\tname string\n");
printf("\tdesc string\n");
printf("} {\n");
qsort(errors, nelem(errors), sizeof errors[0], tuplecmp);
for(i=0; i<nelem(errors); i++) {
e = errors[i].num;
if(i > 0 && errors[i-1].num == e)
continue;
strcpy(buf, strerror(e));
// lowercase first letter: Bad -> bad, but STREAM -> STREAM.
if(A <= buf[0] && buf[0] <= Z && a <= buf[1] && buf[1] <= z)
buf[0] += a - A;
printf("\t{ %d, \"%s\", \"%s\" },\n", e, errors[i].name, buf);
}
printf("}\n\n");
printf("\n\n// Signal table\n");
printf("var signalList = [...]struct {\n");
printf("\tnum syscall.Signal\n");
printf("\tname string\n");
printf("\tdesc string\n");
printf("} {\n");
qsort(signals, nelem(signals), sizeof signals[0], tuplecmp);
for(i=0; i<nelem(signals); i++) {
e = signals[i].num;
if(i > 0 && signals[i-1].num == e)
continue;
strcpy(buf, strsignal(e));
// lowercase first letter: Bad -> bad, but STREAM -> STREAM.
if(A <= buf[0] && buf[0] <= Z && a <= buf[1] && buf[1] <= z)
buf[0] += a - A;
// cut trailing : number.
p = strrchr(buf, ":"[0]);
if(p)
*p = '\0';
printf("\t{ %d, \"%s\", \"%s\" },\n", e, signals[i].name, buf);
}
printf("}\n\n");
return 0;
}
'
) >_errors.c
$CC $ccflags -o _errors _errors.c && $GORUN ./_errors && rm -f _errors.c _errors _const.go _error.grep _signal.grep _error.out
|
sigma-random/bettercap
|
vendor/golang.org/x/sys/unix/mkerrors.sh
|
Shell
|
gpl-3.0
| 16,318 |
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0+
#
# Alternate sleeping and spinning on randomly selected CPUs. The purpose
# of this script is to inflict random OS jitter on a concurrently running
# test.
#
# Usage: jitter.sh me duration [ sleepmax [ spinmax ] ]
#
# me: Random-number-generator seed salt.
# duration: Time to run in seconds.
# sleepmax: Maximum microseconds to sleep, defaults to one second.
# spinmax: Maximum microseconds to spin, defaults to one millisecond.
#
# Copyright (C) IBM Corporation, 2016
#
# Authors: Paul E. McKenney <[email protected]>
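#
# Example (hypothetical values): "jitter.sh 2 120 500000 2000" injects jitter
# for two minutes, sleeping up to 0.5s and spinning up to 2ms at a time.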
me=$(($1 * 1000))
duration=$2
sleepmax=${3-1000000}
spinmax=${4-1000}
n=1
starttime=`gawk 'BEGIN { print systime(); }' < /dev/null`
nohotplugcpus=
for i in /sys/devices/system/cpu/cpu[0-9]*
do
if test -f $i/online
then
:
else
curcpu=`echo $i | sed -e 's/^[^0-9]*//'`
nohotplugcpus="$nohotplugcpus $curcpu"
fi
done
while :
do
# Check for done.
t=`gawk -v s=$starttime 'BEGIN { print systime() - s; }' < /dev/null`
if test "$t" -gt "$duration"
then
exit 0;
fi
# Set affinity to randomly selected online CPU
if cpus=`grep 1 /sys/devices/system/cpu/*/online 2>&1 |
sed -e 's,/[^/]*$,,' -e 's/^[^0-9]*//'`
then
:
else
cpus=
fi
# Do not leave out non-hot-pluggable CPUs
cpus="$cpus $nohotplugcpus"
cpumask=`awk -v cpus="$cpus" -v me=$me -v n=$n 'BEGIN {
srand(n + me + systime());
ncpus = split(cpus, ca);
curcpu = ca[int(rand() * ncpus + 1)];
mask = lshift(1, curcpu);
if (mask + 0 <= 0)
mask = 1;
printf("%#x\n", mask);
}' < /dev/null`
n=$(($n+1))
if ! taskset -p $cpumask $$ > /dev/null 2>&1
then
echo taskset failure: '"taskset -p ' $cpumask $$ '"'
exit 1
fi
# Sleep a random duration
sleeptime=`awk -v me=$me -v n=$n -v sleepmax=$sleepmax 'BEGIN {
srand(n + me + systime());
printf("%06d", int(rand() * sleepmax));
}' < /dev/null`
n=$(($n+1))
sleep .$sleeptime
# Spin a random duration
limit=`awk -v me=$me -v n=$n -v spinmax=$spinmax 'BEGIN {
srand(n + me + systime());
printf("%06d", int(rand() * spinmax));
}' < /dev/null`
n=$(($n+1))
	# {1..$limit} does not expand in bash (brace expansion precedes
	# variable expansion), so spin with an explicit counter instead.
	i=0
	while test $i -lt $limit
	do
		echo > /dev/null
		i=$(($i+1))
	done
done
exit 1
|
c0d3z3r0/linux-rockchip
|
tools/testing/selftests/rcutorture/bin/jitter.sh
|
Shell
|
gpl-2.0
| 2,157 |
#!/usr/bin/env bash
# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
export KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
source "${KUBE_ROOT}/hack/lib/init.sh"
kube::util::ensure_clean_working_dir
_tmpdir="$(kube::realpath $(mktemp -d -t verify-generated-files.XXXXXX))"
kube::util::trap_add "rm -rf ${_tmpdir}" EXIT
_tmp_gopath="${_tmpdir}/go"
_tmp_kuberoot="${_tmp_gopath}/src/k8s.io/kubernetes"
mkdir -p "${_tmp_kuberoot}/.."
cp -a "${KUBE_ROOT}" "${_tmp_kuberoot}/.."
cd "${_tmp_kuberoot}"
# clean out anything from the temp dir that's not checked in
git clean -ffxd
# regenerate any generated code
make generated_files
diff=$(git diff --name-only)
if [[ -n "${diff}" ]]; then
echo "!!! Generated code is out of date:" >&2
echo "${diff}" >&2
echo >&2
echo "Please run make generated_files." >&2
exit 1
fi
|
childsb/origin
|
vendor/k8s.io/kubernetes/hack/verify-generated-files.sh
|
Shell
|
apache-2.0
| 1,409 |
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0+
#
# Create a spreadsheet from torture-test Kconfig options and kernel boot
# parameters. Run this in the directory containing the scenario files.
#
# Usage: config2csv path.csv [ "scenario1 scenario2 ..." ]
#
# By default, this script will take the list of scenarios from the CFLIST
# file in that directory, otherwise it will consider only the scenarios
# specified on the command line. It will examine each scenario's file
# and also its .boot file, if present, and create a column in the .csv
# output file. Note that "CFLIST" is a synonym for all the scenarios in the
# CFLIST file, which allows easy comparison of those scenarios with selected
# scenarios such as BUSTED that are normally omitted from CFLIST files.
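#
# Example (hypothetical mix): "config2csv out.csv 'CFLIST BUSTED'" compares
# every default scenario against BUSTED in out.csv.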
csvout=${1}
if test -z "$csvout"
then
echo "Need .csv output file as first argument."
exit 1
fi
shift
defaultconfigs="`tr '\012' ' ' < CFLIST`"
if test "$#" -eq 0
then
scenariosarg=$defaultconfigs
else
scenariosarg=$*
fi
scenarios="`echo $scenariosarg | sed -e "s/\<CFLIST\>/$defaultconfigs/g"`"
T=/tmp/config2csv.sh.$$
trap 'rm -rf $T' 0
mkdir $T
cat << '---EOF---' >> $T/p.awk
END {
---EOF---
for i in $scenarios
do
echo ' s["'$i'"] = 1;' >> $T/p.awk
grep -v '^#' < $i | grep -v '^ *$' > $T/p
if test -r $i.boot
then
tr -s ' ' '\012' < $i.boot | grep -v '^#' >> $T/p
fi
sed -e 's/^[^=]*$/&=?/' < $T/p |
sed -e 's/^\([^=]*\)=\(.*\)$/\tp["\1:'"$i"'"] = "\2";\n\tc["\1"] = 1;/' >> $T/p.awk
done
cat << '---EOF---' >> $T/p.awk
ns = asorti(s, ss);
nc = asorti(c, cs);
for (j = 1; j <= ns; j++)
printf ",\"%s\"", ss[j];
printf "\n";
for (i = 1; i <= nc; i++) {
printf "\"%s\"", cs[i];
for (j = 1; j <= ns; j++) {
printf ",\"%s\"", p[cs[i] ":" ss[j]];
}
printf "\n";
}
}
---EOF---
awk -f $T/p.awk < /dev/null > $T/p.csv
cp $T/p.csv $csvout
|
Linutronix/linux
|
tools/testing/selftests/rcutorture/bin/config2csv.sh
|
Shell
|
gpl-2.0
| 1,842 |
#!/bin/sh
# ver_test_5.sh -- test that symbol has correct version
# Copyright 2008 Free Software Foundation, Inc.
# Written by Ian Lance Taylor <[email protected]>.
# This file is part of gold.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
# MA 02110-1301, USA.
# This file goes with ver_test_4.script and ver_test_5.script. The
# symbol t2_2 is not defined when ver_test_5.script is used.
check()
{
if ! grep -q "$2" "$1"
then
echo "Did not find expected symbol in $1:"
echo " $2"
echo ""
echo "Actual output below:"
cat "$1"
exit 1
fi
}
check ver_test_5.syms "t3_2@@VER5"
check ver_test_5.syms "t2_2@VER2"
exit 0
|
HackLinux/goblin-core
|
riscv/llvm/3.5/binutils-2.21.1/gold/testsuite/ver_test_5.sh
|
Shell
|
bsd-3-clause
| 1,280 |
#!/bin/sh
umask 077
YEAR="`date +%Y`"
DATE="`date +%Y-%m-%d`"
myerror()
{
echo "Error: $*" 1>&2
exit 1
}
archive_log()
{
FILE="${1}"
DEST="${2}"
if [ -f "${DEST}" -o -f "${DEST}.gz" ]; then
echo "Skipping ${FILE}: Archive already exists" 1>&2
else
echo "Archiving file ${FILE} to ${DEST}"
mv "${FILE}" "${DEST}"
touch "${FILE}"
LOGS="${LOGS} ${DEST}"
fi
}
restart_syslog()
{
for pid in /var/run/syslogd.pid /var/run/rsyslogd.pid; do
if [ -s "${pid}" ]; then
kill -HUP `cat ${pid}`
sleep 2
return
fi
done
myerror "Cannot find syslog pid file"
}
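# Example (hypothetical layout): "logarchiver.sh /var/log messages httpd"
# archives /var/log/messages plus every *.log under /var/log/httpd/.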
[ $# -ge 2 ] || myerror "Usage: `basename $0` <basedir> <file|dir> ..."
LOGDIR="${1}"
shift
while [ "$*" ]; do
if [ -f "${LOGDIR}/${1}" ]; then
dstdir=${LOGDIR}/archive/${YEAR}
dstfile=${dstdir}/`basename ${1}`.${DATE}
[ -d "${dstdir}" ] || mkdir -p ${dstdir}
archive_log ${LOGDIR}/${1} ${dstfile}
elif [ -d "${LOGDIR}/${1}" ]; then
for f in ${LOGDIR}/${1}/*.log; do
if [ -f "${f}" ]; then
dstdir=${LOGDIR}/archive/${1}/${YEAR}
dstfile=${dstdir}/`basename ${f}`.${DATE}
[ -d "${dstdir}" ] || mkdir -p ${dstdir}
archive_log ${f} ${dstfile}
else
echo "Skipping ${f}: not a file" 1>&2
fi
done
else
echo "Skipping ${1}: not a file or directory" 1>&2
fi
shift
done
restart_syslog
for log in ${LOGS}; do
nice gzip -f ${log} || myerror "Error while gzipping ${log}"
loggz="`basename ${log}`.gz"
( cd `dirname ${log}` && openssl sha1 -out ${loggz}.sha1 ${loggz} )
done
|
jkryorg/ansible-roles
|
rsyslog/files/logarchiver.sh
|
Shell
|
isc
| 1,752 |
# set ZEEGUU_WEB_CONFIG in your .bashrc if you don't want to go with the default
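# e.g. (hypothetical path): export ZEEGUU_WEB_CONFIG="$HOME/configs/zeeguu_web.cfg"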
[ -z "$ZEEGUU_WEB_CONFIG" ] && export ZEEGUU_WEB_CONFIG=`pwd`/default_web.cfg
python -m zeeguu_web
|
MrAlexDeluxe/Zeeguu-Web
|
server_test.sh
|
Shell
|
mit
| 178 |
#!/bin/bash
SHAREDDIR="/home/server/externals"
LOGDIR="$SHAREDDIR/logs"
STARTUPSCRIPTSDIR="/home/server"
APPLOG="node.log"
CURRENTSCREENLIST=$(screen -list)
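# Usage (assumed entrypoint invocation): "entrypoint.sh start" or
# "entrypoint.sh stop"; any other arguments are exec'd verbatim below.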
function stop {
pkill "node.sh"
}
function start {
if [ -e "$SHAREDDIR/$APPLOG" ]
then
        # create the archive target dir (the shared dir must already exist here)
        mkdir -p "$LOGDIR"
CURRENTAPPLOGDATE=$(date -r "$SHAREDDIR/$APPLOG" +%Y%m%d_%H%M%S)
mv "$SHAREDDIR/$APPLOG" "$LOGDIR/${CURRENTAPPLOGDATE}_${APPLOG}"
fi
exec "$STARTUPSCRIPTSDIR/node.sh"
}
if [ "$1" == "start" ]
then
echo "Starting node server..."
start
echo "finished"
elif [ "$1" == "stop" ]
then
echo "Stopping node server..."
stop
echo "finished"
else
exec "$@"
fi
|
falcoprescher/Echtzeitanwendung_Node_JS
|
dockerplayground/server/realtimeapp_node/config/entrypoint.sh
|
Shell
|
mit
| 656 |
#!/bin/bash -eu
source config/ec2_test
YUM_PACKAGES="git gcc-c++ cmake ruby-devel"
GEM_PACKAGES="rgl"
SHOGI_SERVER_DIR="shogi-server"
BACKUP_SCRIPT="test_backup.sh"
LOCAL_BACKUP_SCRIPT="./tools/${BACKUP_SCRIPT}"
scp -i ${EC2_SSH_KEY} ${REMOTE_BACKUP_KEY} ec2-user@${EC2_HOST}:~/.ssh/id_rsa
scp -i ${EC2_SSH_KEY} ${LOCAL_BACKUP_SCRIPT} ec2-user@${EC2_HOST}:~/${BACKUP_SCRIPT}
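# The heredoc below is unquoted, so ${...} references expand locally before
# the script is executed on the EC2 host.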
ssh -i ${EC2_SSH_KEY} -t -t ec2-user@${EC2_HOST} <<EOF
#!/bin/bash -eu
chmod 600 .ssh/id_rsa
sudo yum update -y
sudo yum install -y ${YUM_PACKAGES}
gem install --remote ${GEM_PACKAGES}
git clone --branch "${SHOGI_SERVER_BRANCH}" --depth 1 "${SHOGI_SERVER_REPO}" "${SHOGI_SERVER_DIR}"
cd ${SHOGI_SERVER_DIR}
nohup ruby shogi-server test 4081 > stdout.log 2> stderr.log < /dev/null &
nohup ../${BACKUP_SCRIPT} ${REMOTE_BACKUP_HOST} ${REMOTE_BACKUP_NAME} < /dev/null &
cd ..
exit
EOF
|
sunfish-shogi/sunfish4
|
tools/ec2_test_setup.sh
|
Shell
|
mit
| 876 |
#!/usr/bin/env bash
scriptpath=$(realpath "$0")
backendpath=$(dirname "$scriptpath")
buildpath=$(dirname "$backendpath")
mainpath=$(dirname "$buildpath")
input=$mainpath
if [ ! -d "$input" ]; then
echo "Error: Input '$input' is not a directory"
exit 2
fi
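# Serve godoc for the bind-mounted repo (/data inside the container) on
# localhost:6060; the image name and wgo usage come from the command itself.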
exec docker run --rm -i -t --user="$(id -u):$(id -g)" -p 127.0.0.1:6060:6060 -v "$input":/data blang/posty-build-backend bash -c "GOPATH=\$(wgo env GOPATH) godoc -http ':6060'"
|
blang/posty
|
build/backend/godocs.sh
|
Shell
|
mit
| 442 |