| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
| stringlengths 2 to 1.05M | stringlengths 5 to 110 | stringlengths 3 to 922 | stringclasses 1 value | stringclasses 15 values | int64 2 to 1.05M |
echo beginning dist push
# mark each file in dist as trackable
while read -r file; do
    git update-index --no-assume-unchanged "$file"
done <<< "$(find ./dist -type f)"
# remove .gitignore cache of dist files
git rm --cached -q -r dist
# force add dist (since it's in .gitignore we pass -f)
git add -f dist
# grab version number from package.json for commit message
while read -r version; do
    git commit -m "Bumping Dist to version $version"
done <<< "$(grep version package.json | awk '{print $2}' | tr -d '",')"
# push dist to master
git push origin master
# mark each file in dist as untrackable again
while read -r file; do
    git update-index --assume-unchanged "$file"
done <<< "$(find ./dist -type f)"
| jenjwong/britecharts | src/tasks/helpers/push_dist.sh | Shell | apache-2.0 | 705 |
#!/bin/sh
# (c) Copyright 2009 - 2010 Xilinx, Inc. All rights reserved.
#
# This file contains confidential and proprietary information
# of Xilinx, Inc. and is protected under U.S. and
# international copyright and other intellectual property
# laws.
#
# DISCLAIMER
# This disclaimer is not a license and does not grant any
# rights to the materials distributed herewith. Except as
# otherwise provided in a valid license issued to you by
# Xilinx, and to the maximum extent permitted by applicable
# law: (1) THESE MATERIALS ARE MADE AVAILABLE "AS IS" AND
# WITH ALL FAULTS, AND XILINX HEREBY DISCLAIMS ALL WARRANTIES
# AND CONDITIONS, EXPRESS, IMPLIED, OR STATUTORY, INCLUDING
# BUT NOT LIMITED TO WARRANTIES OF MERCHANTABILITY, NON-
# INFRINGEMENT, OR FITNESS FOR ANY PARTICULAR PURPOSE; and
# (2) Xilinx shall not be liable (whether in contract or tort,
# including negligence, or under any other theory of
# liability) for any loss or damage of any kind or nature
# related to, arising under or in connection with these
# materials, including for any direct, or any indirect,
# special, incidental, or consequential loss or damage
# (including loss of data, profits, goodwill, or any type of
# loss or damage suffered as a result of any action brought
# by a third party) even if such damage or loss was
# reasonably foreseeable or Xilinx had been advised of the
# possibility of the same.
#
# CRITICAL APPLICATIONS
# Xilinx products are not designed or intended to be fail-
# safe, or for use in any application requiring fail-safe
# performance, such as life-support or safety devices or
# systems, Class III medical devices, nuclear facilities,
# applications related to the deployment of airbags, or any
# other applications that could lead to death, personal
# injury, or severe property or environmental damage
# (individually and collectively, "Critical
# Applications"). Customer assumes the sole risk and
# liability of any use of Xilinx products in Critical
# Applications, subject only to applicable laws and
# regulations governing limitations on product liability.
#
# THIS COPYRIGHT NOTICE AND DISCLAIMER MUST BE RETAINED AS
# PART OF THIS FILE AT ALL TIMES.
# Clean up the results directory
rm -rf results
mkdir results
#Synthesize the Wrapper Files
echo 'Synthesizing example design with Synplify'
synplify_pro -batch synplify.prj -licensetype synplifypro_xilinx
# Copy the netlist generated by Coregen
echo 'Copying files from the netlist directory to the results directory'
cp ../../system_axi_vdma_0_wrapper_fifo_generator_v9_1.ngc results/
# Copy the constraints files generated by Coregen
echo 'Copying files from constraints directory to results directory'
cp ../example_design/system_axi_vdma_0_wrapper_fifo_generator_v9_1_exdes.ucf results/
cd results
echo 'Running ngdbuild'
ngdbuild -p xc7z010-clg400-3 -sd ../../../ system_axi_vdma_0_wrapper_fifo_generator_v9_1_exdes
echo 'Running map'
map system_axi_vdma_0_wrapper_fifo_generator_v9_1_exdes -o mapped.ncd
echo 'Running par'
par mapped.ncd routed.ncd
echo 'Running trce'
trce -e 10 routed.ncd mapped.pcf -o routed
echo 'Running design through bitgen'
bitgen -w routed -g UnconstrainedPins:Allow
echo 'Running netgen to create gate level VHDL model'
netgen -ofmt vhdl -sim -tm system_axi_vdma_0_wrapper_fifo_generator_v9_1_exdes -pcf mapped.pcf -w routed.ncd routed.vhd
| kennethlyn/parallella-lcd-fpga | system/implementation/system_axi_vdma_0_wrapper_fifo_generator_v9_1/implement/implement_synplify.sh | Shell | bsd-3-clause | 3,373 |
#!/usr/bin/env bash
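# Run the basic024 test: feed input.in to the Idris REPL, then compile basic024.idr, run the resulting binary, and remove the build artifacts.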
${IDRIS:-idris} $@ --quiet --port none < input.in
${IDRIS:-idris} $@ basic024.idr -o basic024
./basic024
rm -f basic024 *.ibc
| kojiromike/Idris-dev | test/basic024/run.sh | Shell | bsd-3-clause | 149 |
#!/bin/bash
FN="mirna102xgaincdf_2.18.0.tar.gz"
URLS=(
"https://bioconductor.org/packages/3.13/data/annotation/src/contrib/mirna102xgaincdf_2.18.0.tar.gz"
"https://bioarchive.galaxyproject.org/mirna102xgaincdf_2.18.0.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-mirna102xgaincdf/bioconductor-mirna102xgaincdf_2.18.0_src_all.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-mirna102xgaincdf/bioconductor-mirna102xgaincdf_2.18.0_src_all.tar.gz"
)
MD5="9091a45c2ac15c2de0263743ab334f97"
# Use a staging area in the conda dir rather than temp dirs, both to avoid
# permission issues as well as to have things downloaded in a predictable
# manner.
STAGING=$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM
mkdir -p $STAGING
TARBALL=$STAGING/$FN
SUCCESS=0
for URL in ${URLS[@]}; do
curl $URL > $TARBALL
[[ $? == 0 ]] || continue
# Platform-specific md5sum checks.
if [[ $(uname -s) == "Linux" ]]; then
if md5sum -c <<<"$MD5 $TARBALL"; then
SUCCESS=1
break
fi
else if [[ $(uname -s) == "Darwin" ]]; then
if [[ $(md5 $TARBALL | cut -f4 -d " ") == "$MD5" ]]; then
SUCCESS=1
break
fi
fi
fi
done
if [[ $SUCCESS != 1 ]]; then
echo "ERROR: post-link.sh was unable to download any of the following URLs with the md5sum $MD5:"
printf '%s\n' "${URLS[@]}"
exit 1
fi
# Install and clean up
R CMD INSTALL --library=$PREFIX/lib/R/library $TARBALL
rm $TARBALL
rmdir $STAGING
| phac-nml/bioconda-recipes | recipes/bioconductor-mirna102xgaincdf/post-link.sh | Shell | mit | 1,459 |
#!/bin/bash
if [ $# -ne 2 ]; then
echo "Usage: compile-ruby-cf.sh [ruby version] [destination]"
echo "Use RUBYGEMS_VERSION, BUNDLER_VERSION, LIBYAML_DIR"
exit 1
fi
set -e
RUBY_VERSION=$1
DESTINATION=$2
if [ -z "${LIBYAML_DIR}" ]; then LIBYAML_DIR="/var/vcap/packages/libyaml"; fi
MAJOR_RUBY_VERSION=${RUBY_VERSION:0:3}
MINOR_RUBY_VERSION=${RUBY_VERSION:0:5}
RUBY_TARBALL=ruby-${RUBY_VERSION}.tar.gz
wget ftp://ftp.ruby-lang.org/pub/ruby/${MAJOR_RUBY_VERSION}/${RUBY_TARBALL}
tar zxvf ${RUBY_TARBALL}
(
cd ruby-${RUBY_VERSION}
./configure --prefix=${DESTINATION} --disable-install-doc --with-opt-dir=${LIBYAML_DIR} --enable-load-relative
make
make install
)
if [ -z "${RUBYGEMS_VERSION}" ]; then RUBYGEMS_VERSION="1.8.24"; fi
RUBYGEMS_TARBALL="rubygems-${RUBYGEMS_VERSION}.tgz"
wget http://production.cf.rubygems.org/rubygems/${RUBYGEMS_TARBALL}
tar zxvf ${RUBYGEMS_TARBALL}
(
cd rubygems-${RUBYGEMS_VERSION}
$DESTINATION/bin/ruby setup.rb
)
if [ -z "${BUNDLER_VERSION}" ]; then BUNDLER_VERSION="1.3.2"; fi
$DESTINATION/bin/gem install bundler --version ${BUNDLER_VERSION} --no-rdoc --no-ri
if [ "$MINOR_RUBY_VERSION" = "1.8.7" -o "$MINOR_RUBY_VERSION" = "1.9.2" ]; then
(
cd $DESTINATION/bin
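# Rewrite each binstub shebang with a relocatable /bin/sh preamble that sets LD_LIBRARY_PATH from the install prefix before exec'ing the bundled ruby.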
for FILENAME in irb testrb ri rdoc erb rake gem bundle
do
sed -i -e '1c\
#!/bin/sh\
# -*- ruby -*-\
bindir=`cd -P "${0%/*}" 2>/dev/null; pwd`\
prefix="${bindir%/bin}"\
export LD_LIBRARY_PATH="$prefix/lib${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH}"\
exec "$bindir/ruby" -x "$0" "$@"\
#!/usr/bin/env ruby' $FILENAME
done
)
fi
RUBY_PACKAGE=ruby-${MINOR_RUBY_VERSION}.tgz
echo "Creating ${RUBY_PACKAGE}..."
tar czf $RUBY_PACKAGE -C $DESTINATION .
echo "done"
| ActiveState/heroku-buildpack-ruby | scripts/compile-ruby-cf.sh | Shell | mit | 1,673 |
#!/bin/bash
# Copyright (C) 2011, 2012 Andy Aschwanden and Ed Bueler
set -e # exit on error
echo "# PISM Storglaciaren 3d Model"
if [ -n "${SCRIPTNAME:+1}" ] ; then
echo "[SCRIPTNAME=$SCRIPTNAME (already set)]"
echo ""
else
SCRIPTNAME="#(psg_3d.sh)"
fi
NN=2 # default number of processors
if [ $# -gt 0 ] ; then # if user says "psg_flowline.sh 8" then NN = 8
NN="$1"
fi
echo "$SCRIPTNAME NN = $NN"
# set MPIDO if using different MPI execution command, for example:
# $ export PISM_MPIDO="aprun -n "
if [ -n "${PISM_MPIDO:+1}" ] ; then # check if env var is already set
echo "$SCRIPTNAME PISM_MPIDO = $PISM_MPIDO (already set)"
else
PISM_MPIDO="mpiexec -n "
echo "$SCRIPTNAME PISM_MPIDO = $PISM_MPIDO"
fi
# check if env var PISM_DO was set (i.e. PISM_DO=echo for a 'dry' run)
if [ -n "${PISM_DO:+1}" ] ; then # check if env var DO is already set
echo "$SCRIPTNAME PISM_DO = $PISM_DO (already set)"
else
PISM_DO=""
fi
# prefix to pism (not to executables)
if [ -n "${PISM_PREFIX:+1}" ] ; then # check if env var is already set
echo "$SCRIPTNAME PISM_PREFIX = $PISM_PREFIX (already set)"
else
PISM_PREFIX="" # just a guess
echo "$SCRIPTNAME PISM_PREFIX = $PISM_PREFIX"
fi
# set PISM_EXEC if using different executables, for example:
# $ export PISM_EXEC="pismr -cold"
if [ -n "${PISM_EXEC:+1}" ] ; then # check if env var is already set
echo "$SCRIPTNAME PISM_EXEC = $PISM_EXEC (already set)"
else
PISM_EXEC="pismr"
echo "$SCRIPTNAME PISM_EXEC = $PISM_EXEC"
fi
echo
PCONFIG=psg_config.nc
# cat prefix and exec together
PISM="${PISM_PREFIX}${PISM_EXEC} -cts -config_override $PCONFIG"
DATANAME=storglaciaren_3d.nc
PISM_DATANAME=pism_$DATANAME
INNAME=$PISM_DATANAME
COUPLER="-surface given" # FIXME should be using PSElevation as in flowline example
# 40 m grid
GRID="-Mx 94 -My 51 -Mz 151 -Mbz 1 -Lz 300 -z_spacing equal"
GS=40
SKIP=200
# 20 m grid
#GRID="-Mx 186 -My 101 -Mz 301 -Mbz 1 -Lz 300 -z_spacing equal"
#GS=20
#SKIP=500
# 10 m grid
#GRID="-Mx 371 -My 201 -Mz 301 -Mbz 1 -Lz 300 -z_spacing equal"
#GS=10
#SKIP=500
# bootstrap and do smoothing run
OUTNAME=psg_3d_${GS}m_pre1.nc
echo
echo "$SCRIPTNAME bootstrapping plus short smoothing run for 1 a"
cmd="$PISM_MPIDO $NN $PISM -skip $SKIP -boot_file $INNAME $GRID \
$COUPLER -y 1 -o $OUTNAME"
$PISM_DO $cmd
# FIXME: reasonable to run SIA somewhat longer to equilibrium to establish stable thermodynamical state before any attempt to invert surface velocities; this is the start of it
# FIXME: also reasonable to use hybrid model with ad hoc specification of basal yield stress
INNAME=$OUTNAME
incSTEADYNAME=inc_psg_3d_${GS}m_Tsteady.nc # FIXME: missing a field because of bug
STEADYNAME=psg_3d_${GS}m_Tsteady.nc
echo
echo "$SCRIPTNAME running toward thermodynamical steady state"
cmd="$PISM_MPIDO $NN $PISM -i $INNAME \
$COUPLER -y 200 -no_mass -o $incSTEADYNAME"
$PISM_DO $cmd
echo
echo "$SCRIPTNAME done"
| JohannesFeldmann/pism | examples/storglaciaren/psg_3d.sh | Shell | gpl-2.0 | 2,991 |
#!/bin/bash
#set -x
set -e
echo "running load_rnaseq_annotation.sh $1"
# locate this shell script, and source a generic shell script to process all params related settings
UPLOAD_SCRIPTS_DIRECTORY=$(dirname "$0")
UPLOAD_DATA_TYPE="annotation"
source "$UPLOAD_SCRIPTS_DIRECTORY/process_params.inc"
# Execute some basic checks
if [ -z "$GPL_ID" ] || [ -z "$ANNOTATION_TITLE" ]; then
echo "Following variables need to be set:"
echo " GPL_ID=$GPL_ID"
echo " ANNOTATION_TITLE=$ANNOTATION_TITLE"
exit 1
fi
if [ ! -d logs ] ; then mkdir logs; fi
# Start the upload
$KITCHEN -norep=Y \
-file="$KETTLE_JOBS/load_rnaseq_annotation.kjb" \
-log="logs/load_rnaseq_annotation_$(date +"%Y%m%d%H%M").log" \
-param:DATA_LOCATION="$DATA_LOCATION" \
-param:DATA_FILE="$ANNOTATIONS_FILE"
#-param:SORT_DIR=/tmp \
#-param:GPL_ID="$GPL_ID" \
#-param:LOAD_TYPE=I \
#-param:ANNOTATION_TITLE="$ANNOTATION_TITLE"
| cdejonge/transmart-data | samples/oracle/load_rnaseq_annotation.sh | Shell | gpl-3.0 | 937 |
#!/bin/sh
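# Run the Python map(float, int) data-type binding test against a single-daemon HyperDex space.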
python2 "${HYPERDEX_SRCDIR}"/test/runner.py --space="space kv key k attributes map(float, int) v" --daemons=1 -- \
python2 "${HYPERDEX_SRCDIR}"/test/python/DataTypeMapFloatInt.py {HOST} {PORT}
| tempbottle/HyperDex | test/sh/bindings.python.DataTypeMapFloatInt.sh | Shell | bsd-3-clause | 208 |
#!/bin/bash
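# Open testnet wallet_05.bin in monero-wallet-cli (wrapped in rlwrap for line editing) against a local trusted daemon on port 38081.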
rlwrap monero-wallet-cli --wallet-file wallet_05.bin --password "" --testnet --trusted-daemon --daemon-address localhost:38081 --log-file wallet_05.log
| eiabea/bitmonero | tests/libwallet_api_tests/scripts/open_wallet_5.sh | Shell | bsd-3-clause | 167 |
#! /bin/bash
#
# Subnodes uninstall script. Removes dnsmasq, hostapd, bridge-utils, batctl, iw. Does *not* yet remove Node.js. Deletes the subnodes folder and the files within.
# Sarah Grant
# Updated 17 April 2015
#
# TO-DO
# - Remove node.js
#
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# CHECK USER PRIVILEGES
(( `id -u` )) && echo "This script *must* be run with root privileges, try prefixing with sudo. i.e. sudo $0" && exit 1
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Uninstall Subnodes
#
read -p "Do you wish to uninstall subnodes from your Raspberry Pi? [N] " yn
case $yn in
[Yy]* )
clear
echo "Disabling the batman-adv kernel..."
# remove the batman-adv module to be started on boot
#sed -i '$a batman-adv' /etc/modules
modprobe -r batman-adv;
echo ""
echo -en "Disabling hostapd and dnsmasq on boot... "
update-rc.d hostapd disable
update-rc.d dnsmasq disable
# remove hostapd init file
echo -en "Deleting default hostapd and configuration files... "
rm /etc/default/hostapd
rm /etc/hostapd/hostapd.conf
echo -en "[OK]\n"
# remove dnsmasq
echo -en "Deleting dnsmasq configuration file... "
rm /etc/dnsmasq.conf
echo -en "[OK]\n"
echo ""
echo -en "Purging iw, batctl, bridge-utils, hostapd and dnsmasq... "
# how do i uninstall with apt-get
apt-get purge -y bridge-utils hostapd dnsmasq batctl iw
apt-get autoremove
echo -en "[OK]\n"
# restore the previous interfaces file
echo -en "Restoring previous network interfaces configuration file... "
rm /etc/network/interfaces
mv /etc/network/interfaces.orig.bak /etc/network/interfaces
echo -en "[OK]\n"
# Remove startup scripts and delete
echo -en "Disabling and deleting startup subnodes startup scripts... "
update-rc.d -f subnodes_mesh remove
rm /etc/init.d/subnodes_mesh
update-rc.d -f subnodes_ap remove
rm /etc/init.d/subnodes_ap
echo "Deleting subnodes folder "
cd /home/pi/
rm -rf /home/pi/subnodes
echo -en "[OK]\n"
read -p "Do you wish to reboot now? [N] " yn
case $yn in
[Yy]* )
reboot;;
[Nn]* ) exit 0;;
esac
;;
[Nn]* ) exit 0;;
esac
exit 0
| HonghaiJia/subnodes | uninstall.sh | Shell | agpl-3.0 | 2,267 |
#!/bin/bash
# SDK
# https://dl.google.com/android/repository/sdk-tools-linux-3859397.zip
# SHA-256 444e22ce8ca0f67353bda4b85175ed3731cae3ffa695ca18119cbacef1c1bea0
# latest version available here: https://developer.android.com/studio/index.html
# NDK
# https://dl.google.com/android/repository/android-ndk-r15c-linux-x86_64.zip
# SHA-1 0bf02d4e8b85fd770fd7b9b2cdec57f9441f27a2
# latest version available here: https://developer.android.com/ndk/downloads/index.html
BASH_RC=~/.bashrc
GODOT_BUILD_TOOLS_PATH=./godot-dev/build-tools
mkdir -p $GODOT_BUILD_TOOLS_PATH
cd $GODOT_BUILD_TOOLS_PATH
ANDROID_BASE_URL=http://dl.google.com/android/repository
ANDROID_SDK_RELEASE=4333796
ANDROID_SDK_DIR=android-sdk
ANDROID_SDK_FILENAME=sdk-tools-linux-$ANDROID_SDK_RELEASE.zip
ANDROID_SDK_URL=$ANDROID_BASE_URL/$ANDROID_SDK_FILENAME
ANDROID_SDK_PATH=$GODOT_BUILD_TOOLS_PATH/$ANDROID_SDK_DIR
ANDROID_SDK_SHA256=92ffee5a1d98d856634e8b71132e8a95d96c83a63fde1099be3d86df3106def9
ANDROID_NDK_RELEASE=r18
ANDROID_NDK_DIR=android-ndk
ANDROID_NDK_FILENAME=android-ndk-$ANDROID_NDK_RELEASE-linux-x86_64.zip
ANDROID_NDK_URL=$ANDROID_BASE_URL/$ANDROID_NDK_FILENAME
ANDROID_NDK_PATH=$GODOT_BUILD_TOOLS_PATH/$ANDROID_NDK_DIR
ANDROID_NDK_SHA1=2ac2e8e1ef73ed551cac3a1479bb28bd49369212
echo
echo "Download and install Android development tools ..."
echo
if [ ! -e $ANDROID_SDK_FILENAME ]; then
echo "Downloading: Android SDK ..."
curl -L -O $ANDROID_SDK_URL
else
echo $ANDROID_SDK_SHA256 $ANDROID_SDK_FILENAME > $ANDROID_SDK_FILENAME.sha256
if [ $(shasum -a 256 < $ANDROID_SDK_FILENAME | awk '{print $1;}') != $ANDROID_SDK_SHA256 ]; then
echo "Downloading: Android SDK ..."
curl -L -O $ANDROID_SDK_URL
fi
fi
if [ ! -d $ANDROID_SDK_DIR ]; then
echo "Extracting: Android SDK ..."
unzip -qq $ANDROID_SDK_FILENAME -d $ANDROID_SDK_DIR
echo
fi
if [ ! -e $ANDROID_NDK_FILENAME ]; then
echo "Downloading: Android NDK ..."
curl -L -O $ANDROID_NDK_URL
else
echo $ANDROID_NDK_SHA1 $ANDROID_NDK_FILENAME > $ANDROID_NDK_FILENAME.sha1
if [ $(shasum -a 1 < $ANDROID_NDK_FILENAME | awk '{print $1;}') != $ANDROID_NDK_SHA1 ]; then
echo "Downloading: Android NDK ..."
curl -L -O $ANDROID_NDK_URL
fi
fi
if [ ! -d $ANDROID_NDK_DIR ]; then
echo "Extracting: Android NDK ..."
unzip -qq $ANDROID_NDK_FILENAME
mv android-ndk-$ANDROID_NDK_RELEASE $ANDROID_NDK_DIR
echo
fi
echo "Installing: Android Tools ..."
#$ANDROID_SDK_DIR/tools/bin/sdkmanager --all
yes | $ANDROID_SDK_DIR/tools/bin/sdkmanager --licenses > /dev/null
$ANDROID_SDK_DIR/tools/bin/sdkmanager 'tools' > /dev/null
$ANDROID_SDK_DIR/tools/bin/sdkmanager 'platform-tools' > /dev/null
$ANDROID_SDK_DIR/tools/bin/sdkmanager 'build-tools;28.0.1' > /dev/null
echo
EXPORT_VAL="export ANDROID_HOME=$ANDROID_SDK_PATH"
if ! grep -q "^$EXPORT_VAL" $BASH_RC; then
echo $EXPORT_VAL >> $BASH_RC
fi
#eval $EXPORT_VAL
EXPORT_VAL="export ANDROID_NDK_ROOT=$ANDROID_NDK_PATH"
if ! grep -q "^$EXPORT_VAL" $BASH_RC; then
echo $EXPORT_VAL >> $BASH_RC
fi
#eval $EXPORT_VAL
EXPORT_VAL="export PATH=$PATH:$ANDROID_SDK_PATH/tools"
if ! grep -q "^export PATH=.*$ANDROID_SDK_PATH/tools.*" $BASH_RC; then
echo $EXPORT_VAL >> $BASH_RC
fi
#eval $EXPORT_VAL
EXPORT_VAL="export PATH=$PATH:$ANDROID_SDK_PATH/tools/bin"
if ! grep -q "^export PATH=.*$ANDROID_SDK_PATH/tools/bin.*" $BASH_RC; then
echo $EXPORT_VAL >> $BASH_RC
fi
#eval $EXPORT_VAL
echo
echo "Done!"
echo
| mcanders/godot | misc/travis/android-tools-linux.sh | Shell | mit | 3,435 |
#! /usr/bin/env bash
#
# Copyright (C) 2013-2015 Zhang Rui <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#IJK_OPENSSL_UPSTREAM=https://github.com/openssl/openssl
IJK_OPENSSL_UPSTREAM=https://github.com/Bilibili/openssl.git
IJK_OPENSSL_FORK=https://github.com/Bilibili/openssl.git
IJK_OPENSSL_COMMIT=OpenSSL_1_0_2d
IJK_OPENSSL_LOCAL_REPO=extra/openssl
set -e
TOOLS=tools
echo "== pull openssl base =="
sh $TOOLS/pull-repo-base.sh $IJK_OPENSSL_UPSTREAM $IJK_OPENSSL_LOCAL_REPO
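# pull_fork clones the fork into a per-ABI contrib directory and checks out the pinned OpenSSL tag.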
function pull_fork()
{
echo "== pull openssl fork $1 =="
sh $TOOLS/pull-repo-ref.sh $IJK_OPENSSL_FORK android/contrib/openssl-$1 ${IJK_OPENSSL_LOCAL_REPO}
cd android/contrib/openssl-$1
git checkout ${IJK_OPENSSL_COMMIT}
cd -
}
pull_fork "armv7a"
pull_fork "armv5"
pull_fork "x86"
| treejames/ijkplayer | init-android-openssl.sh | Shell | gpl-2.0 | 1,304 |
#!/bin/bash
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
set -e
export PYTHONPATH=$PIO_HOME/tests:$PYTHONPATH
echo "Sleeping $SLEEP_TIME seconds for all services to be ready..."
sleep $SLEEP_TIME
# create S3 bucket in localstack
aws --endpoint-url=http://localstack:4572 --region=us-east-1 s3 mb s3://pio_bucket
eval $@
| takezoe/incubator-predictionio | tests/docker-files/init.sh | Shell | apache-2.0 | 1,062 |
#!/usr/bin/env bash
PROJECT_DIR=""
SDK_SOURCE_DIR=$(cd `dirname $0` && pwd)
usage() {
echo "Usage: $0 [-s SDK_SOURCE_DIR] [-d PROJECT_DIR]" 1>&2
exit 1
}
while getopts "hs:d:" options; do
case "${options}" in
s)
SDK_SOURCE_DIR=${OPTARG}
if [ "$SDK_SOURCE_DIR" == "" ]; then
echo "path to SDK source directory is required" || exit
usage
fi
;;
d)
PROJECT_DIR=${OPTARG}
;;
h)
usage
;;
*)
usage
;;
esac
done
if [ "$PROJECT_DIR" != "" ]; then
cd $PROJECT_DIR || exit
fi
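# List every aws-sdk-go-v2 module in the dependency graph and turn each into a local "replace" directive pointing into SDK_SOURCE_DIR.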
go mod graph | awk '{print $1}' | cut -d '@' -f 1 | sort | uniq | grep "github.com/aws/aws-sdk-go-v2" | while read x; do
repPath=${x/github.com\/aws\/aws-sdk-go-v2/${SDK_SOURCE_DIR}}
echo -replace $x=$repPath
done | xargs go mod edit
| tektoncd/pipeline | vendor/github.com/aws/aws-sdk-go-v2/local-mod-replace.sh | Shell | apache-2.0 | 778 |
#!/bin/bash
#
# Copyright 2015 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Test parts of the local_repository binding which are broken with jdk7
#
# Load test environment
source $(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)/test-setup.sh \
|| { echo "test-setup.sh not found!" >&2; exit 1; }
# Creates an indirect dependency on X from A and make sure the error message
# refers to the correct label, both in an external repository and not.
function test_indirect_dep_message() {
local external_dir=$TEST_TMPDIR/ext-dir
mkdir -p a b $external_dir/x
cat > a/A.java <<EOF
package a;
import x.X;
public class A {
public static void main(String args[]) {
X.print();
}
}
EOF
cat > a/BUILD <<EOF
java_binary(
name = "a",
main_class = "a.A",
srcs = ["A.java"],
deps = ["//b"],
)
EOF
cat > b/B.java <<EOF
package b;
public class B {
public static void print() {
System.out.println("B");
}
}
EOF
cat > b/BUILD <<EOF
java_library(
name = "b",
srcs = ["B.java"],
deps = ["@x_repo//x"],
visibility = ["//visibility:public"],
)
EOF
cp -r a b $external_dir
touch $external_dir/WORKSPACE
cat > $external_dir/x/X.java <<EOF
package x;
public class X {
public static void print() {
System.out.println("X");
}
}
EOF
cat > $external_dir/x/BUILD <<EOF
java_library(
name = "x",
srcs = ["X.java"],
visibility = ["//visibility:public"],
)
EOF
cat > WORKSPACE <<EOF
local_repository(
name = "x_repo",
path = "$external_dir",
)
EOF
bazel build @x_repo//a >& $TEST_log && fail "Building @x_repo//a should error out"
expect_log "** Please add the following dependencies:"
expect_log "@x_repo//x to @x_repo//a"
}
run_suite "local repository tests for jdk8 only"
| anupcshan/bazel | src/test/shell/bazel/local_repository_test_jdk8.sh | Shell | apache-2.0 | 2,300 |
#!/bin/sh
test -n "$srcdir" || srcdir=`dirname "$0"`
test -n "$srcdir" || srcdir=.
olddir=`pwd`
cd $srcdir
AUTORECONF=`which autoreconf`
if test -z $AUTORECONF; then
echo "*** No autoreconf found, please intall it ***"
exit 1
fi
mkdir -p m4
autoreconf --force --install --verbose
cd $olddir
test -n "$NOCONFIGURE" || "$srcdir/configure" "$@"
| GNOME/ostree-init | autogen.sh | Shell | gpl-2.0 | 364 |
uptime
| achivetta/skyefs | util/opencloud/uptime.sh | Shell | lgpl-2.1 | 7 |
#!/bin/sh
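# Conda build script: stage the BtToxin_Digger entry point, helper scripts, Perl library, databases and models into $PREFIX/bin, mark them executable, then sanity-check the install.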
mkdir -p $PREFIX/bin
mkdir -p $PREFIX/bin/lib
cp BtToxin_Digger.pl $PREFIX/bin/BtToxin_Digger
cp Scripts/*.pl $PREFIX/bin/
cp lib/own.pm $PREFIX/bin/lib/
cp -r BTTCMP_db $PREFIX/bin/BTTCMP_db
cp -r BTTCMP_models $PREFIX/bin/BTTCMP_models
cp pgcgap/* $PREFIX/bin/
chmod a+x $PREFIX/bin/BtToxin_Digger
chmod a+x $PREFIX/bin/coreprocess.pl
chmod a+x $PREFIX/bin/get_genes_table.pl
chmod a+x $PREFIX/bin/get_all_info_nucl.pl
chmod a+x $PREFIX/bin/get_all_info_orfs.pl
chmod a+x $PREFIX/bin/get_all_info_prot.pl
chmod a+x $PREFIX/bin/lib/own.pm
chmod a+x $PREFIX/bin/pgcgap
chmod a+x $PREFIX/bin/genome_LenFilter_stats.pl
chmod a+x $PREFIX/bin/get_stats_summary.pl
BtToxin_Digger --version
| cokelaer/bioconda-recipes | recipes/bttoxin_digger/build.sh | Shell | mit | 699 |
#!/bin/bash --
# Author: @DirtyCajunRice
# Check if ran by root
if [[ $UID -ne 0 ]]; then
echo 'Script must be run as root'
exit 1
fi
# Check for distro; continue if debian/ubuntu || exit
if [[ $(cat /etc/issue) =~ Debian ]]; then
distro=debian
elif [[ $(cat /etc/issue) =~ Ubuntu ]]; then
distro=ubuntu
else
echo "This script will only work on Debian and Ubuntu Distros, but you are using $(cat /etc/issue)"
exit 1
fi
# Get external ip address (checking 3 sites for redundancy)
for i in 'ipecho.net/plain' 'ifconfig.me' 'checkip.amazonaws.com'; do
extip=$(curl -s ${i})
[[ ! ${extip} ]] || break
done
# Get internal ip address
intip=$(ip r g 8.8.8.8 | awk 'NR==1{print $7};')
# Install whiptail as a script prerequisite
apt-get -qq install whiptail -y
# Check to see what SickRage Dependencies are missing
packages=$(dpkg -l unrar-free git-core openssl libssl-dev python2.7 2>&1 | grep "no packages" | \
awk '{ print $6 }' | tr '\n' ' ')
if [[ ${packages} ]]; then
# Show Whiptail and install required files
{
i=1
while read -r line; do
i=$(( $i + 1 ))
echo ${i}
done < <(apt-get update && apt-get install ${packages} -y)
} | whiptail --title "Progress" --gauge "Installing $packages" 8 80 0
fi
# Check to see if all prior packages were installed successfully. if not exit 1 and display whiptail issues
if [[ $(dpkg -l unrar-free git-core openssl libssl-dev python2.7 2>&1 | grep "no packages" | \
awk '{print $6 }') ]]; then
whiptail --title "Package Installation Failed" --msgbox " These Packages have failed:
$(dpkg -l unrar-free git-core openssl libssl-dev python2.7 2>&1 | grep "no packages" | awk '{print $6 }')
Please resolve these issues and restart the install script" 15 66
exit 1
fi
# Check to see if sickrage exists; If not make user/group
if [[ ! "$(getent group sickrage)" ]]; then
echo "Adding SickRage Group"
addgroup --system sickrage
fi
if [[ ! "$(getent passwd sickrage)" ]]; then
echo "Adding SickRage User"
adduser --disabled-password --system --home /var/lib/sickrage --gecos "SickRage" --ingroup sickrage sickrage
fi
# Check to see if /opt/sickrage exists. If it does ask if they want to overwrite it. if they do not exit 1
# if they do, remove the whole directory and recreate
if [[ ! -d /opt/sickrage ]]; then
echo "Creating New SickRage Folder"
mkdir /opt/sickrage && chown sickrage:sickrage /opt/sickrage
echo "Git Cloning In Progress"
su -c "cd /opt && git clone -q https://github.com/SickRage/SickRage.git /opt/sickrage" -s /bin/bash sickrage
else
whiptail --title 'Overwrite?' --yesno "/opt/sickrage already exists, do you want to overwrite it?" 8 40
choice=$?
if [[ ${choice} == 0 ]]; then
echo "Removing Old SickRage Folder And Creating New SickRage Folder"
rm -rf /opt/sickrage && mkdir /opt/sickrage && chown sickrage:sickrage /opt/sickrage
echo "Git Cloning In Progress"
su -c "cd /opt && git clone -q https://github.com/SickRage/SickRage.git /opt/sickrage" -s /bin/bash sickrage
else
echo
exit 1
fi
fi
# Depending on Distro, Cp the service script, then change the owner/group and change the permissions. Finally
# start the service
if [[ ${distro} = ubuntu ]]; then
if [[ $(/sbin/init --version 2> /dev/null) =~ upstart ]]; then
echo "Copying Startup Script To Upstart"
cp /opt/sickrage/runscripts/init.upstart /etc/init/sickrage.conf
chown root:root /etc/init/sickrage.conf && chmod 644 /etc/init/sickrage.conf
echo "Starting SickRage"
service sickrage start
elif [[ $(systemctl) =~ -\.mount ]]; then
echo "Copying Startup Script To systemd"
cp /opt/sickrage/runscripts/init.systemd /etc/systemd/system/sickrage.service
chown root:root /etc/systemd/system/sickrage.service && chmod 644 /etc/systemd/system/sickrage.service
echo "Starting SickRage"
systemctl -q enable sickrage && systemctl -q start sickrage
else
echo "Copying Startup Script To init"
cp /opt/sickrage/runscripts/init.ubuntu /etc/init.d/sickrage
chown root:root /etc/init.d/sickrage && chmod 644 /etc/init.d/sickrage
echo "Starting SickRage"
update-rc.d sickrage defaults && service sickrage start
fi
elif [[ ${distro} = debian ]]; then
if [[ $(systemctl) =~ -\.mount ]]; then
echo "Copying Startup Script To systemd"
cp /opt/sickrage/runscripts/init.systemd /etc/systemd/system/sickrage.service
chown root:root /etc/systemd/system/sickrage.service && chmod 644 /etc/systemd/system/sickrage.service
echo "Starting SickRage"
systemctl -q enable sickrage && systemctl -q start sickrage
else
echo "Copying Startup Script To init"
cp /opt/sickrage/runscripts/init.debian /etc/init.d/sickrage
chown root:root /etc/init.d/sickrage && chmod 755 /etc/init.d/sickrage
echo "Starting SickRage"
update-rc.d sickrage defaults && service sickrage start
fi
fi
# Finish by explaining the script is finished and give them the relevant IP addresses
whiptail --title Complete --msgbox "Check that everything has been set up correctly by going to:
Internal IP: http://$intip:8081
OR
External IP: http://$extip:8081
make sure to add sickrage to your download clients group" 15 66
| b0ttl3z/SickRage | contrib/debian-ubuntu-install.sh | Shell | gpl-3.0 | 5,384 |
#!/bin/sh
. /lib/functions.sh
. ../netifd-proto.sh
init_proto "$@"
proto_openconnect_init_config() {
proto_config_add_string "server"
proto_config_add_int "port"
proto_config_add_string "username"
proto_config_add_string "serverhash"
proto_config_add_string "authgroup"
proto_config_add_string "password"
proto_config_add_string "password2"
proto_config_add_string "token_mode"
proto_config_add_string "token_secret"
proto_config_add_string "interface"
proto_config_add_string "os"
proto_config_add_string "csd_wrapper"
no_device=1
available=1
}
proto_openconnect_setup() {
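# Resolve the server address, assemble the openconnect command line from the configured options, and hand it to netifd.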
local config="$1"
json_get_vars server port username serverhash authgroup password password2 interface token_mode token_secret os csd_wrapper
grep -q tun /proc/modules || insmod tun
ifname="vpn-$config"
logger -t openconnect "initializing..."
serv_addr=
for ip in $(resolveip -t 10 "$server"); do
( proto_add_host_dependency "$interface" "$ip" "$ifname" )
serv_addr=1
done
[ -n "$serv_addr" ] || {
logger -t openconnect "Could not resolve server address: '$server'"
sleep 60
proto_setup_failed "$config"
exit 1
}
[ -n "$port" ] && port=":$port"
cmdline="$server$port -i "$ifname" --non-inter --syslog --script /lib/netifd/vpnc-script"
# migrate to standard config files
[ -f "/etc/config/openconnect-user-cert-vpn-$config.pem" ] && mv "/etc/config/openconnect-user-cert-vpn-$config.pem" "/etc/openconnect/user-cert-vpn-$config.pem"
[ -f "/etc/config/openconnect-user-key-vpn-$config.pem" ] && mv "/etc/config/openconnect-user-key-vpn-$config.pem" "/etc/openconnect/user-key-vpn-$config.pem"
[ -f "/etc/config/openconnect-ca-vpn-$config.pem" ] && mv "/etc/config/openconnect-ca-vpn-$config.pem" "/etc/openconnect/ca-vpn-$config.pem"
[ -f /etc/openconnect/user-cert-vpn-$config.pem ] && append cmdline "-c /etc/openconnect/user-cert-vpn-$config.pem"
[ -f /etc/openconnect/user-key-vpn-$config.pem ] && append cmdline "--sslkey /etc/openconnect/user-key-vpn-$config.pem"
[ -f /etc/openconnect/ca-vpn-$config.pem ] && {
append cmdline "--cafile /etc/openconnect/ca-vpn-$config.pem"
append cmdline "--no-system-trust"
}
[ -n "$serverhash" ] && {
append cmdline " --servercert=$serverhash"
append cmdline "--no-system-trust"
}
[ -n "$authgroup" ] && append cmdline "--authgroup $authgroup"
[ -n "$username" ] && append cmdline "-u $username"
[ -n "$password" ] && {
umask 077
mkdir -p /var/etc
pwfile="/var/etc/openconnect-$config.passwd"
echo "$password" > "$pwfile"
[ -n "$password2" ] && echo "$password2" >> "$pwfile"
append cmdline "--passwd-on-stdin"
}
[ -n "$token_mode" ] && append cmdline "--token-mode=$token_mode"
[ -n "$token_secret" ] && append cmdline "--token-secret=$token_secret"
[ -n "$os" ] && append cmdline "--os=$os"
[ -n "$csd_wrapper" ] && [ -x "$csd_wrapper" ] && append cmdline "--csd-wrapper=$csd_wrapper"
proto_export INTERFACE="$config"
logger -t openconnect "executing 'openconnect $cmdline'"
if [ -f "$pwfile" ]; then
proto_run_command "$config" /usr/sbin/openconnect-wrapper $pwfile $cmdline
else
proto_run_command "$config" /usr/sbin/openconnect $cmdline
fi
}
proto_openconnect_teardown() {
local config="$1"
pwfile="/var/etc/openconnect-$config.passwd"
rm -f $pwfile
logger -t openconnect "bringing down openconnect"
proto_kill_command "$config" 2
}
add_protocol openconnect
| male-puppies/packages | net/openconnect/files/openconnect.sh | Shell | gpl-2.0 | 3,383 |
#!/bin/bash
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
# If we have any arguments at all, this is a push and not just setup.
is_push=$@
readonly KNOWN_TOKENS_FILE="/srv/salt-overlay/salt/kube-apiserver/known_tokens.csv"
readonly BASIC_AUTH_FILE="/srv/salt-overlay/salt/kube-apiserver/basic_auth.csv"
function ensure-basic-networking() {
# Deal with GCE networking bring-up race. (We rely on DNS for a lot,
# and it's just not worth doing a whole lot of startup work if this
# isn't ready yet.)
until getent hosts metadata.google.internal &>/dev/null; do
echo 'Waiting for functional DNS (trying to resolve metadata.google.internal)...'
sleep 3
done
until getent hosts $(hostname -f || echo _error_) &>/dev/null; do
echo 'Waiting for functional DNS (trying to resolve my own FQDN)...'
sleep 3
done
until getent hosts $(hostname -i || echo _error_) &>/dev/null; do
echo 'Waiting for functional DNS (trying to resolve my own IP)...'
sleep 3
done
echo "Networking functional on $(hostname) ($(hostname -i))"
}
function ensure-install-dir() {
INSTALL_DIR="/var/cache/kubernetes-install"
mkdir -p ${INSTALL_DIR}
cd ${INSTALL_DIR}
}
function salt-apiserver-timeout-grain() {
cat <<EOF >>/etc/salt/minion.d/grains.conf
minRequestTimeout: '$1'
EOF
}
function set-broken-motd() {
echo -e '\nBroken (or in progress) GCE Kubernetes node setup! Suggested first step:\n tail /var/log/startupscript.log\n' > /etc/motd
}
function set-good-motd() {
echo -e '\n=== GCE Kubernetes node setup complete ===\n' > /etc/motd
}
function curl-metadata() {
curl --fail --retry 5 --silent -H 'Metadata-Flavor: Google' "http://metadata/computeMetadata/v1/instance/attributes/${1}"
}
function set-kube-env() {
local kube_env_yaml="${INSTALL_DIR}/kube_env.yaml"
until curl-metadata kube-env > "${kube_env_yaml}"; do
echo 'Waiting for kube-env...'
sleep 3
done
# kube-env has all the environment variables we care about, in a flat yaml format
eval "$(python -c '
import pipes,sys,yaml
for k,v in yaml.load(sys.stdin).iteritems():
print("""readonly {var}={value}""".format(var = k, value = pipes.quote(str(v))))
print("""export {var}""".format(var = k))
' < """${kube_env_yaml}""")"
}
function remove-docker-artifacts() {
echo "== Deleting docker0 =="
# Forcibly install bridge-utils (options borrowed from Salt logs).
until apt-get -q -y -o DPkg::Options::=--force-confold -o DPkg::Options::=--force-confdef install bridge-utils; do
echo "== install of bridge-utils failed, retrying =="
sleep 5
done
# Remove docker artifacts on minion nodes, if present
iptables -t nat -F || true
ifconfig docker0 down || true
brctl delbr docker0 || true
echo "== Finished deleting docker0 =="
}
# Retry a download until we get it.
#
# $1 is the URL to download
download-or-bust() {
local -r url="$1"
local -r file="${url##*/}"
rm -f "$file"
until curl --ipv4 -Lo "$file" --connect-timeout 20 --retry 6 --retry-delay 10 "${url}"; do
echo "Failed to download file (${url}). Retrying."
done
}
validate-hash() {
local -r file="$1"
local -r expected="$2"
local actual
actual=$(sha1sum ${file} | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== ${file} corrupted, sha1 ${actual} doesn't match expected ${expected} =="
return 1
fi
}
# Install salt from GCS. See README.md for instructions on how to update these
# debs.
install-salt() {
if dpkg -s salt-minion &>/dev/null; then
echo "== SaltStack already installed, skipping install step =="
return
fi
echo "== Refreshing package database =="
until apt-get update; do
echo "== apt-get update failed, retrying =="
sleep 5
done
mkdir -p /var/cache/salt-install
cd /var/cache/salt-install
DEBS=(
libzmq3_3.2.3+dfsg-1~bpo70~dst+1_amd64.deb
python-zmq_13.1.0-1~bpo70~dst+1_amd64.deb
salt-common_2014.1.13+ds-1~bpo70+1_all.deb
salt-minion_2014.1.13+ds-1~bpo70+1_all.deb
)
URL_BASE="https://storage.googleapis.com/kubernetes-release/salt"
for deb in "${DEBS[@]}"; do
if [ ! -e "${deb}" ]; then
download-or-bust "${URL_BASE}/${deb}"
fi
done
# Based on
# https://major.io/2014/06/26/install-debian-packages-without-starting-daemons/
# We do this to prevent Salt from starting the salt-minion
# daemon. The other packages don't have relevant daemons. (If you
# add a package that needs a daemon started, add it to a different
# list.)
cat > /usr/sbin/policy-rc.d <<EOF
#!/bin/sh
echo "Salt shall not start." >&2
exit 101
EOF
chmod 0755 /usr/sbin/policy-rc.d
for deb in "${DEBS[@]}"; do
echo "== Installing ${deb}, ignore dependency complaints (will fix later) =="
dpkg --skip-same-version --force-depends -i "${deb}"
done
# This will install any of the unmet dependencies from above.
echo "== Installing unmet dependencies =="
until apt-get install -f -y; do
echo "== apt-get install failed, retrying =="
sleep 5
done
rm /usr/sbin/policy-rc.d
# Log a timestamp
echo "== Finished installing Salt =="
}
# Ensure salt-minion isn't running and never runs
stop-salt-minion() {
if [[ -e /etc/init/salt-minion.override ]]; then
# Assume this has already run (upgrade, or baked into containervm)
return
fi
# This ensures it on next reboot
echo manual > /etc/init/salt-minion.override
update-rc.d salt-minion disable
while service salt-minion status >/dev/null; do
echo "salt-minion found running, stopping"
service salt-minion stop
sleep 1
done
}
# Mounts a persistent disk (formatting if needed) to store the persistent data
# on the master -- etcd's data, a few settings, and security certs/keys/tokens.
#
# This function can be reused to mount an existing PD because all of its
# operations modifying the disk are idempotent -- safe_format_and_mount only
# formats an unformatted disk, and mkdir -p will leave a directory be if it
# already exists.
mount-master-pd() {
# TODO(zmerlynn): GKE is still lagging in master-pd creation
if [[ ! -e /dev/disk/by-id/google-master-pd ]]; then
return
fi
device_info=$(ls -l /dev/disk/by-id/google-master-pd)
relative_path=${device_info##* }
device_path="/dev/disk/by-id/${relative_path}"
# Format and mount the disk, create directories on it for all of the master's
# persistent data, and link them to where they're used.
echo "Mounting master-pd"
mkdir -p /mnt/master-pd
/usr/share/google/safe_format_and_mount -m "mkfs.ext4 -F" "${device_path}" /mnt/master-pd &>/var/log/master-pd-mount.log || \
{ echo "!!! master-pd mount failed, review /var/log/master-pd-mount.log !!!"; return 1; }
# Contains all the data stored in etcd
mkdir -m 700 -p /mnt/master-pd/var/etcd
# Contains the dynamically generated apiserver auth certs and keys
mkdir -p /mnt/master-pd/srv/kubernetes
# Contains the cluster's initial config parameters and auth tokens
mkdir -p /mnt/master-pd/srv/salt-overlay
# Directory for kube-apiserver to store SSH key (if necessary)
mkdir -p /mnt/master-pd/srv/sshproxy
ln -s -f /mnt/master-pd/var/etcd /var/etcd
ln -s -f /mnt/master-pd/srv/kubernetes /srv/kubernetes
ln -s -f /mnt/master-pd/srv/sshproxy /srv/sshproxy
ln -s -f /mnt/master-pd/srv/salt-overlay /srv/salt-overlay
# This is a bit of a hack to get around the fact that salt has to run after the
# PD and mounted directory are already set up. We can't give ownership of the
# directory to etcd until the etcd user and group exist, but they don't exist
# until salt runs if we don't create them here. We could alternatively make the
# permissions on the directory more permissive, but this seems less bad.
if ! id etcd &>/dev/null; then
useradd -s /sbin/nologin -d /var/etcd etcd
fi
chown -R etcd /mnt/master-pd/var/etcd
chgrp -R etcd /mnt/master-pd/var/etcd
}
# Create the overlay files for the salt tree. We create these in a separate
# place so that we can blow away the rest of the salt configs on a kube-push and
# re-apply these.
function create-salt-pillar() {
# Always overwrite the cluster-params.sls (even on a push, we have
# these variables)
mkdir -p /srv/salt-overlay/pillar
cat <<EOF >/srv/salt-overlay/pillar/cluster-params.sls
instance_prefix: '$(echo "$INSTANCE_PREFIX" | sed -e "s/'/''/g")'
node_instance_prefix: '$(echo "$NODE_INSTANCE_PREFIX" | sed -e "s/'/''/g")'
cluster_cidr: '$(echo "$CLUSTER_IP_RANGE" | sed -e "s/'/''/g")'
allocate_node_cidrs: '$(echo "$ALLOCATE_NODE_CIDRS" | sed -e "s/'/''/g")'
service_cluster_ip_range: '$(echo "$SERVICE_CLUSTER_IP_RANGE" | sed -e "s/'/''/g")'
enable_cluster_monitoring: '$(echo "$ENABLE_CLUSTER_MONITORING" | sed -e "s/'/''/g")'
enable_cluster_logging: '$(echo "$ENABLE_CLUSTER_LOGGING" | sed -e "s/'/''/g")'
enable_cluster_ui: '$(echo "$ENABLE_CLUSTER_UI" | sed -e "s/'/''/g")'
enable_l7_loadbalancing: '$(echo "$ENABLE_L7_LOADBALANCING" | sed -e "s/'/''/g")'
enable_node_logging: '$(echo "$ENABLE_NODE_LOGGING" | sed -e "s/'/''/g")'
logging_destination: '$(echo "$LOGGING_DESTINATION" | sed -e "s/'/''/g")'
elasticsearch_replicas: '$(echo "$ELASTICSEARCH_LOGGING_REPLICAS" | sed -e "s/'/''/g")'
enable_cluster_dns: '$(echo "$ENABLE_CLUSTER_DNS" | sed -e "s/'/''/g")'
enable_cluster_registry: '$(echo "$ENABLE_CLUSTER_REGISTRY" | sed -e "s/'/''/g")'
dns_replicas: '$(echo "$DNS_REPLICAS" | sed -e "s/'/''/g")'
dns_server: '$(echo "$DNS_SERVER_IP" | sed -e "s/'/''/g")'
dns_domain: '$(echo "$DNS_DOMAIN" | sed -e "s/'/''/g")'
admission_control: '$(echo "$ADMISSION_CONTROL" | sed -e "s/'/''/g")'
network_provider: '$(echo "$NETWORK_PROVIDER" | sed -e "s/'/''/g")'
opencontrail_tag: '$(echo "$OPENCONTRAIL_TAG" | sed -e "s/'/''/g")'
opencontrail_kubernetes_tag: '$(echo "$OPENCONTRAIL_KUBERNETES_TAG")'
opencontrail_public_subnet: '$(echo "$OPENCONTRAIL_PUBLIC_SUBNET")'
enable_manifest_url: '$(echo "$ENABLE_MANIFEST_URL" | sed -e "s/'/''/g")'
manifest_url: '$(echo "$MANIFEST_URL" | sed -e "s/'/''/g")'
manifest_url_header: '$(echo "$MANIFEST_URL_HEADER" | sed -e "s/'/''/g")'
num_nodes: $(echo "${NUM_NODES}" | sed -e "s/'/''/g")
e2e_storage_test_environment: '$(echo "$E2E_STORAGE_TEST_ENVIRONMENT" | sed -e "s/'/''/g")'
EOF
if [ -n "${KUBELET_PORT:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
kubelet_port: '$(echo "$KUBELET_PORT" | sed -e "s/'/''/g")'
EOF
fi
# Configuration changes for test clusters
if [ -n "${APISERVER_TEST_ARGS:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
apiserver_test_args: '$(echo "$APISERVER_TEST_ARGS" | sed -e "s/'/''/g")'
EOF
fi
if [ -n "${API_SERVER_TEST_LOG_LEVEL:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
api_server_test_log_level: '$(echo "$API_SERVER_TEST_LOG_LEVEL" | sed -e "s/'/''/g")'
EOF
fi
if [ -n "${KUBELET_TEST_ARGS:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
kubelet_test_args: '$(echo "$KUBELET_TEST_ARGS" | sed -e "s/'/''/g")'
EOF
fi
if [ -n "${KUBELET_TEST_LOG_LEVEL:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
kubelet_test_log_level: '$(echo "$KUBELET_TEST_LOG_LEVEL" | sed -e "s/'/''/g")'
EOF
fi
if [ -n "${CONTROLLER_MANAGER_TEST_ARGS:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
controller_manager_test_args: '$(echo "$CONTROLLER_MANAGER_TEST_ARGS" | sed -e "s/'/''/g")'
EOF
fi
if [ -n "${CONTROLLER_MANAGER_TEST_LOG_LEVEL:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
controller_manager_test_log_level: '$(echo "$CONTROLLER_MANAGER_TEST_LOG_LEVEL" | sed -e "s/'/''/g")'
EOF
fi
if [ -n "${SCHEDULER_TEST_ARGS:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
scheduler_test_args: '$(echo "$SCHEDULER_TEST_ARGS" | sed -e "s/'/''/g")'
EOF
fi
if [ -n "${SCHEDULER_TEST_LOG_LEVEL:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
scheduler_test_log_level: '$(echo "$SCHEDULER_TEST_LOG_LEVEL" | sed -e "s/'/''/g")'
EOF
fi
if [ -n "${KUBEPROXY_TEST_ARGS:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
kubeproxy_test_args: '$(echo "$KUBEPROXY_TEST_ARGS" | sed -e "s/'/''/g")'
EOF
fi
if [ -n "${KUBEPROXY_TEST_LOG_LEVEL:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
kubeproxy_test_log_level: '$(echo "$KUBEPROXY_TEST_LOG_LEVEL" | sed -e "s/'/''/g")'
EOF
fi
# TODO: Replace this with a persistent volume (and create it).
if [[ "${ENABLE_CLUSTER_REGISTRY}" == true && -n "${CLUSTER_REGISTRY_DISK}" ]]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
cluster_registry_disk_type: gce
cluster_registry_disk_size: $(echo $(convert-bytes-gce-kube ${CLUSTER_REGISTRY_DISK_SIZE}) | sed -e "s/'/''/g")
cluster_registry_disk_name: $(echo ${CLUSTER_REGISTRY_DISK} | sed -e "s/'/''/g")
EOF
fi
if [ -n "${TERMINATED_POD_GC_THRESHOLD:-}" ]; then
cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
terminated_pod_gc_threshold: '$(echo "${TERMINATED_POD_GC_THRESHOLD}" | sed -e "s/'/''/g")'
EOF
fi
}
# The job of this function is simple, but the basic regular expression syntax makes
# this difficult to read. What we want to do is convert from [0-9]+B, KB, KiB, MB, etc
# into [0-9]+, Ki, Mi, Gi, etc.
# This is done in two steps:
# 1. Convert from [0-9]+X?i?B into [0-9]X? (X denotes the prefix, ? means the field
# is optional.
# 2. Attach an 'i' to the end of the string if we find a letter.
# The two step process is needed to handle the edge case in which we want to convert
# a raw byte count, as the result should be a simple number (e.g. 5B -> 5).
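# Further examples: 20GB -> 20Gi, 100KiB -> 100Ki, 512MiB -> 512Mi.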
function convert-bytes-gce-kube() {
local -r storage_space=$1
echo "${storage_space}" | sed -e 's/^\([0-9]\+\)\([A-Z]\)\?i\?B$/\1\2/g' -e 's/\([A-Z]\)$/\1i/'
}
# This should only happen on cluster initialization.
#
# - Uses KUBE_PASSWORD and KUBE_USER to generate basic_auth.csv.
# - Uses KUBE_BEARER_TOKEN, KUBELET_TOKEN, and KUBE_PROXY_TOKEN to generate
# known_tokens.csv (KNOWN_TOKENS_FILE).
# - Uses CA_CERT, MASTER_CERT, and MASTER_KEY to populate the SSL credentials
# for the apiserver.
# - Optionally uses KUBECFG_CERT and KUBECFG_KEY to store a copy of the client
# cert credentials.
#
# After the first boot and on upgrade, these files exists on the master-pd
# and should never be touched again (except perhaps an additional service
# account, see NB below.)
function create-salt-master-auth() {
if [[ ! -e /srv/kubernetes/ca.crt ]]; then
if [[ ! -z "${CA_CERT:-}" ]] && [[ ! -z "${MASTER_CERT:-}" ]] && [[ ! -z "${MASTER_KEY:-}" ]]; then
mkdir -p /srv/kubernetes
(umask 077;
echo "${CA_CERT}" | base64 -d > /srv/kubernetes/ca.crt;
echo "${MASTER_CERT}" | base64 -d > /srv/kubernetes/server.cert;
echo "${MASTER_KEY}" | base64 -d > /srv/kubernetes/server.key;
# Kubecfg cert/key are optional and included for backwards compatibility.
# TODO(roberthbailey): Remove these two lines once GKE no longer requires
# fetching clients certs from the master VM.
echo "${KUBECFG_CERT:-}" | base64 -d > /srv/kubernetes/kubecfg.crt;
echo "${KUBECFG_KEY:-}" | base64 -d > /srv/kubernetes/kubecfg.key)
fi
fi
if [ ! -e "${BASIC_AUTH_FILE}" ]; then
mkdir -p /srv/salt-overlay/salt/kube-apiserver
(umask 077;
echo "${KUBE_PASSWORD},${KUBE_USER},admin" > "${BASIC_AUTH_FILE}")
fi
if [ ! -e "${KNOWN_TOKENS_FILE}" ]; then
mkdir -p /srv/salt-overlay/salt/kube-apiserver
(umask 077;
echo "${KUBE_BEARER_TOKEN},admin,admin" > "${KNOWN_TOKENS_FILE}";
echo "${KUBELET_TOKEN},kubelet,kubelet" >> "${KNOWN_TOKENS_FILE}";
echo "${KUBE_PROXY_TOKEN},kube_proxy,kube_proxy" >> "${KNOWN_TOKENS_FILE}")
# Generate tokens for other "service accounts". Append to known_tokens.
#
# NB: If this list ever changes, this script actually has to
# change to detect the existence of this file, kill any deleted
# old tokens and add any new tokens (to handle the upgrade case).
local -r service_accounts=("system:scheduler" "system:controller_manager" "system:logging" "system:monitoring" "system:dns")
for account in "${service_accounts[@]}"; do
token=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
echo "${token},${account},${account}" >> "${KNOWN_TOKENS_FILE}"
done
fi
}
# This should happen only on cluster initialization. After the first boot
# and on upgrade, the kubeconfig file exists on the master-pd and should
# never be touched again.
#
# - Uses KUBELET_CA_CERT (falling back to CA_CERT), KUBELET_CERT, and
# KUBELET_KEY to generate a kubeconfig file for the kubelet to securely
# connect to the apiserver.
function create-salt-master-kubelet-auth() {
# Only configure the kubelet on the master if the required variables are
# set in the environment.
if [[ ! -z "${KUBELET_APISERVER:-}" ]] && [[ ! -z "${KUBELET_CERT:-}" ]] && [[ ! -z "${KUBELET_KEY:-}" ]]; then
create-salt-kubelet-auth
fi
}
# This should happen both on cluster initialization and node upgrades.
#
# - Uses KUBELET_CA_CERT (falling back to CA_CERT), KUBELET_CERT, and
# KUBELET_KEY to generate a kubeconfig file for the kubelet to securely
# connect to the apiserver.
function create-salt-kubelet-auth() {
local -r kubelet_kubeconfig_file="/srv/salt-overlay/salt/kubelet/kubeconfig"
if [ ! -e "${kubelet_kubeconfig_file}" ]; then
# If there isn't a CA certificate set specifically for the kubelet, use
# the cluster CA certificate.
if [[ -z "${KUBELET_CA_CERT:-}" ]]; then
KUBELET_CA_CERT="${CA_CERT}"
fi
mkdir -p /srv/salt-overlay/salt/kubelet
(umask 077;
cat > "${kubelet_kubeconfig_file}" <<EOF
apiVersion: v1
kind: Config
users:
- name: kubelet
user:
client-certificate-data: ${KUBELET_CERT}
client-key-data: ${KUBELET_KEY}
clusters:
- name: local
cluster:
certificate-authority-data: ${KUBELET_CA_CERT}
contexts:
- context:
cluster: local
user: kubelet
name: service-account-context
current-context: service-account-context
EOF
)
fi
}
# This should happen both on cluster initialization and node upgrades.
#
# - Uses the CA_CERT and KUBE_PROXY_TOKEN to generate a kubeconfig file for
# the kube-proxy to securely connect to the apiserver.
function create-salt-kubeproxy-auth() {
local -r kube_proxy_kubeconfig_file="/srv/salt-overlay/salt/kube-proxy/kubeconfig"
if [ ! -e "${kube_proxy_kubeconfig_file}" ]; then
mkdir -p /srv/salt-overlay/salt/kube-proxy
(umask 077;
cat > "${kube_proxy_kubeconfig_file}" <<EOF
apiVersion: v1
kind: Config
users:
- name: kube-proxy
user:
token: ${KUBE_PROXY_TOKEN}
clusters:
- name: local
cluster:
certificate-authority-data: ${CA_CERT}
contexts:
- context:
cluster: local
user: kube-proxy
name: service-account-context
current-context: service-account-context
EOF
)
fi
}
function try-download-release() {
# TODO(zmerlynn): Now we REALLy have no excuse not to do the reboot
# optimization.
# TODO(zmerlynn): This may not be set yet by everyone (GKE).
if [[ -z "${SERVER_BINARY_TAR_HASH:-}" ]]; then
echo "Downloading binary release sha1 (not found in env)"
download-or-bust "${SERVER_BINARY_TAR_URL}.sha1"
SERVER_BINARY_TAR_HASH=$(cat "${SERVER_BINARY_TAR_URL##*/}.sha1")
fi
echo "Downloading binary release tar (${SERVER_BINARY_TAR_URL})"
download-or-bust "${SERVER_BINARY_TAR_URL}"
validate-hash "${SERVER_BINARY_TAR_URL##*/}" "${SERVER_BINARY_TAR_HASH}"
echo "Validated ${SERVER_BINARY_TAR_URL} SHA1 = ${SERVER_BINARY_TAR_HASH}"
# TODO(zmerlynn): This may not be set yet by everyone (GKE).
if [[ -z "${SALT_TAR_HASH:-}" ]]; then
echo "Downloading Salt tar sha1 (not found in env)"
download-or-bust "${SALT_TAR_URL}.sha1"
SALT_TAR_HASH=$(cat "${SALT_TAR_URL##*/}.sha1")
fi
echo "Downloading Salt tar ($SALT_TAR_URL)"
download-or-bust "$SALT_TAR_URL"
validate-hash "${SALT_TAR_URL##*/}" "${SALT_TAR_HASH}"
echo "Validated ${SALT_TAR_URL} SHA1 = ${SALT_TAR_HASH}"
echo "Unpacking Salt tree and checking integrity of binary release tar"
rm -rf kubernetes
tar xzf "${SALT_TAR_URL##*/}" && tar tzf "${SERVER_BINARY_TAR_URL##*/}" > /dev/null
}
function download-release() {
# In case of failure checking integrity of release, retry.
until try-download-release; do
sleep 15
echo "Couldn't download release. Retrying..."
done
echo "Running release install script"
kubernetes/saltbase/install.sh "${SERVER_BINARY_TAR_URL##*/}"
}
function fix-apt-sources() {
sed -i -e "\|^deb.*http://http.debian.net/debian| s/^/#/" /etc/apt/sources.list
sed -i -e "\|^deb.*http://ftp.debian.org/debian| s/^/#/" /etc/apt/sources.list.d/backports.list
}
function salt-run-local() {
cat <<EOF >/etc/salt/minion.d/local.conf
file_client: local
file_roots:
base:
- /srv/salt
EOF
}
function salt-debug-log() {
cat <<EOF >/etc/salt/minion.d/log-level-debug.conf
log_level: debug
log_level_logfile: debug
EOF
}
function salt-master-role() {
cat <<EOF >/etc/salt/minion.d/grains.conf
grains:
roles:
- kubernetes-master
cloud: gce
EOF
cat <<EOF >/etc/gce.conf
[global]
EOF
CLOUD_CONFIG='' # Set to non-empty path if we are using the gce.conf file
if ! [[ -z "${PROJECT_ID:-}" ]] && ! [[ -z "${TOKEN_URL:-}" ]] && ! [[ -z "${TOKEN_BODY:-}" ]] && ! [[ -z "${NODE_NETWORK:-}" ]] ; then
cat <<EOF >>/etc/gce.conf
token-url = ${TOKEN_URL}
token-body = ${TOKEN_BODY}
project-id = ${PROJECT_ID}
network-name = ${NODE_NETWORK}
EOF
CLOUD_CONFIG=/etc/gce.conf
EXTERNAL_IP=$(curl --fail --silent -H 'Metadata-Flavor: Google' "http://metadata/computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip")
cat <<EOF >>/etc/salt/minion.d/grains.conf
advertise_address: '${EXTERNAL_IP}'
proxy_ssh_user: '${PROXY_SSH_USER}'
EOF
fi
if [[ -n "${MULTIZONE:-}" ]]; then
cat <<EOF >>/etc/gce.conf
multizone = ${MULTIZONE}
EOF
CLOUD_CONFIG=/etc/gce.conf
fi
if [[ -n ${CLOUD_CONFIG:-} ]]; then
cat <<EOF >>/etc/salt/minion.d/grains.conf
cloud_config: ${CLOUD_CONFIG}
EOF
else
rm -f /etc/gce.conf
fi
# If the kubelet on the master is enabled, give it the same CIDR range
# as a generic node.
if [[ ! -z "${KUBELET_APISERVER:-}" ]] && [[ ! -z "${KUBELET_CERT:-}" ]] && [[ ! -z "${KUBELET_KEY:-}" ]]; then
cat <<EOF >>/etc/salt/minion.d/grains.conf
kubelet_api_servers: '${KUBELET_APISERVER}'
cbr-cidr: 10.123.45.0/30
EOF
else
# If the kubelet is running disconnected from a master, give it a fixed
# CIDR range.
cat <<EOF >>/etc/salt/minion.d/grains.conf
cbr-cidr: ${MASTER_IP_RANGE}
EOF
fi
if [[ ! -z "${RUNTIME_CONFIG:-}" ]]; then
cat <<EOF >>/etc/salt/minion.d/grains.conf
runtime_config: '$(echo "$RUNTIME_CONFIG" | sed -e "s/'/''/g")'
EOF
fi
}
function salt-node-role() {
cat <<EOF >/etc/salt/minion.d/grains.conf
grains:
roles:
- kubernetes-pool
cbr-cidr: 10.123.45.0/30
cloud: gce
api_servers: '${KUBERNETES_MASTER_NAME}'
EOF
}
function salt-docker-opts() {
DOCKER_OPTS=""
if [[ -n "${EXTRA_DOCKER_OPTS-}" ]]; then
DOCKER_OPTS="${EXTRA_DOCKER_OPTS}"
fi
if [[ -n "{DOCKER_OPTS}" ]]; then
cat <<EOF >>/etc/salt/minion.d/grains.conf
docker_opts: '$(echo "$DOCKER_OPTS" | sed -e "s/'/''/g")'
EOF
fi
}
function configure-salt() {
fix-apt-sources
mkdir -p /etc/salt/minion.d
salt-run-local
if [[ "${KUBERNETES_MASTER}" == "true" ]]; then
salt-master-role
if [ -n "${KUBE_APISERVER_REQUEST_TIMEOUT:-}" ]; then
salt-apiserver-timeout-grain $KUBE_APISERVER_REQUEST_TIMEOUT
fi
else
salt-node-role
salt-docker-opts
fi
install-salt
stop-salt-minion
}
function run-salt() {
echo "== Calling Salt =="
salt-call --local state.highstate || true
}
####################################################################################
if [[ -z "${is_push}" ]]; then
echo "== kube-up node config starting =="
set-broken-motd
ensure-basic-networking
ensure-install-dir
set-kube-env
[[ "${KUBERNETES_MASTER}" == "true" ]] && mount-master-pd
create-salt-pillar
if [[ "${KUBERNETES_MASTER}" == "true" ]]; then
create-salt-master-auth
create-salt-master-kubelet-auth
else
create-salt-kubelet-auth
create-salt-kubeproxy-auth
fi
download-release
configure-salt
remove-docker-artifacts
run-salt
set-good-motd
if curl-metadata k8s-user-startup-script > "${INSTALL_DIR}/k8s-user-script.sh"; then
user_script=$(cat "${INSTALL_DIR}/k8s-user-script.sh")
fi
if [[ ! -z ${user_script:-} ]]; then
chmod u+x "${INSTALL_DIR}/k8s-user-script.sh"
echo "== running user startup script =="
"${INSTALL_DIR}/k8s-user-script.sh"
fi
echo "== kube-up node config done =="
else
echo "== kube-push node config starting =="
ensure-basic-networking
ensure-install-dir
set-kube-env
create-salt-pillar
download-release
run-salt
echo "== kube-push node config done =="
fi
|
Samsung-AG/kubernetes
|
cluster/gce/configure-vm.sh
|
Shell
|
apache-2.0
| 25,849 |
# bash/zsh completion support for core Git.
#
# Copyright (C) 2006,2007 Shawn O. Pearce <[email protected]>
# Conceptually based on gitcompletion (http://gitweb.hawaga.org.uk/).
# Distributed under the GNU General Public License, version 2.0.
#
# The contained completion routines provide support for completing:
#
# *) local and remote branch names
# *) local and remote tag names
# *) .git/remotes file names
# *) git 'subcommands'
# *) tree paths within 'ref:path/to/file' expressions
# *) file paths within current working directory and index
# *) common --long-options
#
# To use these routines:
#
# 1) Copy this file to somewhere (e.g. ~/.git-completion.bash).
# 2) Add the following line to your .bashrc/.zshrc:
# source ~/.git-completion.bash
# 3) Consider changing your PS1 to also show the current branch,
# see git-prompt.sh for details.
#
# If you use complex aliases of form '!f() { ... }; f', you can use the null
# command ':' as the first command in the function body to declare the desired
# completion style. For example '!f() { : git commit ; ... }; f' will
# tell the completion to use commit completion. This also works with aliases
# of form "!sh -c '...'". For example, "!sh -c ': git commit ; ... '".
case "$COMP_WORDBREAKS" in
*:*) : great ;;
*) COMP_WORDBREAKS="$COMP_WORDBREAKS:"
esac
# __gitdir accepts 0 or 1 arguments (i.e., location)
# returns location of .git repo
__gitdir ()
{
if [ -z "${1-}" ]; then
if [ -n "${__git_dir-}" ]; then
echo "$__git_dir"
elif [ -n "${GIT_DIR-}" ]; then
test -d "${GIT_DIR-}" || return 1
echo "$GIT_DIR"
elif [ -d .git ]; then
echo .git
else
git rev-parse --git-dir 2>/dev/null
fi
elif [ -d "$1/.git" ]; then
echo "$1/.git"
else
echo "$1"
fi
}
# The following function is based on code from:
#
# bash_completion - programmable completion functions for bash 3.2+
#
# Copyright © 2006-2008, Ian Macdonald <[email protected]>
# © 2009-2010, Bash Completion Maintainers
# <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# The latest version of this software can be obtained here:
#
# http://bash-completion.alioth.debian.org/
#
# RELEASE: 2.x
# This function can be used to access a tokenized list of words
# on the command line:
#
# __git_reassemble_comp_words_by_ref '=:'
# if test "${words_[cword_-1]}" = -w
# then
# ...
# fi
#
# The argument should be a collection of characters from the list of
# word completion separators (COMP_WORDBREAKS) to treat as ordinary
# characters.
#
# This is roughly equivalent to going back in time and setting
# COMP_WORDBREAKS to exclude those characters. The intent is to
# make option types like --date=<type> and <rev>:<path> easy to
# recognize by treating each shell word as a single token.
#
# It is best not to set COMP_WORDBREAKS directly because the value is
# shared with other completion scripts. By the time the completion
# function gets called, COMP_WORDS has already been populated so local
# changes to COMP_WORDBREAKS have no effect.
#
# Output: words_, cword_, cur_.
__git_reassemble_comp_words_by_ref()
{
local exclude i j first
# Which word separators to exclude?
exclude="${1//[^$COMP_WORDBREAKS]}"
cword_=$COMP_CWORD
if [ -z "$exclude" ]; then
words_=("${COMP_WORDS[@]}")
return
fi
# List of word completion separators has shrunk;
# re-assemble words to complete.
for ((i=0, j=0; i < ${#COMP_WORDS[@]}; i++, j++)); do
# Append each nonempty word consisting of just
# word separator characters to the current word.
first=t
while
[ $i -gt 0 ] &&
[ -n "${COMP_WORDS[$i]}" ] &&
# word consists of excluded word separators
[ "${COMP_WORDS[$i]//[^$exclude]}" = "${COMP_WORDS[$i]}" ]
do
# Attach to the previous token,
# unless the previous token is the command name.
if [ $j -ge 2 ] && [ -n "$first" ]; then
((j--))
fi
first=
words_[$j]=${words_[j]}${COMP_WORDS[i]}
if [ $i = $COMP_CWORD ]; then
cword_=$j
fi
if (($i < ${#COMP_WORDS[@]} - 1)); then
((i++))
else
# Done.
return
fi
done
words_[$j]=${words_[j]}${COMP_WORDS[i]}
if [ $i = $COMP_CWORD ]; then
cword_=$j
fi
done
}
if ! type _get_comp_words_by_ref >/dev/null 2>&1; then
_get_comp_words_by_ref ()
{
local exclude cur_ words_ cword_
if [ "$1" = "-n" ]; then
exclude=$2
shift 2
fi
__git_reassemble_comp_words_by_ref "$exclude"
cur_=${words_[cword_]}
while [ $# -gt 0 ]; do
case "$1" in
cur)
cur=$cur_
;;
prev)
prev=${words_[$cword_-1]}
;;
words)
words=("${words_[@]}")
;;
cword)
cword=$cword_
;;
esac
shift
done
}
fi
__gitcompappend ()
{
local x i=${#COMPREPLY[@]}
for x in $1; do
if [[ "$x" == "$3"* ]]; then
COMPREPLY[i++]="$2$x$4"
fi
done
}
__gitcompadd ()
{
COMPREPLY=()
__gitcompappend "$@"
}
# Generates completion reply, appending a space to possible completion words,
# if necessary.
# It accepts 1 to 4 arguments:
# 1: List of possible completion words.
# 2: A prefix to be added to each possible completion word (optional).
# 3: Generate possible completion matches for this word (optional).
# 4: A suffix to be appended to each possible completion word (optional).
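#
# Example (illustrative): most callers pass only a word list and rely on
# the defaults, e.g.
#
#   __gitcomp "--dry-run --quiet"
#
# which filters that list against the word currently being completed ($cur).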
__gitcomp ()
{
local cur_="${3-$cur}"
case "$cur_" in
--*=)
;;
*)
local c i=0 IFS=$' \t\n'
for c in $1; do
c="$c${4-}"
if [[ $c == "$cur_"* ]]; then
case $c in
--*=*|*.) ;;
*) c="$c " ;;
esac
COMPREPLY[i++]="${2-}$c"
fi
done
;;
esac
}
# Variation of __gitcomp_nl () that appends to the existing list of
# completion candidates, COMPREPLY.
__gitcomp_nl_append ()
{
local IFS=$'\n'
__gitcompappend "$1" "${2-}" "${3-$cur}" "${4- }"
}
# Generates completion reply from newline-separated possible completion words
# by appending a space to all of them.
# It accepts 1 to 4 arguments:
# 1: List of possible completion words, separated by a single newline.
# 2: A prefix to be added to each possible completion word (optional).
# 3: Generate possible completion matches for this word (optional).
# 4: A suffix to be appended to each possible completion word instead of
# the default space (optional). If specified but empty, nothing is
# appended.
__gitcomp_nl ()
{
COMPREPLY=()
__gitcomp_nl_append "$@"
}
# Generates completion reply with compgen from newline-separated possible
# completion filenames.
# It accepts 1 to 3 arguments:
# 1: List of possible completion filenames, separated by a single newline.
# 2: A directory prefix to be added to each possible completion filename
# (optional).
# 3: Generate possible completion matches for this word (optional).
__gitcomp_file ()
{
local IFS=$'\n'
# XXX does not work when the directory prefix contains a tilde,
# since tilde expansion is not applied.
# This means that COMPREPLY will be empty and Bash default
# completion will be used.
__gitcompadd "$1" "${2-}" "${3-$cur}" ""
# use a hack to enable file mode in bash < 4
compopt -o filenames +o nospace 2>/dev/null ||
compgen -f /non-existing-dir/ > /dev/null
}
# Execute 'git ls-files', unless the --committable option is specified, in
# which case it runs 'git diff-index' to find out the files that can be
# committed. It returns paths relative to the directory specified in the first
# argument, and using the options specified in the second argument.
__git_ls_files_helper ()
{
if [ "$2" == "--committable" ]; then
git -C "$1" diff-index --name-only --relative HEAD
else
# NOTE: $2 is not quoted in order to support multiple options
git -C "$1" ls-files --exclude-standard $2
fi 2>/dev/null
}
# __git_index_files accepts 1 or 2 arguments:
# 1: Options to pass to ls-files (required).
# 2: A directory path (optional).
# If provided, only files within the specified directory are listed.
# Sub directories are never recursed. Path must have a trailing
# slash.
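#
# Example (illustrative; the directory name is made up): complete cached
# paths found under a subdirectory, note the required trailing slash:
#
#   __git_index_files "--cached" "Documentation/"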
__git_index_files ()
{
local dir="$(__gitdir)" root="${2-.}" file
if [ -d "$dir" ]; then
__git_ls_files_helper "$root" "$1" |
while read -r file; do
case "$file" in
?*/*) echo "${file%%/*}" ;;
*) echo "$file" ;;
esac
done | sort | uniq
fi
}
__git_heads ()
{
local dir="$(__gitdir)"
if [ -d "$dir" ]; then
git --git-dir="$dir" for-each-ref --format='%(refname:short)' \
refs/heads
return
fi
}
__git_tags ()
{
local dir="$(__gitdir)"
if [ -d "$dir" ]; then
git --git-dir="$dir" for-each-ref --format='%(refname:short)' \
refs/tags
return
fi
}
# __git_refs accepts 0, 1 (to pass to __gitdir), or 2 arguments
# presence of 2nd argument means use the guess heuristic employed
# by checkout for tracking branches
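# Example (as used by _git_checkout below): __git_refs '' "$track" lists
# refs from the current repository and, when $track is non-empty, also
# unique remote-tracking branch names matching the word being completed.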
__git_refs ()
{
local i hash dir="$(__gitdir "${1-}")" track="${2-}"
local format refs
if [ -d "$dir" ]; then
case "$cur" in
refs|refs/*)
format="refname"
refs="${cur%/*}"
track=""
;;
*)
for i in HEAD FETCH_HEAD ORIG_HEAD MERGE_HEAD; do
if [ -e "$dir/$i" ]; then echo $i; fi
done
format="refname:short"
refs="refs/tags refs/heads refs/remotes"
;;
esac
git --git-dir="$dir" for-each-ref --format="%($format)" \
$refs
if [ -n "$track" ]; then
# employ the heuristic used by git checkout
# Try to find a remote branch that matches the completion word
# but only output if the branch name is unique
local ref entry
git --git-dir="$dir" for-each-ref --shell --format="ref=%(refname:short)" \
"refs/remotes/" | \
while read -r entry; do
eval "$entry"
ref="${ref#*/}"
if [[ "$ref" == "$cur"* ]]; then
echo "$ref"
fi
done | sort | uniq -u
fi
return
fi
case "$cur" in
refs|refs/*)
git ls-remote "$dir" "$cur*" 2>/dev/null | \
while read -r hash i; do
case "$i" in
*^{}) ;;
*) echo "$i" ;;
esac
done
;;
*)
echo "HEAD"
git for-each-ref --format="%(refname:short)" -- \
"refs/remotes/$dir/" 2>/dev/null | sed -e "s#^$dir/##"
;;
esac
}
# __git_refs2 requires 1 argument (to pass to __git_refs)
__git_refs2 ()
{
local i
for i in $(__git_refs "$1"); do
echo "$i:$i"
done
}
# __git_refs_remotes requires 1 argument (to pass to ls-remote)
__git_refs_remotes ()
{
local i hash
git ls-remote "$1" 'refs/heads/*' 2>/dev/null | \
while read -r hash i; do
echo "$i:refs/remotes/$1/${i#refs/heads/}"
done
}
__git_remotes ()
{
local d="$(__gitdir)"
test -d "$d/remotes" && ls -1 "$d/remotes"
git --git-dir="$d" remote
}
__git_list_merge_strategies ()
{
git merge -s help 2>&1 |
sed -n -e '/[Aa]vailable strategies are: /,/^$/{
s/\.$//
s/.*://
s/^[ ]*//
s/[ ]*$//
p
}'
}
__git_merge_strategies=
# 'git merge -s help' (and thus detection of the merge strategy
# list) fails, unfortunately, if run outside of any git working
# tree. __git_merge_strategies is set to the empty string in
# that case, and the detection will be repeated the next time it
# is needed.
__git_compute_merge_strategies ()
{
test -n "$__git_merge_strategies" ||
__git_merge_strategies=$(__git_list_merge_strategies)
}
__git_complete_revlist_file ()
{
local pfx ls ref cur_="$cur"
case "$cur_" in
*..?*:*)
return
;;
?*:*)
ref="${cur_%%:*}"
cur_="${cur_#*:}"
case "$cur_" in
?*/*)
pfx="${cur_%/*}"
cur_="${cur_##*/}"
ls="$ref:$pfx"
pfx="$pfx/"
;;
*)
ls="$ref"
;;
esac
case "$COMP_WORDBREAKS" in
*:*) : great ;;
*) pfx="$ref:$pfx" ;;
esac
__gitcomp_nl "$(git --git-dir="$(__gitdir)" ls-tree "$ls" 2>/dev/null \
| sed '/^100... blob /{
s,^.* ,,
s,$, ,
}
/^120000 blob /{
s,^.* ,,
s,$, ,
}
/^040000 tree /{
s,^.* ,,
s,$,/,
}
s/^.* //')" \
"$pfx" "$cur_" ""
;;
*...*)
pfx="${cur_%...*}..."
cur_="${cur_#*...}"
__gitcomp_nl "$(__git_refs)" "$pfx" "$cur_"
;;
*..*)
pfx="${cur_%..*}.."
cur_="${cur_#*..}"
__gitcomp_nl "$(__git_refs)" "$pfx" "$cur_"
;;
*)
__gitcomp_nl "$(__git_refs)"
;;
esac
}
# __git_complete_index_file requires 1 argument:
# 1: the options to pass to ls-file
#
# The exception is --committable, which finds the files appropriate for commit.
__git_complete_index_file ()
{
local pfx="" cur_="$cur"
case "$cur_" in
?*/*)
pfx="${cur_%/*}"
cur_="${cur_##*/}"
pfx="${pfx}/"
;;
esac
__gitcomp_file "$(__git_index_files "$1" ${pfx:+"$pfx"})" "$pfx" "$cur_"
}
__git_complete_file ()
{
__git_complete_revlist_file
}
__git_complete_revlist ()
{
__git_complete_revlist_file
}
__git_complete_remote_or_refspec ()
{
local cur_="$cur" cmd="${words[1]}"
local i c=2 remote="" pfx="" lhs=1 no_complete_refspec=0
if [ "$cmd" = "remote" ]; then
((c++))
fi
while [ $c -lt $cword ]; do
i="${words[c]}"
case "$i" in
--mirror) [ "$cmd" = "push" ] && no_complete_refspec=1 ;;
--all)
case "$cmd" in
push) no_complete_refspec=1 ;;
fetch)
return
;;
*) ;;
esac
;;
-*) ;;
*) remote="$i"; break ;;
esac
((c++))
done
if [ -z "$remote" ]; then
__gitcomp_nl "$(__git_remotes)"
return
fi
if [ $no_complete_refspec = 1 ]; then
return
fi
[ "$remote" = "." ] && remote=
case "$cur_" in
*:*)
case "$COMP_WORDBREAKS" in
*:*) : great ;;
*) pfx="${cur_%%:*}:" ;;
esac
cur_="${cur_#*:}"
lhs=0
;;
+*)
pfx="+"
cur_="${cur_#+}"
;;
esac
case "$cmd" in
fetch)
if [ $lhs = 1 ]; then
__gitcomp_nl "$(__git_refs2 "$remote")" "$pfx" "$cur_"
else
__gitcomp_nl "$(__git_refs)" "$pfx" "$cur_"
fi
;;
pull|remote)
if [ $lhs = 1 ]; then
__gitcomp_nl "$(__git_refs "$remote")" "$pfx" "$cur_"
else
__gitcomp_nl "$(__git_refs)" "$pfx" "$cur_"
fi
;;
push)
if [ $lhs = 1 ]; then
__gitcomp_nl "$(__git_refs)" "$pfx" "$cur_"
else
__gitcomp_nl "$(__git_refs "$remote")" "$pfx" "$cur_"
fi
;;
esac
}
__git_complete_strategy ()
{
__git_compute_merge_strategies
case "$prev" in
-s|--strategy)
__gitcomp "$__git_merge_strategies"
return 0
esac
case "$cur" in
--strategy=*)
__gitcomp "$__git_merge_strategies" "" "${cur##--strategy=}"
return 0
;;
esac
return 1
}
__git_commands () {
if test -n "${GIT_TESTING_COMMAND_COMPLETION:-}"
then
printf "%s" "${GIT_TESTING_COMMAND_COMPLETION}"
else
git help -a|egrep '^ [a-zA-Z0-9]'
fi
}
__git_list_all_commands ()
{
local i IFS=" "$'\n'
for i in $(__git_commands)
do
case $i in
*--*) : helper pattern;;
*) echo $i;;
esac
done
}
__git_all_commands=
__git_compute_all_commands ()
{
test -n "$__git_all_commands" ||
__git_all_commands=$(__git_list_all_commands)
}
__git_list_porcelain_commands ()
{
local i IFS=" "$'\n'
__git_compute_all_commands
for i in $__git_all_commands
do
case $i in
*--*) : helper pattern;;
applymbox) : ask gittus;;
applypatch) : ask gittus;;
archimport) : import;;
cat-file) : plumbing;;
check-attr) : plumbing;;
check-ignore) : plumbing;;
check-mailmap) : plumbing;;
check-ref-format) : plumbing;;
checkout-index) : plumbing;;
commit-tree) : plumbing;;
count-objects) : infrequent;;
credential) : credentials;;
credential-*) : credentials helper;;
cvsexportcommit) : export;;
cvsimport) : import;;
cvsserver) : daemon;;
daemon) : daemon;;
diff-files) : plumbing;;
diff-index) : plumbing;;
diff-tree) : plumbing;;
fast-import) : import;;
fast-export) : export;;
fsck-objects) : plumbing;;
fetch-pack) : plumbing;;
fmt-merge-msg) : plumbing;;
for-each-ref) : plumbing;;
hash-object) : plumbing;;
http-*) : transport;;
index-pack) : plumbing;;
init-db) : deprecated;;
local-fetch) : plumbing;;
ls-files) : plumbing;;
ls-remote) : plumbing;;
ls-tree) : plumbing;;
mailinfo) : plumbing;;
mailsplit) : plumbing;;
merge-*) : plumbing;;
mktree) : plumbing;;
mktag) : plumbing;;
pack-objects) : plumbing;;
pack-redundant) : plumbing;;
pack-refs) : plumbing;;
parse-remote) : plumbing;;
patch-id) : plumbing;;
prune) : plumbing;;
prune-packed) : plumbing;;
quiltimport) : import;;
read-tree) : plumbing;;
receive-pack) : plumbing;;
remote-*) : transport;;
rerere) : plumbing;;
rev-list) : plumbing;;
rev-parse) : plumbing;;
runstatus) : plumbing;;
sh-setup) : internal;;
shell) : daemon;;
show-ref) : plumbing;;
send-pack) : plumbing;;
show-index) : plumbing;;
ssh-*) : transport;;
stripspace) : plumbing;;
symbolic-ref) : plumbing;;
unpack-file) : plumbing;;
unpack-objects) : plumbing;;
update-index) : plumbing;;
update-ref) : plumbing;;
update-server-info) : daemon;;
upload-archive) : plumbing;;
upload-pack) : plumbing;;
write-tree) : plumbing;;
var) : infrequent;;
verify-pack) : infrequent;;
verify-tag) : plumbing;;
*) echo $i;;
esac
done
}
__git_porcelain_commands=
__git_compute_porcelain_commands ()
{
test -n "$__git_porcelain_commands" ||
__git_porcelain_commands=$(__git_list_porcelain_commands)
}
# Lists all set config variables starting with the given section prefix,
# with the prefix removed.
__git_get_config_variables ()
{
local section="$1" i IFS=$'\n'
for i in $(git --git-dir="$(__gitdir)" config --get-regexp "^$section\..*" 2>/dev/null); do
i="${i#$section.}"
echo "${i/ */}"
done
}
__git_pretty_aliases ()
{
__git_get_config_variables "pretty"
}
__git_aliases ()
{
__git_get_config_variables "alias"
}
# __git_aliased_command requires 1 argument
__git_aliased_command ()
{
local word cmdline=$(git --git-dir="$(__gitdir)" \
config --get "alias.$1")
for word in $cmdline; do
case "$word" in
\!gitk|gitk)
echo "gitk"
return
;;
\!*) : shell command alias ;;
-*) : option ;;
*=*) : setting env ;;
git) : git itself ;;
\(\)) : skip parens of shell function definition ;;
{) : skip start of shell helper function ;;
:) : skip null command ;;
\'*) : skip opening quote after sh -c ;;
*)
echo "$word"
return
esac
done
}
# __git_find_on_cmdline requires 1 argument
__git_find_on_cmdline ()
{
local word subcommand c=1
while [ $c -lt $cword ]; do
word="${words[c]}"
for subcommand in $1; do
if [ "$subcommand" = "$word" ]; then
echo "$subcommand"
return
fi
done
((c++))
done
}
__git_has_doubledash ()
{
local c=1
while [ $c -lt $cword ]; do
if [ "--" = "${words[c]}" ]; then
return 0
fi
((c++))
done
return 1
}
# Try to count non option arguments passed on the command line for the
# specified git command.
# When options are used, it is necessary to use the special -- option to
# tell the implementation where non option arguments begin.
# XXX this can not be improved, since options can appear everywhere, as
# an example:
# git mv x -n y
#
# __git_count_arguments requires 1 argument: the git command executed.
__git_count_arguments ()
{
local word i c=0
# Skip "git" (first argument)
for ((i=1; i < ${#words[@]}; i++)); do
word="${words[i]}"
case "$word" in
--)
# Good; we can assume that the following are only non
# option arguments.
((c = 0))
;;
"$1")
# Skip the specified git command and discard git
# main options
((c = 0))
;;
?*)
((c++))
;;
esac
done
printf "%d" $c
}
__git_whitespacelist="nowarn warn error error-all fix"
_git_am ()
{
local dir="$(__gitdir)"
if [ -d "$dir"/rebase-apply ]; then
__gitcomp "--skip --continue --resolved --abort"
return
fi
case "$cur" in
--whitespace=*)
__gitcomp "$__git_whitespacelist" "" "${cur##--whitespace=}"
return
;;
--*)
__gitcomp "
--3way --committer-date-is-author-date --ignore-date
--ignore-whitespace --ignore-space-change
--interactive --keep --no-utf8 --signoff --utf8
--whitespace= --scissors
"
return
esac
}
_git_apply ()
{
case "$cur" in
--whitespace=*)
__gitcomp "$__git_whitespacelist" "" "${cur##--whitespace=}"
return
;;
--*)
__gitcomp "
--stat --numstat --summary --check --index
--cached --index-info --reverse --reject --unidiff-zero
--apply --no-add --exclude=
--ignore-whitespace --ignore-space-change
--whitespace= --inaccurate-eof --verbose
"
return
esac
}
_git_add ()
{
case "$cur" in
--*)
__gitcomp "
--interactive --refresh --patch --update --dry-run
--ignore-errors --intent-to-add
"
return
esac
# XXX should we check for --update and --all options ?
__git_complete_index_file "--others --modified --directory --no-empty-directory"
}
_git_archive ()
{
case "$cur" in
--format=*)
__gitcomp "$(git archive --list)" "" "${cur##--format=}"
return
;;
--remote=*)
__gitcomp_nl "$(__git_remotes)" "" "${cur##--remote=}"
return
;;
--*)
__gitcomp "
--format= --list --verbose
--prefix= --remote= --exec=
"
return
;;
esac
__git_complete_file
}
_git_bisect ()
{
__git_has_doubledash && return
local subcommands="start bad good skip reset visualize replay log run"
local subcommand="$(__git_find_on_cmdline "$subcommands")"
if [ -z "$subcommand" ]; then
if [ -f "$(__gitdir)"/BISECT_START ]; then
__gitcomp "$subcommands"
else
__gitcomp "replay start"
fi
return
fi
case "$subcommand" in
bad|good|reset|skip|start)
__gitcomp_nl "$(__git_refs)"
;;
*)
;;
esac
}
_git_branch ()
{
local i c=1 only_local_ref="n" has_r="n"
while [ $c -lt $cword ]; do
i="${words[c]}"
case "$i" in
-d|-m) only_local_ref="y" ;;
-r) has_r="y" ;;
esac
((c++))
done
case "$cur" in
--set-upstream-to=*)
__gitcomp_nl "$(__git_refs)" "" "${cur##--set-upstream-to=}"
;;
--*)
__gitcomp "
--color --no-color --verbose --abbrev= --no-abbrev
--track --no-track --contains --merged --no-merged
--set-upstream-to= --edit-description --list
--unset-upstream
"
;;
*)
if [ $only_local_ref = "y" -a $has_r = "n" ]; then
__gitcomp_nl "$(__git_heads)"
else
__gitcomp_nl "$(__git_refs)"
fi
;;
esac
}
_git_bundle ()
{
local cmd="${words[2]}"
case "$cword" in
2)
__gitcomp "create list-heads verify unbundle"
;;
3)
# looking for a file
;;
*)
case "$cmd" in
create)
__git_complete_revlist
;;
esac
;;
esac
}
_git_checkout ()
{
__git_has_doubledash && return
case "$cur" in
--conflict=*)
__gitcomp "diff3 merge" "" "${cur##--conflict=}"
;;
--*)
__gitcomp "
--quiet --ours --theirs --track --no-track --merge
--conflict= --orphan --patch
"
;;
*)
# check if --track, --no-track, or --no-guess was specified
# if so, disable DWIM mode
local flags="--track --no-track --no-guess" track=1
if [ -n "$(__git_find_on_cmdline "$flags")" ]; then
track=''
fi
__gitcomp_nl "$(__git_refs '' $track)"
;;
esac
}
_git_cherry ()
{
__gitcomp_nl "$(__git_refs)"
}
_git_cherry_pick ()
{
local dir="$(__gitdir)"
if [ -f "$dir"/CHERRY_PICK_HEAD ]; then
__gitcomp "--continue --quit --abort"
return
fi
case "$cur" in
--*)
__gitcomp "--edit --no-commit --signoff --strategy= --mainline"
;;
*)
__gitcomp_nl "$(__git_refs)"
;;
esac
}
_git_clean ()
{
case "$cur" in
--*)
__gitcomp "--dry-run --quiet"
return
;;
esac
# XXX should we check for -x option ?
__git_complete_index_file "--others --directory"
}
_git_clone ()
{
case "$cur" in
--*)
__gitcomp "
--local
--no-hardlinks
--shared
--reference
--quiet
--no-checkout
--bare
--mirror
--origin
--upload-pack
--template=
--depth
--single-branch
--branch
"
return
;;
esac
}
_git_commit ()
{
case "$prev" in
-c|-C)
__gitcomp_nl "$(__git_refs)" "" "${cur}"
return
;;
esac
case "$cur" in
--cleanup=*)
__gitcomp "default scissors strip verbatim whitespace
" "" "${cur##--cleanup=}"
return
;;
--reuse-message=*|--reedit-message=*|\
--fixup=*|--squash=*)
__gitcomp_nl "$(__git_refs)" "" "${cur#*=}"
return
;;
--untracked-files=*)
__gitcomp "all no normal" "" "${cur##--untracked-files=}"
return
;;
--*)
__gitcomp "
--all --author= --signoff --verify --no-verify
--edit --no-edit
--amend --include --only --interactive
--dry-run --reuse-message= --reedit-message=
--reset-author --file= --message= --template=
--cleanup= --untracked-files --untracked-files=
--verbose --quiet --fixup= --squash=
"
return
esac
if git rev-parse --verify --quiet HEAD >/dev/null; then
__git_complete_index_file "--committable"
else
# This is the first commit
__git_complete_index_file "--cached"
fi
}
_git_describe ()
{
case "$cur" in
--*)
__gitcomp "
--all --tags --contains --abbrev= --candidates=
--exact-match --debug --long --match --always
"
return
esac
__gitcomp_nl "$(__git_refs)"
}
__git_diff_algorithms="myers minimal patience histogram"
__git_diff_common_options="--stat --numstat --shortstat --summary
--patch-with-stat --name-only --name-status --color
--no-color --color-words --no-renames --check
--full-index --binary --abbrev --diff-filter=
--find-copies-harder
--text --ignore-space-at-eol --ignore-space-change
--ignore-all-space --ignore-blank-lines --exit-code
--quiet --ext-diff --no-ext-diff
--no-prefix --src-prefix= --dst-prefix=
--inter-hunk-context=
--patience --histogram --minimal
--raw --word-diff
--dirstat --dirstat= --dirstat-by-file
--dirstat-by-file= --cumulative
--diff-algorithm=
"
_git_diff ()
{
__git_has_doubledash && return
case "$cur" in
--diff-algorithm=*)
__gitcomp "$__git_diff_algorithms" "" "${cur##--diff-algorithm=}"
return
;;
--*)
__gitcomp "--cached --staged --pickaxe-all --pickaxe-regex
--base --ours --theirs --no-index
$__git_diff_common_options
"
return
;;
esac
__git_complete_revlist_file
}
__git_mergetools_common="diffuse diffmerge ecmerge emerge kdiff3 meld opendiff
tkdiff vimdiff gvimdiff xxdiff araxis p4merge bc codecompare
"
_git_difftool ()
{
__git_has_doubledash && return
case "$cur" in
--tool=*)
__gitcomp "$__git_mergetools_common kompare" "" "${cur##--tool=}"
return
;;
--*)
__gitcomp "--cached --staged --pickaxe-all --pickaxe-regex
--base --ours --theirs
--no-renames --diff-filter= --find-copies-harder
--relative --ignore-submodules
--tool="
return
;;
esac
__git_complete_revlist_file
}
__git_fetch_recurse_submodules="yes on-demand no"
__git_fetch_options="
--quiet --verbose --append --upload-pack --force --keep --depth=
--tags --no-tags --all --prune --dry-run --recurse-submodules=
"
_git_fetch ()
{
case "$cur" in
--recurse-submodules=*)
__gitcomp "$__git_fetch_recurse_submodules" "" "${cur##--recurse-submodules=}"
return
;;
--*)
__gitcomp "$__git_fetch_options"
return
;;
esac
__git_complete_remote_or_refspec
}
__git_format_patch_options="
--stdout --attach --no-attach --thread --thread= --no-thread
--numbered --start-number --numbered-files --keep-subject --signoff
--signature --no-signature --in-reply-to= --cc= --full-index --binary
--not --all --cover-letter --no-prefix --src-prefix= --dst-prefix=
--inline --suffix= --ignore-if-in-upstream --subject-prefix=
--output-directory --reroll-count --to= --quiet --notes
"
_git_format_patch ()
{
case "$cur" in
--thread=*)
__gitcomp "
deep shallow
" "" "${cur##--thread=}"
return
;;
--*)
__gitcomp "$__git_format_patch_options"
return
;;
esac
__git_complete_revlist
}
_git_fsck ()
{
case "$cur" in
--*)
__gitcomp "
--tags --root --unreachable --cache --no-reflogs --full
--strict --verbose --lost-found
"
return
;;
esac
}
_git_gc ()
{
case "$cur" in
--*)
__gitcomp "--prune --aggressive"
return
;;
esac
}
_git_gitk ()
{
_gitk
}
__git_match_ctag() {
awk "/^${1//\//\\/}/ { print \$1 }" "$2"
}
_git_grep ()
{
__git_has_doubledash && return
case "$cur" in
--*)
__gitcomp "
--cached
--text --ignore-case --word-regexp --invert-match
--full-name --line-number
--extended-regexp --basic-regexp --fixed-strings
--perl-regexp
--files-with-matches --name-only
--files-without-match
--max-depth
--count
--and --or --not --all-match
"
return
;;
esac
case "$cword,$prev" in
2,*|*,-*)
if test -r tags; then
__gitcomp_nl "$(__git_match_ctag "$cur" tags)"
return
fi
;;
esac
__gitcomp_nl "$(__git_refs)"
}
_git_help ()
{
case "$cur" in
--*)
__gitcomp "--all --info --man --web"
return
;;
esac
__git_compute_all_commands
__gitcomp "$__git_all_commands $(__git_aliases)
attributes cli core-tutorial cvs-migration
diffcore gitk glossary hooks ignore modules
namespaces repository-layout tutorial tutorial-2
workflows
"
}
_git_init ()
{
case "$cur" in
--shared=*)
__gitcomp "
false true umask group all world everybody
" "" "${cur##--shared=}"
return
;;
--*)
__gitcomp "--quiet --bare --template= --shared --shared="
return
;;
esac
}
_git_ls_files ()
{
case "$cur" in
--*)
__gitcomp "--cached --deleted --modified --others --ignored
--stage --directory --no-empty-directory --unmerged
--killed --exclude= --exclude-from=
--exclude-per-directory= --exclude-standard
--error-unmatch --with-tree= --full-name
--abbrev --ignored --exclude-per-directory
"
return
;;
esac
# XXX ignore options like --modified and always suggest all cached
# files.
__git_complete_index_file "--cached"
}
_git_ls_remote ()
{
__gitcomp_nl "$(__git_remotes)"
}
_git_ls_tree ()
{
__git_complete_file
}
# Options that go well for log, shortlog and gitk
__git_log_common_options="
--not --all
--branches --tags --remotes
--first-parent --merges --no-merges
--max-count=
--max-age= --since= --after=
--min-age= --until= --before=
--min-parents= --max-parents=
--no-min-parents --no-max-parents
"
# Options that go well for log and gitk (not shortlog)
__git_log_gitk_options="
--dense --sparse --full-history
--simplify-merges --simplify-by-decoration
--left-right --notes --no-notes
"
# Options that go well for log and shortlog (not gitk)
__git_log_shortlog_options="
--author= --committer= --grep=
--all-match --invert-grep
"
__git_log_pretty_formats="oneline short medium full fuller email raw format:"
__git_log_date_formats="relative iso8601 rfc2822 short local default raw"
_git_log ()
{
__git_has_doubledash && return
local g="$(git rev-parse --git-dir 2>/dev/null)"
local merge=""
if [ -f "$g/MERGE_HEAD" ]; then
merge="--merge"
fi
case "$cur" in
--pretty=*|--format=*)
__gitcomp "$__git_log_pretty_formats $(__git_pretty_aliases)
" "" "${cur#*=}"
return
;;
--date=*)
__gitcomp "$__git_log_date_formats" "" "${cur##--date=}"
return
;;
--decorate=*)
__gitcomp "full short no" "" "${cur##--decorate=}"
return
;;
--*)
__gitcomp "
$__git_log_common_options
$__git_log_shortlog_options
$__git_log_gitk_options
--root --topo-order --date-order --reverse
--follow --full-diff
--abbrev-commit --abbrev=
--relative-date --date=
--pretty= --format= --oneline
--show-signature
--cherry-pick
--graph
--decorate --decorate=
--walk-reflogs
--parents --children
$merge
$__git_diff_common_options
--pickaxe-all --pickaxe-regex
"
return
;;
esac
__git_complete_revlist
}
# Common merge options shared by git-merge(1) and git-pull(1).
__git_merge_options="
--no-commit --no-stat --log --no-log --squash --strategy
--commit --stat --no-squash --ff --no-ff --ff-only --edit --no-edit
--verify-signatures --no-verify-signatures --gpg-sign
--quiet --verbose --progress --no-progress
"
_git_merge ()
{
__git_complete_strategy && return
case "$cur" in
--*)
__gitcomp "$__git_merge_options
--rerere-autoupdate --no-rerere-autoupdate --abort"
return
esac
__gitcomp_nl "$(__git_refs)"
}
_git_mergetool ()
{
case "$cur" in
--tool=*)
__gitcomp "$__git_mergetools_common tortoisemerge" "" "${cur##--tool=}"
return
;;
--*)
__gitcomp "--tool="
return
;;
esac
}
_git_merge_base ()
{
case "$cur" in
--*)
__gitcomp "--octopus --independent --is-ancestor --fork-point"
return
;;
esac
__gitcomp_nl "$(__git_refs)"
}
_git_mv ()
{
case "$cur" in
--*)
__gitcomp "--dry-run"
return
;;
esac
if [ $(__git_count_arguments "mv") -gt 0 ]; then
# We need to show both cached and untracked files (including
# empty directories) since this may not be the last argument.
__git_complete_index_file "--cached --others --directory"
else
__git_complete_index_file "--cached"
fi
}
_git_name_rev ()
{
__gitcomp "--tags --all --stdin"
}
_git_notes ()
{
local subcommands='add append copy edit list prune remove show'
local subcommand="$(__git_find_on_cmdline "$subcommands")"
case "$subcommand,$cur" in
,--*)
__gitcomp '--ref'
;;
,*)
case "$prev" in
--ref)
__gitcomp_nl "$(__git_refs)"
;;
*)
__gitcomp "$subcommands --ref"
;;
esac
;;
add,--reuse-message=*|append,--reuse-message=*|\
add,--reedit-message=*|append,--reedit-message=*)
__gitcomp_nl "$(__git_refs)" "" "${cur#*=}"
;;
add,--*|append,--*)
__gitcomp '--file= --message= --reedit-message=
--reuse-message='
;;
copy,--*)
__gitcomp '--stdin'
;;
prune,--*)
__gitcomp '--dry-run --verbose'
;;
prune,*)
;;
*)
case "$prev" in
-m|-F)
;;
*)
__gitcomp_nl "$(__git_refs)"
;;
esac
;;
esac
}
_git_pull ()
{
__git_complete_strategy && return
case "$cur" in
--recurse-submodules=*)
__gitcomp "$__git_fetch_recurse_submodules" "" "${cur##--recurse-submodules=}"
return
;;
--*)
__gitcomp "
--rebase --no-rebase
$__git_merge_options
$__git_fetch_options
"
return
;;
esac
__git_complete_remote_or_refspec
}
__git_push_recurse_submodules="check on-demand"
__git_complete_force_with_lease ()
{
local cur_=$1
case "$cur_" in
--*=)
;;
*:*)
__gitcomp_nl "$(__git_refs)" "" "${cur_#*:}"
;;
*)
__gitcomp_nl "$(__git_refs)" "" "$cur_"
;;
esac
}
_git_push ()
{
case "$prev" in
--repo)
__gitcomp_nl "$(__git_remotes)"
return
;;
--recurse-submodules)
__gitcomp "$__git_push_recurse_submodules"
return
;;
esac
case "$cur" in
--repo=*)
__gitcomp_nl "$(__git_remotes)" "" "${cur##--repo=}"
return
;;
--recurse-submodules=*)
__gitcomp "$__git_push_recurse_submodules" "" "${cur##--recurse-submodules=}"
return
;;
--force-with-lease=*)
__git_complete_force_with_lease "${cur##--force-with-lease=}"
return
;;
--*)
__gitcomp "
--all --mirror --tags --dry-run --force --verbose
--quiet --prune --delete --follow-tags
--receive-pack= --repo= --set-upstream
--force-with-lease --force-with-lease= --recurse-submodules=
"
return
;;
esac
__git_complete_remote_or_refspec
}
_git_rebase ()
{
local dir="$(__gitdir)"
if [ -f "$dir"/rebase-merge/interactive ]; then
__gitcomp "--continue --skip --abort --edit-todo"
return
elif [ -d "$dir"/rebase-apply ] || [ -d "$dir"/rebase-merge ]; then
__gitcomp "--continue --skip --abort"
return
fi
__git_complete_strategy && return
case "$cur" in
--whitespace=*)
__gitcomp "$__git_whitespacelist" "" "${cur##--whitespace=}"
return
;;
--*)
__gitcomp "
--onto --merge --strategy --interactive
--preserve-merges --stat --no-stat
--committer-date-is-author-date --ignore-date
--ignore-whitespace --whitespace=
--autosquash --fork-point --no-fork-point
--autostash
"
return
esac
__gitcomp_nl "$(__git_refs)"
}
_git_reflog ()
{
local subcommands="show delete expire"
local subcommand="$(__git_find_on_cmdline "$subcommands")"
if [ -z "$subcommand" ]; then
__gitcomp "$subcommands"
else
__gitcomp_nl "$(__git_refs)"
fi
}
__git_send_email_confirm_options="always never auto cc compose"
__git_send_email_suppresscc_options="author self cc bodycc sob cccmd body all"
_git_send_email ()
{
case "$cur" in
--confirm=*)
__gitcomp "
$__git_send_email_confirm_options
" "" "${cur##--confirm=}"
return
;;
--suppress-cc=*)
__gitcomp "
$__git_send_email_suppresscc_options
" "" "${cur##--suppress-cc=}"
return
;;
--smtp-encryption=*)
__gitcomp "ssl tls" "" "${cur##--smtp-encryption=}"
return
;;
--thread=*)
__gitcomp "
deep shallow
" "" "${cur##--thread=}"
return
;;
--*)
__gitcomp "--annotate --bcc --cc --cc-cmd --chain-reply-to
--compose --confirm= --dry-run --envelope-sender
--from --identity
--in-reply-to --no-chain-reply-to --no-signed-off-by-cc
--no-suppress-from --no-thread --quiet
--signed-off-by-cc --smtp-pass --smtp-server
--smtp-server-port --smtp-encryption= --smtp-user
--subject --suppress-cc= --suppress-from --thread --to
--validate --no-validate
$__git_format_patch_options"
return
;;
esac
__git_complete_revlist
}
_git_stage ()
{
_git_add
}
__git_config_get_set_variables ()
{
local prevword word config_file= c=$cword
while [ $c -gt 1 ]; do
word="${words[c]}"
case "$word" in
--system|--global|--local|--file=*)
config_file="$word"
break
;;
-f|--file)
config_file="$word $prevword"
break
;;
esac
prevword=$word
c=$((--c))
done
git --git-dir="$(__gitdir)" config $config_file --list 2>/dev/null |
while read -r line
do
case "$line" in
*.*=*)
echo "${line/=*/}"
;;
esac
done
}
_git_config ()
{
case "$prev" in
branch.*.remote|branch.*.pushremote)
__gitcomp_nl "$(__git_remotes)"
return
;;
branch.*.merge)
__gitcomp_nl "$(__git_refs)"
return
;;
branch.*.rebase)
__gitcomp "false true"
return
;;
remote.pushdefault)
__gitcomp_nl "$(__git_remotes)"
return
;;
remote.*.fetch)
local remote="${prev#remote.}"
remote="${remote%.fetch}"
if [ -z "$cur" ]; then
__gitcomp_nl "refs/heads/" "" "" ""
return
fi
__gitcomp_nl "$(__git_refs_remotes "$remote")"
return
;;
remote.*.push)
local remote="${prev#remote.}"
remote="${remote%.push}"
__gitcomp_nl "$(git --git-dir="$(__gitdir)" \
for-each-ref --format='%(refname):%(refname)' \
refs/heads)"
return
;;
pull.twohead|pull.octopus)
__git_compute_merge_strategies
__gitcomp "$__git_merge_strategies"
return
;;
color.branch|color.diff|color.interactive|\
color.showbranch|color.status|color.ui)
__gitcomp "always never auto"
return
;;
color.pager)
__gitcomp "false true"
return
;;
color.*.*)
__gitcomp "
normal black red green yellow blue magenta cyan white
bold dim ul blink reverse
"
return
;;
diff.submodule)
__gitcomp "log short"
return
;;
help.format)
__gitcomp "man info web html"
return
;;
log.date)
__gitcomp "$__git_log_date_formats"
return
;;
sendemail.aliasesfiletype)
__gitcomp "mutt mailrc pine elm gnus"
return
;;
sendemail.confirm)
__gitcomp "$__git_send_email_confirm_options"
return
;;
sendemail.suppresscc)
__gitcomp "$__git_send_email_suppresscc_options"
return
;;
sendemail.transferencoding)
__gitcomp "7bit 8bit quoted-printable base64"
return
;;
--get|--get-all|--unset|--unset-all)
__gitcomp_nl "$(__git_config_get_set_variables)"
return
;;
*.*)
return
;;
esac
case "$cur" in
--*)
__gitcomp "
--system --global --local --file=
--list --replace-all
--get --get-all --get-regexp
--add --unset --unset-all
--remove-section --rename-section
"
return
;;
branch.*.*)
local pfx="${cur%.*}." cur_="${cur##*.}"
__gitcomp "remote pushremote merge mergeoptions rebase" "$pfx" "$cur_"
return
;;
branch.*)
local pfx="${cur%.*}." cur_="${cur#*.}"
__gitcomp_nl "$(__git_heads)" "$pfx" "$cur_" "."
__gitcomp_nl_append $'autosetupmerge\nautosetuprebase\n' "$pfx" "$cur_"
return
;;
guitool.*.*)
local pfx="${cur%.*}." cur_="${cur##*.}"
__gitcomp "
argprompt cmd confirm needsfile noconsole norescan
prompt revprompt revunmerged title
" "$pfx" "$cur_"
return
;;
difftool.*.*)
local pfx="${cur%.*}." cur_="${cur##*.}"
__gitcomp "cmd path" "$pfx" "$cur_"
return
;;
man.*.*)
local pfx="${cur%.*}." cur_="${cur##*.}"
__gitcomp "cmd path" "$pfx" "$cur_"
return
;;
mergetool.*.*)
local pfx="${cur%.*}." cur_="${cur##*.}"
__gitcomp "cmd path trustExitCode" "$pfx" "$cur_"
return
;;
pager.*)
local pfx="${cur%.*}." cur_="${cur#*.}"
__git_compute_all_commands
__gitcomp_nl "$__git_all_commands" "$pfx" "$cur_"
return
;;
remote.*.*)
local pfx="${cur%.*}." cur_="${cur##*.}"
__gitcomp "
url proxy fetch push mirror skipDefaultUpdate
receivepack uploadpack tagopt pushurl
" "$pfx" "$cur_"
return
;;
remote.*)
local pfx="${cur%.*}." cur_="${cur#*.}"
__gitcomp_nl "$(__git_remotes)" "$pfx" "$cur_" "."
__gitcomp_nl_append "pushdefault" "$pfx" "$cur_"
return
;;
url.*.*)
local pfx="${cur%.*}." cur_="${cur##*.}"
__gitcomp "insteadOf pushInsteadOf" "$pfx" "$cur_"
return
;;
esac
__gitcomp "
add.ignoreErrors
advice.commitBeforeMerge
advice.detachedHead
advice.implicitIdentity
advice.pushNonFastForward
advice.resolveConflict
advice.statusHints
alias.
am.keepcr
apply.ignorewhitespace
apply.whitespace
branch.autosetupmerge
branch.autosetuprebase
browser.
clean.requireForce
color.branch
color.branch.current
color.branch.local
color.branch.plain
color.branch.remote
color.decorate.HEAD
color.decorate.branch
color.decorate.remoteBranch
color.decorate.stash
color.decorate.tag
color.diff
color.diff.commit
color.diff.frag
color.diff.func
color.diff.meta
color.diff.new
color.diff.old
color.diff.plain
color.diff.whitespace
color.grep
color.grep.context
color.grep.filename
color.grep.function
color.grep.linenumber
color.grep.match
color.grep.selected
color.grep.separator
color.interactive
color.interactive.error
color.interactive.header
color.interactive.help
color.interactive.prompt
color.pager
color.showbranch
color.status
color.status.added
color.status.changed
color.status.header
color.status.nobranch
color.status.unmerged
color.status.untracked
color.status.updated
color.ui
commit.status
commit.template
core.abbrev
core.askpass
core.attributesfile
core.autocrlf
core.bare
core.bigFileThreshold
core.compression
core.createObject
core.deltaBaseCacheLimit
core.editor
core.eol
core.excludesfile
core.fileMode
core.fsyncobjectfiles
core.gitProxy
core.ignoreStat
core.ignorecase
core.logAllRefUpdates
core.loosecompression
core.notesRef
core.packedGitLimit
core.packedGitWindowSize
core.pager
core.preferSymlinkRefs
core.preloadindex
core.quotepath
core.repositoryFormatVersion
core.safecrlf
core.sharedRepository
core.sparseCheckout
core.symlinks
core.trustctime
core.warnAmbiguousRefs
core.whitespace
core.worktree
diff.autorefreshindex
diff.external
diff.ignoreSubmodules
diff.mnemonicprefix
diff.noprefix
diff.renameLimit
diff.renames
diff.statGraphWidth
diff.submodule
diff.suppressBlankEmpty
diff.tool
diff.wordRegex
diff.algorithm
difftool.
difftool.prompt
fetch.recurseSubmodules
fetch.unpackLimit
format.attach
format.cc
format.coverLetter
format.headers
format.numbered
format.pretty
format.signature
format.signoff
format.subjectprefix
format.suffix
format.thread
format.to
gc.
gc.aggressiveWindow
gc.auto
gc.autopacklimit
gc.packrefs
gc.pruneexpire
gc.reflogexpire
gc.reflogexpireunreachable
gc.rerereresolved
gc.rerereunresolved
gitcvs.allbinary
gitcvs.commitmsgannotation
gitcvs.dbTableNamePrefix
gitcvs.dbdriver
gitcvs.dbname
gitcvs.dbpass
gitcvs.dbuser
gitcvs.enabled
gitcvs.logfile
gitcvs.usecrlfattr
guitool.
gui.blamehistoryctx
gui.commitmsgwidth
gui.copyblamethreshold
gui.diffcontext
gui.encoding
gui.fastcopyblame
gui.matchtrackingbranch
gui.newbranchtemplate
gui.pruneduringfetch
gui.spellingdictionary
gui.trustmtime
help.autocorrect
help.browser
help.format
http.lowSpeedLimit
http.lowSpeedTime
http.maxRequests
http.minSessions
http.noEPSV
http.postBuffer
http.proxy
http.sslCipherList
http.sslCAInfo
http.sslCAPath
http.sslCert
http.sslCertPasswordProtected
http.sslKey
http.sslVerify
http.useragent
i18n.commitEncoding
i18n.logOutputEncoding
imap.authMethod
imap.folder
imap.host
imap.pass
imap.port
imap.preformattedHTML
imap.sslverify
imap.tunnel
imap.user
init.templatedir
instaweb.browser
instaweb.httpd
instaweb.local
instaweb.modulepath
instaweb.port
interactive.singlekey
log.date
log.decorate
log.showroot
mailmap.file
man.
man.viewer
merge.
merge.conflictstyle
merge.log
merge.renameLimit
merge.renormalize
merge.stat
merge.tool
merge.verbosity
mergetool.
mergetool.keepBackup
mergetool.keepTemporaries
mergetool.prompt
notes.displayRef
notes.rewrite.
notes.rewrite.amend
notes.rewrite.rebase
notes.rewriteMode
notes.rewriteRef
pack.compression
pack.deltaCacheLimit
pack.deltaCacheSize
pack.depth
pack.indexVersion
pack.packSizeLimit
pack.threads
pack.window
pack.windowMemory
pager.
pretty.
pull.octopus
pull.twohead
push.default
push.followTags
rebase.autosquash
rebase.stat
receive.autogc
receive.denyCurrentBranch
receive.denyDeleteCurrent
receive.denyDeletes
receive.denyNonFastForwards
receive.fsckObjects
receive.unpackLimit
receive.updateserverinfo
remote.pushdefault
remotes.
repack.usedeltabaseoffset
rerere.autoupdate
rerere.enabled
sendemail.
sendemail.aliasesfile
sendemail.aliasfiletype
sendemail.bcc
sendemail.cc
sendemail.cccmd
sendemail.chainreplyto
sendemail.confirm
sendemail.envelopesender
sendemail.from
sendemail.identity
sendemail.multiedit
sendemail.signedoffbycc
sendemail.smtpdomain
sendemail.smtpencryption
sendemail.smtppass
sendemail.smtpserver
sendemail.smtpserveroption
sendemail.smtpserverport
sendemail.smtpuser
sendemail.suppresscc
sendemail.suppressfrom
sendemail.thread
sendemail.to
sendemail.validate
showbranch.default
status.relativePaths
status.showUntrackedFiles
status.submodulesummary
submodule.
tar.umask
transfer.unpackLimit
url.
user.email
user.name
user.signingkey
web.browser
branch. remote.
"
}
_git_remote ()
{
local subcommands="add rename remove set-head set-branches set-url show prune update"
local subcommand="$(__git_find_on_cmdline "$subcommands")"
if [ -z "$subcommand" ]; then
__gitcomp "$subcommands"
return
fi
case "$subcommand" in
rename|remove|set-url|show|prune)
__gitcomp_nl "$(__git_remotes)"
;;
set-head|set-branches)
__git_complete_remote_or_refspec
;;
update)
__gitcomp "$(__git_get_config_variables "remotes")"
;;
*)
;;
esac
}
_git_replace ()
{
__gitcomp_nl "$(__git_refs)"
}
_git_reset ()
{
__git_has_doubledash && return
case "$cur" in
--*)
__gitcomp "--merge --mixed --hard --soft --patch"
return
;;
esac
__gitcomp_nl "$(__git_refs)"
}
_git_revert ()
{
local dir="$(__gitdir)"
if [ -f "$dir"/REVERT_HEAD ]; then
__gitcomp "--continue --quit --abort"
return
fi
case "$cur" in
--*)
__gitcomp "--edit --mainline --no-edit --no-commit --signoff"
return
;;
esac
__gitcomp_nl "$(__git_refs)"
}
_git_rm ()
{
case "$cur" in
--*)
__gitcomp "--cached --dry-run --ignore-unmatch --quiet"
return
;;
esac
__git_complete_index_file "--cached"
}
_git_shortlog ()
{
__git_has_doubledash && return
case "$cur" in
--*)
__gitcomp "
$__git_log_common_options
$__git_log_shortlog_options
--numbered --summary
"
return
;;
esac
__git_complete_revlist
}
_git_show ()
{
__git_has_doubledash && return
case "$cur" in
--pretty=*|--format=*)
__gitcomp "$__git_log_pretty_formats $(__git_pretty_aliases)
" "" "${cur#*=}"
return
;;
--diff-algorithm=*)
__gitcomp "$__git_diff_algorithms" "" "${cur##--diff-algorithm=}"
return
;;
--*)
__gitcomp "--pretty= --format= --abbrev-commit --oneline
--show-signature
$__git_diff_common_options
"
return
;;
esac
__git_complete_revlist_file
}
_git_show_branch ()
{
case "$cur" in
--*)
__gitcomp "
--all --remotes --topo-order --current --more=
--list --independent --merge-base --no-name
--color --no-color
--sha1-name --sparse --topics --reflog
"
return
;;
esac
__git_complete_revlist
}
_git_stash ()
{
local save_opts='--keep-index --no-keep-index --quiet --patch'
local subcommands='save list show apply clear drop pop create branch'
local subcommand="$(__git_find_on_cmdline "$subcommands")"
if [ -z "$subcommand" ]; then
case "$cur" in
--*)
__gitcomp "$save_opts"
;;
*)
if [ -z "$(__git_find_on_cmdline "$save_opts")" ]; then
__gitcomp "$subcommands"
fi
;;
esac
else
case "$subcommand,$cur" in
save,--*)
__gitcomp "$save_opts"
;;
apply,--*|pop,--*)
__gitcomp "--index --quiet"
;;
show,--*|drop,--*|branch,--*)
;;
show,*|apply,*|drop,*|pop,*|branch,*)
__gitcomp_nl "$(git --git-dir="$(__gitdir)" stash list \
| sed -n -e 's/:.*//p')"
;;
*)
;;
esac
fi
}
_git_submodule ()
{
__git_has_doubledash && return
local subcommands="add status init deinit update summary foreach sync"
if [ -z "$(__git_find_on_cmdline "$subcommands")" ]; then
case "$cur" in
--*)
__gitcomp "--quiet --cached"
;;
*)
__gitcomp "$subcommands"
;;
esac
return
fi
}
_git_svn ()
{
local subcommands="
init fetch clone rebase dcommit log find-rev
set-tree commit-diff info create-ignore propget
proplist show-ignore show-externals branch tag blame
migrate mkdirs reset gc
"
local subcommand="$(__git_find_on_cmdline "$subcommands")"
if [ -z "$subcommand" ]; then
__gitcomp "$subcommands"
else
local remote_opts="--username= --config-dir= --no-auth-cache"
local fc_opts="
--follow-parent --authors-file= --repack=
--no-metadata --use-svm-props --use-svnsync-props
--log-window-size= --no-checkout --quiet
--repack-flags --use-log-author --localtime
--ignore-paths= --include-paths= $remote_opts
"
local init_opts="
--template= --shared= --trunk= --tags=
--branches= --stdlayout --minimize-url
--no-metadata --use-svm-props --use-svnsync-props
--rewrite-root= --prefix= --use-log-author
--add-author-from $remote_opts
"
local cmt_opts="
--edit --rmdir --find-copies-harder --copy-similarity=
"
case "$subcommand,$cur" in
fetch,--*)
__gitcomp "--revision= --fetch-all $fc_opts"
;;
clone,--*)
__gitcomp "--revision= $fc_opts $init_opts"
;;
init,--*)
__gitcomp "$init_opts"
;;
dcommit,--*)
__gitcomp "
--merge --strategy= --verbose --dry-run
--fetch-all --no-rebase --commit-url
--revision --interactive $cmt_opts $fc_opts
"
;;
set-tree,--*)
__gitcomp "--stdin $cmt_opts $fc_opts"
;;
create-ignore,--*|propget,--*|proplist,--*|show-ignore,--*|\
show-externals,--*|mkdirs,--*)
__gitcomp "--revision="
;;
log,--*)
__gitcomp "
--limit= --revision= --verbose --incremental
--oneline --show-commit --non-recursive
--authors-file= --color
"
;;
rebase,--*)
__gitcomp "
--merge --verbose --strategy= --local
--fetch-all --dry-run $fc_opts
"
;;
commit-diff,--*)
__gitcomp "--message= --file= --revision= $cmt_opts"
;;
info,--*)
__gitcomp "--url"
;;
branch,--*)
__gitcomp "--dry-run --message --tag"
;;
tag,--*)
__gitcomp "--dry-run --message"
;;
blame,--*)
__gitcomp "--git-format"
;;
migrate,--*)
__gitcomp "
--config-dir= --ignore-paths= --minimize
--no-auth-cache --username=
"
;;
reset,--*)
__gitcomp "--revision= --parent"
;;
*)
;;
esac
fi
}
_git_tag ()
{
local i c=1 f=0
while [ $c -lt $cword ]; do
i="${words[c]}"
case "$i" in
-d|-v)
__gitcomp_nl "$(__git_tags)"
return
;;
-f)
f=1
;;
esac
((c++))
done
case "$prev" in
-m|-F)
;;
-*|tag)
if [ $f = 1 ]; then
__gitcomp_nl "$(__git_tags)"
fi
;;
*)
__gitcomp_nl "$(__git_refs)"
;;
esac
case "$cur" in
--*)
__gitcomp "
--list --delete --verify --annotate --message --file
--sign --cleanup --local-user --force --column --sort
--contains --points-at
"
;;
esac
}
_git_whatchanged ()
{
_git_log
}
__git_main ()
{
local i c=1 command __git_dir
while [ $c -lt $cword ]; do
i="${words[c]}"
case "$i" in
--git-dir=*) __git_dir="${i#--git-dir=}" ;;
--git-dir) ((c++)) ; __git_dir="${words[c]}" ;;
--bare) __git_dir="." ;;
--help) command="help"; break ;;
-c|--work-tree|--namespace) ((c++)) ;;
-*) ;;
*) command="$i"; break ;;
esac
((c++))
done
if [ -z "$command" ]; then
case "$cur" in
--*) __gitcomp "
--paginate
--no-pager
--git-dir=
--bare
--version
--exec-path
--exec-path=
--html-path
--man-path
--info-path
--work-tree=
--namespace=
--no-replace-objects
--help
"
;;
*) __git_compute_porcelain_commands
__gitcomp "$__git_porcelain_commands $(__git_aliases)" ;;
esac
return
fi
local completion_func="_git_${command//-/_}"
declare -f $completion_func >/dev/null && $completion_func && return
local expansion=$(__git_aliased_command "$command")
if [ -n "$expansion" ]; then
words[1]=$expansion
completion_func="_git_${expansion//-/_}"
declare -f $completion_func >/dev/null && $completion_func
fi
}
__gitk_main ()
{
__git_has_doubledash && return
local g="$(__gitdir)"
local merge=""
if [ -f "$g/MERGE_HEAD" ]; then
merge="--merge"
fi
case "$cur" in
--*)
__gitcomp "
$__git_log_common_options
$__git_log_gitk_options
$merge
"
return
;;
esac
__git_complete_revlist
}
if [[ -n ${ZSH_VERSION-} ]]; then
echo "WARNING: this script is deprecated, please see git-completion.zsh" 1>&2
autoload -U +X compinit && compinit
__gitcomp ()
{
emulate -L zsh
local cur_="${3-$cur}"
case "$cur_" in
--*=)
;;
*)
local c IFS=$' \t\n'
local -a array
for c in ${=1}; do
c="$c${4-}"
case $c in
--*=*|*.) ;;
*) c="$c " ;;
esac
array[${#array[@]}+1]="$c"
done
compset -P '*[=:]'
compadd -Q -S '' -p "${2-}" -a -- array && _ret=0
;;
esac
}
__gitcomp_nl ()
{
emulate -L zsh
local IFS=$'\n'
compset -P '*[=:]'
compadd -Q -S "${4- }" -p "${2-}" -- ${=1} && _ret=0
}
__gitcomp_file ()
{
emulate -L zsh
local IFS=$'\n'
compset -P '*[=:]'
compadd -Q -p "${2-}" -f -- ${=1} && _ret=0
}
_git ()
{
local _ret=1 cur cword prev
cur=${words[CURRENT]}
prev=${words[CURRENT-1]}
let cword=CURRENT-1
emulate ksh -c __${service}_main
let _ret && _default && _ret=0
return _ret
}
compdef _git git gitk
return
fi
__git_func_wrap ()
{
local cur words cword prev
_get_comp_words_by_ref -n =: cur words cword prev
$1
}
# Setup completion for certain functions defined above by setting common
# variables and workarounds.
# This is NOT a public function; use at your own risk.
__git_complete ()
{
local wrapper="__git_wrap${2}"
eval "$wrapper () { __git_func_wrap $2 ; }"
complete -o bashdefault -o default -o nospace -F $wrapper $1 2>/dev/null \
|| complete -o default -o nospace -F $wrapper $1
}
# wrapper for backwards compatibility
_git ()
{
__git_wrap__git_main
}
# wrapper for backwards compatibility
_gitk ()
{
__git_wrap__gitk_main
}
__git_complete git __git_main
__git_complete gitk __gitk_main
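# Although __git_complete is not a public function (see above), the same
# pattern is commonly reused for personal wrappers or aliases, e.g.
# (hypothetical command names):
#
#   __git_complete g __git_main
#   __git_complete gco _git_checkout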
# The following are necessary only for Cygwin, and only are needed
# when the user has tab-completed the executable name and consequently
# included the '.exe' suffix.
#
if [ Cygwin = "$(uname -o 2>/dev/null)" ]; then
__git_complete git.exe __git_main
fi
|
KunalGautam/shell-scripts
|
custom-bashrc-config/.git-completion.bash
|
Shell
|
gpl-3.0
| 56,670 |
#!/bin/bash
original_dir=`pwd`
script=`dirname $0`
cd $script
source params
export script=`pwd`
cd $original_dir
curl -v -X 'PUT' -H "$VERSION" -H 'Content-Type: application/xml' -E $CERT -d@$1 $ENDPOINT/$SUBSCRIPTION/services/extensions?action=update
|
Azure/azure-linux-extensions
|
registration-scripts/api/update-extension.sh
|
Shell
|
apache-2.0
| 253 |
#!/bin/sh
set -e
echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
SWIFT_STDLIB_PATH="${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}"
# This protects against multiple targets copying the same framework dependency at the same time. The solution
# was originally proposed here: https://lists.samba.org/archive/rsync/2008-February/020158.html
RSYNC_PROTECT_TMP_FILES=(--filter "P .*.??????")
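# (The "P .*.??????" rule protects rsync's own in-flight temporary files,
# which are named ".<file>.XXXXXX", from being removed by --delete.)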
install_framework()
{
if [ -r "${BUILT_PRODUCTS_DIR}/$1" ]; then
local source="${BUILT_PRODUCTS_DIR}/$1"
elif [ -r "${BUILT_PRODUCTS_DIR}/$(basename "$1")" ]; then
local source="${BUILT_PRODUCTS_DIR}/$(basename "$1")"
elif [ -r "$1" ]; then
local source="$1"
fi
local destination="${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
if [ -L "${source}" ]; then
echo "Symlinked..."
source="$(readlink "${source}")"
fi
# Use filter instead of exclude so missing patterns don't throw errors.
echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${destination}\""
rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${destination}"
local basename
basename="$(basename -s .framework "$1")"
binary="${destination}/${basename}.framework/${basename}"
if ! [ -r "$binary" ]; then
binary="${destination}/${basename}"
fi
# Strip invalid architectures so "fat" simulator / device frameworks work on device
if [[ "$(file "$binary")" == *"dynamically linked shared library"* ]]; then
strip_invalid_archs "$binary"
fi
# Resign the code if required by the build settings to avoid unstable apps
code_sign_if_enabled "${destination}/$(basename "$1")"
# Embed linked Swift runtime libraries. No longer necessary as of Xcode 7.
if [ "${XCODE_VERSION_MAJOR}" -lt 7 ]; then
local swift_runtime_libs
swift_runtime_libs=$(xcrun otool -LX "$binary" | grep --color=never @rpath/libswift | sed -E s/@rpath\\/\(.+dylib\).*/\\1/g | uniq -u && exit ${PIPESTATUS[0]})
for lib in $swift_runtime_libs; do
echo "rsync -auv \"${SWIFT_STDLIB_PATH}/${lib}\" \"${destination}\""
rsync -auv "${SWIFT_STDLIB_PATH}/${lib}" "${destination}"
code_sign_if_enabled "${destination}/${lib}"
done
fi
}
# Copies the dSYM of a vendored framework
install_dsym() {
local source="$1"
if [ -r "$source" ]; then
echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${DWARF_DSYM_FOLDER_PATH}\""
rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${DWARF_DSYM_FOLDER_PATH}"
fi
}
# Signs a framework with the provided identity
code_sign_if_enabled() {
if [ -n "${EXPANDED_CODE_SIGN_IDENTITY}" -a "${CODE_SIGNING_REQUIRED}" != "NO" -a "${CODE_SIGNING_ALLOWED}" != "NO" ]; then
# Use the current code_sign_identity
echo "Code Signing $1 with Identity ${EXPANDED_CODE_SIGN_IDENTITY_NAME}"
local code_sign_cmd="/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} ${OTHER_CODE_SIGN_FLAGS} --preserve-metadata=identifier,entitlements '$1'"
if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
code_sign_cmd="$code_sign_cmd &"
fi
echo "$code_sign_cmd"
eval "$code_sign_cmd"
fi
}
# Strip invalid architectures
strip_invalid_archs() {
binary="$1"
# Get architectures for current file
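# ('lipo -info' ends its output with ": <list of archs>", so taking the
# text after the last colon via rev|cut|rev yields e.g. "armv7 arm64")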
archs="$(lipo -info "$binary" | rev | cut -d ':' -f1 | rev)"
stripped=""
for arch in $archs; do
if ! [[ "${ARCHS}" == *"$arch"* ]]; then
# Strip non-valid architectures in-place
lipo -remove "$arch" -output "$binary" "$binary" || exit 1
stripped="$stripped $arch"
fi
done
if [[ "$stripped" ]]; then
echo "Stripped $binary of architectures:$stripped"
fi
}
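# For reference, `lipo -info` on a fat binary prints a line such as
# "Architectures in the fat file: <path> are: armv7 arm64" (illustrative output),
# so the cut/rev pipeline above leaves "archs" holding the space-separated list,
# and every architecture absent from ${ARCHS} is then removed in place.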
if [[ "$CONFIGURATION" == "Debug" ]]; then
install_framework "${BUILT_PRODUCTS_DIR}/FBSnapshotTestCase/FBSnapshotTestCase.framework"
fi
if [[ "$CONFIGURATION" == "Release" ]]; then
install_framework "${BUILT_PRODUCTS_DIR}/FBSnapshotTestCase/FBSnapshotTestCase.framework"
fi
if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
wait
fi
|
fonkadelic/Macoun
|
Pods/Target Support Files/Pods-WoidKonfTests/Pods-WoidKonfTests-frameworks.sh
|
Shell
|
mit
| 4,702 |
#!/bin/sh
# ensure that ls does not stat a symlink in an unusual case
# Copyright (C) 2007-2015 Free Software Foundation, Inc.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
. "${srcdir=.}/tests/init.sh"; path_prepend_ ./src
print_ver_ ls
require_strace_ stat
touch x || framework_failure_
chmod a+x x || framework_failure_
ln -s x link-to-x || framework_failure_
# ls from coreutils 6.9 would unnecessarily stat a symlink in an unusual case:
# When not coloring orphan and missing entries, and without ln=target,
# ensure that ls -F (or -d, or -l: i.e., when not dereferencing)
# does not stat a symlink to directory, and does still color that
# symlink and an executable file properly.
LS_COLORS='or=0:mi=0:ex=01;32:ln=01;35' \
strace -qe stat ls -F --color=always x link-to-x > out.tmp 2> err || fail=1
# Elide info messages strace can send to stdout of the form:
# [ Process PID=1234 runs in 32 bit mode. ]
sed '/Process PID=/d' out.tmp > out
# With coreutils 6.9 and earlier, this file would contain a
# line showing ls had called stat on "x".
grep '^stat("x"' err && fail=1
# Check that output is colorized, as requested, too.
{
printf '\033[0m\033[01;35mlink-to-x\033[0m@\n'
printf '\033[01;32mx\033[0m*\n'
} > exp || fail=1
compare exp out || fail=1
Exit $fail
|
projectgnu/coreutils
|
tests/ls/stat-free-symlinks.sh
|
Shell
|
gpl-3.0
| 1,871 |
#! /bin/bash
cd gdc_2/Gdc2/
if [[ ${target_platform} =~ linux.* ]]; then
makefile=makefile.linux
elif [[ ${target_platform} =~ osx.* ]]; then
makefile=makefile.mac
else
echo "operating system not found or not supported"
exit 1
fi
# Object files/libraries in wrong order => can't use --as-needed.
# (and clang does not seem to support --no-as-needed).
export LDFLAGS="${LDFLAGS//-Wl,--as-needed/}"
make \
-f "${makefile}" \
CC="${CXX} ${CXXFLAGS} ${CPPFLAGS} ${LDFLAGS}"
install -d "${PREFIX}/bin"
install gdc2 "${PREFIX}/bin/"
|
cokelaer/bioconda-recipes
|
recipes/gdc/build.sh
|
Shell
|
mit
| 553 |
#!/bin/bash
DIR=/lib/modules/`uname -r`
for i in `find $DIR/kernel -type f -name "*.ko" | grep -v test_nx | grep -v ring_buffer_benchmark`
do
MOD=`basename $i | sed s/\.ko//`
echo Loading $MOD
/sbin/modprobe $MOD
done
|
rantala/trinity
|
scripts/load-all-modules.sh
|
Shell
|
gpl-2.0
| 244 |
#!/bin/bash
# Copyright 2016 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
# A library of helper functions that each provider hosting Kubernetes must implement to use cluster/kube-*.sh scripts.
KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/../..
# shellcheck source=./config-common.sh
source "${KUBE_ROOT}/cluster/photon-controller/config-common.sh"
# shellcheck source=./config-default.sh
source "${KUBE_ROOT}/cluster/photon-controller/${KUBE_CONFIG_FILE-"config-default.sh"}"
# shellcheck source=../common.sh
source "${KUBE_ROOT}/cluster/common.sh"
readonly PHOTON="photon -n"
# Naming scheme for VMs (masters & nodes)
readonly MASTER_NAME="${INSTANCE_PREFIX}-master"
# shell check claims this doesn't work because you can't use a variable in a brace
# range. It does work because we're calling eval.
# shellcheck disable=SC2051
readonly NODE_NAMES=($(eval echo "${INSTANCE_PREFIX}"-node-{1.."${NUM_NODES}"}))
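# Illustrative expansion (hypothetical values): with INSTANCE_PREFIX=kubernetes and
# NUM_NODES=3 the eval above yields: kubernetes-node-1 kubernetes-node-2 kubernetes-node-3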
#####################################################################
#
# Public API
#
#####################################################################
#
# detect-master will query Photon Controller for the Kubernetes master.
# It assumes that the VM name for the master is unique.
# It will set KUBE_MASTER_ID to be the VM ID of the master
# It will set KUBE_MASTER_IP to be the IP address of the master
# If the silent parameter is passed, it will not print when the master
# is found: this is used internally just to find the MASTER
#
function detect-master {
local silent=${1:-""}
local tenant_args="--tenant ${PHOTON_TENANT} --project ${PHOTON_PROJECT}"
KUBE_MASTER=${MASTER_NAME}
KUBE_MASTER_ID=${KUBE_MASTER_ID:-""}
KUBE_MASTER_IP=${KUBE_MASTER_IP:-""}
# We don't want silent failure: we check for failure
set +o pipefail
if [[ -z ${KUBE_MASTER_ID} ]]; then
KUBE_MASTER_ID=$(${PHOTON} vm list ${tenant_args} | grep $'\t'"${MASTER_NAME}"$'\t' | awk '{print $1}')
fi
if [[ -z ${KUBE_MASTER_ID} ]]; then
kube::log::error "Could not find Kubernetes master node ID. Make sure you've launched a cluster with kube-up.sh"
exit 1
fi
if [[ -z "${KUBE_MASTER_IP-}" ]]; then
# Make sure to ignore lines where it's not attached to a portgroup
# Make sure to ignore lines that have a network interface but no address
KUBE_MASTER_IP=$(${PHOTON} vm networks "${KUBE_MASTER_ID}" | grep -v "^-" | grep -E '[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+' | head -1 | awk -F'\t' '{print $3}')
fi
if [[ -z "${KUBE_MASTER_IP-}" ]]; then
kube::log::error "Could not find Kubernetes master node IP. Make sure you've launched a cluster with 'kube-up.sh'" >&2
exit 1
fi
if [[ -z ${silent} ]]; then
kube::log::status "Master: $KUBE_MASTER ($KUBE_MASTER_IP)"
fi
# Reset default set in common.sh
set -o pipefail
}
#
# detect-nodes will query Photon Controller for the Kubernetes nodes
# It assumes that the VM name for the nodes are unique.
# It assumes that NODE_NAMES has been set
# It will set KUBE_NODE_IP_ADDRESSES to be the VM IPs of the nodes
# It will set the KUBE_NODE_IDS to be the VM IDs of the nodes
# If the silent parameter is passed, it will not print when the nodes
# are found: this is used internally just to find the nodes
#
function detect-nodes {
local silent=${1:-""}
local failure=0
local tenant_args="--tenant ${PHOTON_TENANT} --project ${PHOTON_PROJECT}"
KUBE_NODE_IP_ADDRESSES=()
KUBE_NODE_IDS=()
# We don't want silent failure: we check for failure
set +o pipefail
for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
local node_id
node_id=$(${PHOTON} vm list ${tenant_args} | grep $'\t'"${NODE_NAMES[${i}]}"$'\t' | awk '{print $1}')
if [[ -z ${node_id} ]]; then
kube::log::error "Could not find ${NODE_NAMES[${i}]}"
failure=1
fi
KUBE_NODE_IDS+=("${node_id}")
# Make sure to ignore lines where it's not attached to a portgroup
# Make sure to ignore lines that have a network interface but no address
node_ip=$(${PHOTON} vm networks "${node_id}" | grep -v "^-" | grep -E '[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+' | head -1 | awk -F'\t' '{print $3}')
KUBE_NODE_IP_ADDRESSES+=("${node_ip}")
if [[ -z ${silent} ]]; then
kube::log::status "Node: ${NODE_NAMES[${i}]} (${KUBE_NODE_IP_ADDRESSES[${i}]})"
fi
done
if [[ ${failure} -ne 0 ]]; then
exit 1
fi
# Reset default set in common.sh
set -o pipefail
}
# Get node names if they are not static.
function detect-node-names {
echo "TODO: detect-node-names" 1>&2
}
#
# Verifies that this computer has sufficient software installed
# so that it can run the rest of the script.
#
function verify-prereqs {
verify-cmd-in-path photon
verify-cmd-in-path ssh
verify-cmd-in-path scp
verify-cmd-in-path ssh-add
verify-cmd-in-path openssl
verify-cmd-in-path mkisofs
}
#
# The entry point for bringing up a Kubernetes cluster
#
function kube-up {
verify-prereqs
verify-ssh-prereqs
verify-photon-config
ensure-temp-dir
find-release-tars
find-image-id
load-or-gen-kube-basicauth
gen-cloud-init-iso
gen-master-start
create-master-vm
install-salt-on-master
gen-node-start
install-salt-on-nodes
detect-nodes -s
install-kubernetes-on-master
install-kubernetes-on-nodes
wait-master-api
wait-node-apis
setup-pod-routes
copy-kube-certs
kube::log::status "Creating kubeconfig..."
create-kubeconfig
}
# Delete a kubernetes cluster
function kube-down {
detect-master
detect-nodes
pc-delete-vm "${KUBE_MASTER}" "${KUBE_MASTER_ID}"
for (( node=0; node<${#KUBE_NODE_IDS[@]}; node++)); do
pc-delete-vm "${NODE_NAMES[${node}]}" "${KUBE_NODE_IDS[${node}]}"
done
}
# Update a kubernetes cluster
function kube-push {
echo "TODO: kube-push" 1>&2
}
# Prepare update a kubernetes component
function prepare-push {
echo "TODO: prepare-push" 1>&2
}
# Update a kubernetes master
function push-master {
echo "TODO: push-master" 1>&2
}
# Update a kubernetes node
function push-node {
echo "TODO: push-node" 1>&2
}
# Execute prior to running tests to build a release if required for env
function test-build-release {
echo "TODO: test-build-release" 1>&2
}
# Execute prior to running tests to initialize required structure
function test-setup {
echo "TODO: test-setup" 1>&2
}
# Execute after running tests to perform any required clean-up
function test-teardown {
echo "TODO: test-teardown" 1>&2
}
#####################################################################
#
# Internal functions
#
#####################################################################
#
# Uses Photon Controller to make a VM
# Takes two parameters:
# - The name of the VM (Assumed to be unique)
# - The name of the flavor to create the VM (Assumed to be unique)
#
# It assumes that the variables in config-common.sh (PHOTON_TENANT, etc)
# are set correctly.
#
# It also assumes the cloud-init ISO has been generated
#
# When it completes, it sets two environment variables for use by the
# caller: _VM_ID (the ID of the created VM) and _VM_IP (the IP address
# of the created VM)
#
function pc-create-vm {
local vm_name="${1}"
local vm_flavor="${2}"
local rc=0
local i=0
# Create the VM
local tenant_args="--tenant ${PHOTON_TENANT} --project ${PHOTON_PROJECT}"
local vm_args="--name ${vm_name} --image ${PHOTON_IMAGE_ID} --flavor ${vm_flavor}"
local disk_args="disk-1 ${PHOTON_DISK_FLAVOR} boot=true"
rc=0
_VM_ID=$(${PHOTON} vm create ${tenant_args} ${vm_args} --disks "${disk_args}" 2>&1) || rc=$?
if [[ ${rc} -ne 0 ]]; then
kube::log::error "Failed to create VM. Error output:"
echo "${_VM_ID}"
exit 1
fi
kube::log::status "Created VM ${vm_name}: ${_VM_ID}"
# Start the VM
# Note that the VM has cloud-init in it, and we attach an ISO that
# contains a user-data.txt file for cloud-init. When the VM starts,
# cloud-init will temporarily mount the ISO and configure the VM
# Our user-data will configure the 'kube' user and set up the ssh
# authorized keys to allow us to ssh to the VM and do further work.
run-cmd "${PHOTON} vm attach-iso -p ${KUBE_TEMP}/cloud-init.iso ${_VM_ID}"
run-cmd "${PHOTON} vm start ${_VM_ID}"
kube::log::status "Started VM ${vm_name}, waiting for network address..."
# Wait for the VM to be started and connected to the network
have_network=0
for i in $(seq 120); do
# photon -n vm networks print several fields:
# NETWORK MAC IP GATEWAY CONNECTED?
# We wait until CONNECTED is True
rc=0
networks=$(${PHOTON} vm networks "${_VM_ID}") || rc=$?
if [[ ${rc} -ne 0 ]]; then
kube::log::error "'${PHOTON} vm networks ${_VM_ID}' failed. Error output: "
echo "${networks}"
fi
networks=$(echo "${networks}" | grep True) || rc=$?
if [[ ${rc} -eq 0 ]]; then
have_network=1
break;
fi
sleep 1
done
# Fail if the VM didn't come up
if [[ ${have_network} -eq 0 ]]; then
kube::log::error "VM ${vm_name} failed to start up: no IP was found"
exit 1
fi
# Find the IP address of the VM
_VM_IP=$(${PHOTON} vm networks "${_VM_ID}" | head -1 | awk -F'\t' '{print $3}')
kube::log::status "VM ${vm_name} has IP: ${_VM_IP}"
}
#
# Delete one of our VMs
# If it is STARTED, it will be stopped first.
#
function pc-delete-vm {
local vm_name="${1}"
local vm_id="${2}"
local rc=0
kube::log::status "Deleting VM ${vm_name}"
${PHOTON} vm show "${vm_id}" | head -1 | grep STARTED > /dev/null 2>&1 || rc=$?
if [[ ${rc} -eq 0 ]]; then
${PHOTON} vm stop "${vm_id}" > /dev/null 2>&1 || rc=$?
if [[ ${rc} -ne 0 ]]; then
kube::log::error "Error: could not stop ${vm_name} ($vm_id)"
kube::log::error "Please investigate and stop manually"
return
fi
fi
rc=0
${PHOTON} vm delete "${vm_id}" > /dev/null 2>&1 || rc=$?
if [[ ${rc} -ne 0 ]]; then
kube::log::error "Error: could not delete ${vm_name} ($vm_id)"
kube::log::error "Please investigate and delete manually"
fi
}
#
# Looks for the image named PHOTON_IMAGE
# Sets PHOTON_IMAGE_ID to be the id of that image.
# We currently assume there is exactly one image with name
#
function find-image-id {
PHOTON_IMAGE_ID=$(${PHOTON} image list | grep $'\t'"${PHOTON_IMAGE}"$'\t' | head -1 | grep READY | awk -F'\t' '{print $1}')
if [[ -z "${PHOTON_IMAGE_ID}" ]]; then
kube::log::error "Cannot find a READY image named \"${PHOTON_IMAGE}\""
exit 1
fi
}
#
# Generate an ISO with a single file called user-data.txt
# This ISO will be used to configure cloud-init (which is already
# on the VM). We will tell cloud-init to create the kube user/group
# and give ourselves the ability to ssh to the VM with ssh. We also
# allow people to ssh with the same password that was randomly
# generated for access to Kubernetes as a backup method.
#
# Assumes environment variables:
# - VM_USER
# - KUBE_PASSWORD (randomly generated password)
#
function gen-cloud-init-iso {
local password_hash
password_hash=$(openssl passwd -1 "${KUBE_PASSWORD}")
local ssh_key
ssh_key=$(ssh-add -L | head -1)
# Make the user-data file that will be used by cloud-init
(
echo "#cloud-config"
echo ""
echo "groups:"
echo " - ${VM_USER}"
echo ""
echo "users:"
echo " - name: ${VM_USER}"
echo " gecos: Kubernetes"
echo " primary-group: ${VM_USER}"
echo " lock-passwd: false"
echo " passwd: ${password_hash}"
echo " ssh-authorized-keys: "
echo " - ${ssh_key}"
echo " sudo: ALL=(ALL) NOPASSWD:ALL"
echo " shell: /bin/bash"
echo ""
echo "hostname:"
echo " - hostname: kube"
) > "${KUBE_TEMP}/user-data.txt"
# Make the ISO that will contain the user-data
# The -rock option means that we'll generate real filenames (long and with case)
run-cmd "mkisofs -rock -o ${KUBE_TEMP}/cloud-init.iso ${KUBE_TEMP}/user-data.txt"
}
#
# Generate a script used to install salt on the master
# It is placed into $KUBE_TEMP/master-start.sh
#
function gen-master-start {
python "${KUBE_ROOT}/third_party/htpasswd/htpasswd.py" \
-b -c "${KUBE_TEMP}/htpasswd" "${KUBE_USER}" "${KUBE_PASSWORD}"
local htpasswd
htpasswd=$(cat "${KUBE_TEMP}/htpasswd")
# This calculation of the service IP should work, but if you choose an
# alternate subnet, there's a small chance you'd need to modify the
# service_ip, below. We'll choose an IP like 10.244.240.1 by taking
# the first three octets of the SERVICE_CLUSTER_IP_RANGE and tacking
# on a .1
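# Worked example (hypothetical value): SERVICE_CLUSTER_IP_RANGE=10.244.240.0/20
# gives octets=(10 244 240 0) and therefore service_ip=10.244.240.1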
local octets
local service_ip
octets=($(echo "${SERVICE_CLUSTER_IP_RANGE}" | sed -e 's|/.*||' -e 's/\./ /g'))
((octets[3]+=1))
service_ip=$(echo "${octets[*]}" | sed 's/ /./g')
MASTER_EXTRA_SANS="IP:${service_ip},DNS:${MASTER_NAME},${MASTER_EXTRA_SANS}"
(
echo "#! /bin/bash"
echo "readonly MY_NAME=${MASTER_NAME}"
grep -v "^#" "${KUBE_ROOT}/cluster/photon-controller/templates/hostname.sh"
echo "cd /home/kube/cache/kubernetes-install"
echo "readonly MASTER_NAME='${MASTER_NAME}'"
echo "readonly MASTER_IP_RANGE='${MASTER_IP_RANGE}'"
echo "readonly INSTANCE_PREFIX='${INSTANCE_PREFIX}'"
echo "readonly NODE_INSTANCE_PREFIX='${INSTANCE_PREFIX}-node'"
echo "readonly NODE_IP_RANGES='${NODE_IP_RANGES}'"
echo "readonly SERVICE_CLUSTER_IP_RANGE='${SERVICE_CLUSTER_IP_RANGE}'"
echo "readonly ENABLE_NODE_LOGGING='${ENABLE_NODE_LOGGING:-false}'"
echo "readonly LOGGING_DESTINATION='${LOGGING_DESTINATION:-}'"
echo "readonly ENABLE_CLUSTER_DNS='${ENABLE_CLUSTER_DNS:-false}'"
echo "readonly ENABLE_CLUSTER_UI='${ENABLE_CLUSTER_UI:-false}'"
echo "readonly DNS_SERVER_IP='${DNS_SERVER_IP:-}'"
echo "readonly DNS_DOMAIN='${DNS_DOMAIN:-}'"
echo "readonly KUBE_USER='${KUBE_USER:-}'"
echo "readonly KUBE_PASSWORD='${KUBE_PASSWORD:-}'"
echo "readonly SERVER_BINARY_TAR='${SERVER_BINARY_TAR##*/}'"
echo "readonly SALT_TAR='${SALT_TAR##*/}'"
echo "readonly MASTER_HTPASSWD='${htpasswd}'"
echo "readonly E2E_STORAGE_TEST_ENVIRONMENT='${E2E_STORAGE_TEST_ENVIRONMENT:-}'"
echo "readonly MASTER_EXTRA_SANS='${MASTER_EXTRA_SANS:-}'"
grep -v "^#" "${KUBE_ROOT}/cluster/photon-controller/templates/create-dynamic-salt-files.sh"
grep -v "^#" "${KUBE_ROOT}/cluster/photon-controller/templates/install-release.sh"
grep -v "^#" "${KUBE_ROOT}/cluster/photon-controller/templates/salt-master.sh"
) > "${KUBE_TEMP}/master-start.sh"
}
#
# Generate the scripts for each node to install salt
#
function gen-node-start {
local i
for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
(
echo "#! /bin/bash"
echo "readonly MY_NAME=${NODE_NAMES[${i}]}"
grep -v "^#" "${KUBE_ROOT}/cluster/photon-controller/templates/hostname.sh"
echo "KUBE_MASTER=${KUBE_MASTER}"
echo "KUBE_MASTER_IP=${KUBE_MASTER_IP}"
echo "NODE_IP_RANGE=$NODE_IP_RANGES"
grep -v "^#" "${KUBE_ROOT}/cluster/photon-controller/templates/salt-minion.sh"
) > "${KUBE_TEMP}/node-start-${i}.sh"
done
}
#
# Create a script that will run on the Kubernetes master and will run salt
# to configure the master. We make it a script instead of just running a
# single ssh command so that we can get logging.
#
function gen-master-salt {
gen-salt "kubernetes-master"
}
#
# Create scripts that will be run on the Kubernetes master. Each of these
# will invoke salt to configure one of the nodes
#
function gen-node-salt {
local i
for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
gen-salt "${NODE_NAMES[${i}]}"
done
}
#
# Shared implementation for gen-master-salt and gen-node-salt
# Writes a script that installs Kubernetes with salt
# The core of the script is simple (run 'salt ... state.highstate')
# We also do a bit of logging so we can debug problems
#
# There is also a funky workaround for an issue with docker 1.9
# (elsewhere we peg ourselves to docker 1.9). It's fixed in 1.10,
# so we should be able to remove it in the future
# https://github.com/docker/docker/issues/18113
# The problem is that sometimes the install (with apt-get) of
# docker fails. Deleting a file and retrying fixes it.
#
# Tell shellcheck to ignore our variables within single quotes:
# We're writing a script, not executing it, so this is normal
# shellcheck disable=SC2016
function gen-salt {
node_name=${1}
(
echo '#!/bin/bash'
echo ''
echo "node=${node_name}"
echo 'out=/tmp/${node}-salt.out'
echo 'log=/tmp/${node}-salt.log'
echo ''
echo 'echo $(date) >> $log'
echo 'salt ${node} state.highstate -t 30 --no-color > ${out}'
echo 'grep -E "Failed:[[:space:]]+0" ${out}'
echo 'success=$?'
echo 'cat ${out} >> ${log}'
echo ''
echo 'if [[ ${success} -ne 0 ]]; then'
echo ' # Did we try to install docker-engine?'
echo ' attempted=$(grep docker-engine ${out} | wc -l)'
echo ' # Is docker-engine installed?'
echo ' installed=$(salt --output=txt ${node} pkg.version docker-engine | wc -l)'
echo ' if [[ ${attempted} -ne 0 && ${installed} -eq 0 ]]; then'
echo ' echo "Unwedging docker-engine install" >> ${log}'
echo ' salt ${node} cmd.run "rm -f /var/lib/docker/network/files/local-kv.db"'
echo ' fi'
echo 'fi'
echo 'exit ${success}'
) > "${KUBE_TEMP}/${node_name}-salt.sh"
}
#
# Create the Kubernetes master VM
# Sets global variables:
# - KUBE_MASTER (Name)
# - KUBE_MASTER_ID (Photon VM ID)
# - KUBE_MASTER_IP (IP address)
#
function create-master-vm {
kube::log::status "Starting master VM..."
pc-create-vm "${MASTER_NAME}" "${PHOTON_MASTER_FLAVOR}"
KUBE_MASTER=${MASTER_NAME}
KUBE_MASTER_ID=${_VM_ID}
KUBE_MASTER_IP=${_VM_IP}
}
#
# Install salt on the Kubernetes master
# Relies on the master-start.sh script created in gen-master-start
#
function install-salt-on-master {
kube::log::status "Installing salt on master..."
upload-server-tars "${MASTER_NAME}" "${KUBE_MASTER_IP}"
run-script-remotely "${KUBE_MASTER_IP}" "${KUBE_TEMP}/master-start.sh"
}
#
# Installs salt on Kubernetes nodes in parallel
# Relies on the node-start script created in gen-node-start
#
function install-salt-on-nodes {
kube::log::status "Creating nodes and installing salt on them..."
# Start each of the VMs in parallel
# In the future, we'll batch this because it doesn't scale well
# past 10 or 20 nodes
local node
for (( node=0; node<${#NODE_NAMES[@]}; node++)); do
(
pc-create-vm "${NODE_NAMES[${node}]}" "${PHOTON_NODE_FLAVOR}"
run-script-remotely "${_VM_IP}" "${KUBE_TEMP}/node-start-${node}.sh"
) &
done
# Wait for the node VM startups to complete
local fail=0
local job
for job in $(jobs -p); do
wait "${job}" || fail=$((fail + 1))
done
if (( fail != 0 )); then
kube::log::error "Failed to start ${fail}/${NUM_NODES} nodes"
exit 1
fi
}
#
# Install Kubernetes on the master.
# This uses the kubernetes-master-salt.sh script created by gen-master-salt
# That script uses salt to install Kubernetes
#
function install-kubernetes-on-master {
# Wait until salt-master is running: it may take a bit
try-until-success-ssh "${KUBE_MASTER_IP}" \
"Waiting for salt-master to start on ${KUBE_MASTER}" \
"pgrep salt-master"
gen-master-salt
copy-file-to-vm "${_VM_IP}" "${KUBE_TEMP}/kubernetes-master-salt.sh" "/tmp/kubernetes-master-salt.sh"
try-until-success-ssh "${KUBE_MASTER_IP}" \
"Installing Kubernetes on ${KUBE_MASTER} via salt" \
"sudo /bin/bash /tmp/kubernetes-master-salt.sh"
}
#
# Install Kubernetes on the nodes in parallel
# This uses the per-node <node-name>-salt.sh scripts created by gen-node-salt
# Those scripts use salt to install Kubernetes
#
function install-kubernetes-on-nodes {
gen-node-salt
# Run in parallel to bring up the cluster faster
# TODO: Batch this so that we run up to N in parallel, so
# we don't overload this machine or the salt master
local node
for (( node=0; node<${#NODE_NAMES[@]}; node++)); do
(
copy-file-to-vm "${_VM_IP}" "${KUBE_TEMP}/${NODE_NAMES[${node}]}-salt.sh" "/tmp/${NODE_NAMES[${node}]}-salt.sh"
try-until-success-ssh "${KUBE_NODE_IP_ADDRESSES[${node}]}" \
"Waiting for salt-master to start on ${NODE_NAMES[${node}]}" \
"pgrep salt-minion"
try-until-success-ssh "${KUBE_MASTER_IP}" \
"Installing Kubernetes on ${NODE_NAMES[${node}]} via salt" \
"sudo /bin/bash /tmp/${NODE_NAMES[${node}]}-salt.sh"
) &
done
# Wait for the Kubernetes installations to complete
local fail=0
local job
for job in $(jobs -p); do
wait "${job}" || fail=$((fail + 1))
done
if (( fail != 0 )); then
kube::log::error "Failed to start install Kubernetes on ${fail} out of ${NUM_NODES} nodess"
exit 1
fi
}
#
# Upload the Kubernetes tarballs to the master
#
function upload-server-tars {
vm_name=${1}
vm_ip=${2}
run-ssh-cmd "${vm_ip}" "mkdir -p /home/kube/cache/kubernetes-install"
local tar
for tar in "${SERVER_BINARY_TAR}" "${SALT_TAR}"; do
local base_tar
base_tar=$(basename "${tar}")
kube::log::status "Uploading ${base_tar} to ${vm_name}..."
copy-file-to-vm "${vm_ip}" "${tar}" "/home/kube/cache/kubernetes-install/${tar##*/}"
done
}
#
# Wait for the Kubernetes healthz API to be responsive on the master
#
function wait-master-api {
local curl_creds="--insecure --user ${KUBE_USER}:${KUBE_PASSWORD}"
local curl_output="--fail --output /dev/null --silent"
local curl_net="--max-time 1"
try-until-success "Waiting for Kubernetes API on ${KUBE_MASTER}" \
"curl ${curl_creds} ${curl_output} ${curl_net} https://${KUBE_MASTER_IP}/healthz"
}
#
# Wait for the Kubernetes healthz API to be responsive on each node
#
function wait-node-apis {
local curl_output="--fail --output /dev/null --silent"
local curl_net="--max-time 1"
for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
try-until-success "Waiting for Kubernetes API on ${NODE_NAMES[${i}]}..." \
"curl ${curl_output} ${curl_net} http://${KUBE_NODE_IP_ADDRESSES[${i}]}:10250/healthz"
done
}
#
# Configure the nodes so the pods can communicate
# Each node will have a bridge named cbr0 for the NODE_IP_RANGES
# defined in config-default.sh. This finds the IP subnet (assigned
# by Kubernetes) to nodes and configures routes so they can communicate
#
# Also configure the master to be able to talk to the nodes. This is
# useful so that you can get to the UI from the master.
#
function setup-pod-routes {
local node
KUBE_NODE_BRIDGE_NETWORK=()
for (( node=0; node<${#NODE_NAMES[@]}; node++)); do
# This happens in two steps (wait for an address, wait for a non 172.x.x.x address)
# because it's both simpler and more clear what's happening.
try-until-success-ssh "${KUBE_NODE_IP_ADDRESSES[${node}]}" \
"Waiting for cbr0 bridge on ${NODE_NAMES[${node}]} to have an address" \
'sudo ifconfig cbr0 | grep -oP "inet addr:\K\S+"'
try-until-success-ssh "${KUBE_NODE_IP_ADDRESSES[${node}]}" \
"Waiting for cbr0 bridge on ${NODE_NAMES[${node}]} to have correct address" \
'sudo ifconfig cbr0 | grep -oP "inet addr:\K\S+" | grep -v "^172."'
run-ssh-cmd "${KUBE_NODE_IP_ADDRESSES[${node}]}" 'sudo ip route show | grep -E "dev cbr0" | cut -d " " -f1'
KUBE_NODE_BRIDGE_NETWORK+=(${_OUTPUT})
kube::log::status "cbr0 on ${NODE_NAMES[${node}]} is ${_OUTPUT}"
done
local i
local j
for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
kube::log::status "Configuring pod routes on ${NODE_NAMES[${i}]}..."
run-ssh-cmd "${KUBE_MASTER_IP}" "sudo route add -net ${KUBE_NODE_BRIDGE_NETWORK[${i}]} gw ${KUBE_NODE_IP_ADDRESSES[${i}]}"
for (( j=0; j<${#NODE_NAMES[@]}; j++)); do
if [[ "${i}" != "${j}" ]]; then
run-ssh-cmd "${KUBE_NODE_IP_ADDRESSES[${i}]}" "sudo route add -net ${KUBE_NODE_BRIDGE_NETWORK[${j}]} gw ${KUBE_NODE_IP_ADDRESSES[${j}]}"
fi
done
done
}
#
# Copy the certificate/key from the Kubernetes master
# These are used to create the kubeconfig file, which allows
# users to use kubectl easily
#
# We also set KUBE_CERT, KUBE_KEY, CA_CERT, and CONTEXT because they
# are needed by create-kubeconfig from common.sh to generate
# the kube config file.
#
function copy-kube-certs {
local cert="kubecfg.crt"
local key="kubecfg.key"
local ca="ca.crt"
local cert_dir="/srv/kubernetes"
kube::log::status "Copying credentials from ${KUBE_MASTER}"
# Set global environment variables: needed by create-kubeconfig
# in common.sh
export KUBE_CERT="${KUBE_TEMP}/${cert}"
export KUBE_KEY="${KUBE_TEMP}/${key}"
export CA_CERT="${KUBE_TEMP}/${ca}"
export CONTEXT="photon-${INSTANCE_PREFIX}"
run-ssh-cmd "${KUBE_MASTER_IP}" "sudo chmod 644 ${cert_dir}/${cert}"
run-ssh-cmd "${KUBE_MASTER_IP}" "sudo chmod 644 ${cert_dir}/${key}"
run-ssh-cmd "${KUBE_MASTER_IP}" "sudo chmod 644 ${cert_dir}/${ca}"
copy-file-from-vm "${KUBE_MASTER_IP}" "${cert_dir}/${cert}" "${KUBE_CERT}"
copy-file-from-vm "${KUBE_MASTER_IP}" "${cert_dir}/${key}" "${KUBE_KEY}"
copy-file-from-vm "${KUBE_MASTER_IP}" "${cert_dir}/${ca}" "${CA_CERT}"
run-ssh-cmd "${KUBE_MASTER_IP}" "sudo chmod 600 ${cert_dir}/${cert}"
run-ssh-cmd "${KUBE_MASTER_IP}" "sudo chmod 600 ${cert_dir}/${key}"
run-ssh-cmd "${KUBE_MASTER_IP}" "sudo chmod 600 ${cert_dir}/${ca}"
}
#
# Copies a script to a VM and runs it
# Parameters:
# - IP of VM
# - Path to local file
#
function run-script-remotely {
local vm_ip=${1}
local local_file="${2}"
local base_file
local remote_file
base_file=$(basename "${local_file}")
remote_file="/tmp/${base_file}"
copy-file-to-vm "${vm_ip}" "${local_file}" "${remote_file}"
run-ssh-cmd "${vm_ip}" "chmod 700 ${remote_file}"
run-ssh-cmd "${vm_ip}" "nohup sudo ${remote_file} < /dev/null 1> ${remote_file}.out 2>&1 &"
}
#
# Runs a command on a VM using ssh
# Parameters:
# - (optional) -i to ignore failure
# - IP address of the VM
# - Command to run
# Assumes environment variables:
# - VM_USER
# - SSH_OPTS
#
function run-ssh-cmd {
local ignore_failure=""
if [[ "${1}" = "-i" ]]; then
ignore_failure="-i"
shift
fi
local vm_ip=${1}
shift
local cmd=${1}
run-cmd ${ignore_failure} "ssh ${SSH_OPTS} ${VM_USER}@${vm_ip} ${cmd}"
}
#
# Uses scp to copy file to VM
# Parameters:
# - IP address of the VM
# - Path to local file
# - Path to remote file
# Assumes environment variables:
# - VM_USER
# - SSH_OPTS
#
function copy-file-to-vm {
local vm_ip=${1}
local local_file=${2}
local remote_file=${3}
run-cmd "scp ${SSH_OPTS} ${local_file} ${VM_USER}@${vm_ip}:${remote_file}"
}
function copy-file-from-vm {
local vm_ip=${1}
local remote_file=${2}
local local_file=${3}
run-cmd "scp ${SSH_OPTS} ${VM_USER}@${vm_ip}:${remote_file} ${local_file}"
}
#
# Run a command, print nice error output
# Used by copy-file-to-vm and run-ssh-cmd
#
function run-cmd {
local rc=0
local ignore_failure=""
if [[ "${1}" = "-i" ]]; then
ignore_failure=${1}
shift
fi
local cmd=$1
local output
output=$(${cmd} 2>&1) || rc=$?
if [[ ${rc} -ne 0 ]]; then
if [[ -z "${ignore_failure}" ]]; then
kube::log::error "Failed to run command: ${cmd} Output:"
echo "${output}"
exit 1
fi
fi
_OUTPUT=${output}
return ${rc}
}
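# Illustrative usage (the commands shown are examples only):
#   run-cmd "uname -r"            # a failure here logs the output and exits 1
#   run-cmd -i "ls /nonexistent"  # with -i the failure is tolerated; the caller
#                                 # can inspect the return code and ${_OUTPUT}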
#
# After the initial VM setup, we use SSH with keys to access the VMs
# This requires an SSH agent, so we verify that it's running
#
function verify-ssh-prereqs {
kube::log::status "Validating SSH configuration..."
local rc
rc=0
ssh-add -L 1> /dev/null 2> /dev/null || rc=$?
# "Could not open a connection to your authentication agent."
if [[ "${rc}" -eq 2 ]]; then
# ssh agent wasn't running, so start it and ensure we stop it
eval "$(ssh-agent)" > /dev/null
trap-add "kill ${SSH_AGENT_PID}" EXIT
fi
rc=0
ssh-add -L 1> /dev/null 2> /dev/null || rc=$?
# "The agent has no identities."
if [[ "${rc}" -eq 1 ]]; then
# Try adding one of the default identities, with or without passphrase.
ssh-add || true
fi
# Expect at least one identity to be available.
if ! ssh-add -L 1> /dev/null 2> /dev/null; then
kube::log::error "Could not find or add an SSH identity."
kube::log::error "Please start ssh-agent, add your identity, and retry."
exit 1
fi
}
#
# Verify that Photon Controller has been configured in the way we expect. Specifically
# - Have the flavors been created?
# - Has the image been uploaded?
# TODO: Check the tenant and project as well.
function verify-photon-config {
kube::log::status "Validating Photon configuration..."
# We don't want silent failure: we check for failure
set +o pipefail
verify-photon-flavors
verify-photon-image
verify-photon-tenant
# Reset default set in common.sh
set -o pipefail
}
#
# Verify that the VM and disk flavors have been created
#
function verify-photon-flavors {
local rc=0
${PHOTON} flavor list | awk -F'\t' '{print $2}' | grep -q "^${PHOTON_MASTER_FLAVOR}$" > /dev/null 2>&1 || rc=$?
if [[ ${rc} -ne 0 ]]; then
kube::log::error "ERROR: Cannot find VM flavor named ${PHOTON_MASTER_FLAVOR}"
exit 1
fi
if [[ "${PHOTON_MASTER_FLAVOR}" != "${PHOTON_NODE_FLAVOR}" ]]; then
rc=0
${PHOTON} flavor list | awk -F'\t' '{print $2}' | grep -q "^${PHOTON_NODE_FLAVOR}$" > /dev/null 2>&1 || rc=$?
if [[ ${rc} -ne 0 ]]; then
kube::log::error "ERROR: Cannot find VM flavor named ${PHOTON_NODE_FLAVOR}"
exit 1
fi
fi
${PHOTON} flavor list | awk -F'\t' '{print $2}' | grep -q "^${PHOTON_DISK_FLAVOR}$" > /dev/null 2>&1 || rc=$?
if [[ ${rc} -ne 0 ]]; then
kube::log::error "ERROR: Cannot find disk flavor named ${PHOTON_DISK_FLAVOR}"
exit 1
fi
}
#
# Verify that we have the image we need, and it's not in error state or
# multiple copies
#
function verify-photon-image {
local rc
rc=0
${PHOTON} image list | grep -q $'\t'"${PHOTON_IMAGE}"$'\t' > /dev/null 2>&1 || rc=$?
if [[ ${rc} -ne 0 ]]; then
kube::log::error "ERROR: Cannot find image \"${PHOTON_IMAGE}\""
exit 1
fi
rc=0
${PHOTON} image list | grep $'\t'"${PHOTON_IMAGE}"$'\t' | grep ERROR > /dev/null 2>&1 || rc=$?
if [[ ${rc} -eq 0 ]]; then
echo "Warning: You have at least one ${PHOTON_IMAGE} image in the ERROR state. You may want to investigate."
echo "Images in the ERROR state will be ignored."
fi
rc=0
num_images=$(${PHOTON} image list | grep $'\t'"${PHOTON_IMAGE}"$'\t' | grep -c READY)
if [[ "${num_images}" -gt 1 ]]; then
echo "ERROR: You have more than one READY ${PHOTON_IMAGE} image. Ensure there is only one"
exit 1
fi
}
function verify-photon-tenant {
local rc
rc=0
${PHOTON} tenant list | grep -q $'\t'"${PHOTON_TENANT}" > /dev/null 2>&1 || rc=$?
if [[ ${rc} -ne 0 ]]; then
echo "ERROR: Cannot find tenant \"${PHOTON_TENANT}\""
exit 1
fi
${PHOTON} project list --tenant "${PHOTON_TENANT}" | grep -q $'\t'"${PHOTON_PROJECT}"$'\t' > /dev/null 2>&1 || rc=$?
if [[ ${rc} -ne 0 ]]; then
echo "ERROR: Cannot find project \"${PHOTON_PROJECT}\""
exit 1
fi
}
#
# Verifies that a given command is in the PATH
#
function verify-cmd-in-path {
cmd=${1}
which "${cmd}" >/dev/null || {
kube::log::error "Can't find ${cmd} in PATH, please install and retry."
exit 1
}
}
#
# Checks that KUBE_TEMP is set, or sets it
# If it sets it, it also creates the temporary directory
# and sets up a trap so that we delete it when we exit
#
function ensure-temp-dir {
if [[ -z ${KUBE_TEMP-} ]]; then
KUBE_TEMP=$(mktemp -d -t kubernetes.XXXXXX)
trap-add "rm -rf '${KUBE_TEMP}'" EXIT
fi
}
#
# Repeatedly try a command over ssh until it succeeds or until five minutes have passed
# The timeout isn't exact, since we assume the command runs instantaneously, and
# it doesn't.
#
function try-until-success-ssh {
local vm_ip=${1}
local cmd_description=${2}
local cmd=${3}
local timeout=600
local sleep_time=5
local max_attempts
((max_attempts=timeout/sleep_time))
kube::log::status "${cmd_description} for up to 10 minutes..."
local attempt=0
while true; do
local rc=0
run-ssh-cmd -i "${vm_ip}" "${cmd}" || rc=1
if [[ ${rc} != 0 ]]; then
if (( attempt == max_attempts )); then
kube::log::error "Failed, cannot proceed: you may need to retry to log into the VM to debug"
exit 1
fi
else
break
fi
attempt=$((attempt+1))
sleep ${sleep_time}
done
}
function try-until-success {
local cmd_description=${1}
local cmd=${2}
local timeout=600
local sleep_time=5
local max_attempts
((max_attempts=timeout/sleep_time))
kube::log::status "${cmd_description} for up to 10 minutes..."
local attempt=0
while true; do
local rc=0
run-cmd -i "${cmd}" || rc=1
if [[ ${rc} != 0 ]]; then
if (( attempt == max_attempts )); then
kube::log::error "Failed, cannot proceed"
exit 1
fi
else
break
fi
attempt=$((attempt+1))
sleep ${sleep_time}
done
}
#
# Sets up a trap handler
#
function trap-add {
local handler="${1}"
local signal="${2-EXIT}"
local cur
cur="$(eval "sh -c 'echo \$3' -- $(trap -p ${signal})")"
if [[ -n "${cur}" ]]; then
handler="${cur}; ${handler}"
fi
# We want ${handler} to expand now, so tell shellcheck
# shellcheck disable=SC2064
trap "${handler}" ${signal}
}
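# Illustrative example: successive calls chain their handlers on the same signal,
# so after the two calls below (mirroring ensure-temp-dir and verify-ssh-prereqs)
# both commands run on EXIT:
#   trap-add "rm -rf '${KUBE_TEMP}'" EXIT
#   trap-add "kill ${SSH_AGENT_PID}" EXIT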
|
vulpecula/kubernetes
|
cluster/photon-controller/util.sh
|
Shell
|
apache-2.0
| 33,914 |
#!/usr/bin/env bash
${IDRIS:-idris} $@ -o reg052 reg052.idr
./reg052
rm -f reg052 *.ibc
|
kojiromike/Idris-dev
|
test/reg052/run.sh
|
Shell
|
bsd-3-clause
| 88 |
#!/bin/bash
BIN="${PREFIX}/bin"
mv Gblocks "$BIN/"
chmod +x "$BIN/Gblocks"
|
matthdsm/bioconda-recipes
|
recipes/gblocks/build.sh
|
Shell
|
mit
| 75 |
#!/bin/bash
export CXXFLAGS="${CXXFLAGS} -std=c++14"
python -m pip install . --ignore-installed --no-deps -vv
|
cokelaer/bioconda-recipes
|
recipes/porechop/build.sh
|
Shell
|
mit
| 110 |
CPUPROFILE_REALTIME=1 CPUPROFILE_FREQUENCY=1000 bundle exec ruby perf/profile.rb
|
abhirao/mongoid
|
perf/profile.sh
|
Shell
|
mit
| 81 |
#!/bin/bash
# Script Name: AtoMiC Nginx Locations Configurator
echo
echo -e "${YELLOW}--->configuring Nginx Locations $ENDCOLOR"
# Copy any missing location files over but do not enable them. (They are enabled as required by each service.)
for f in "$SCRIPTPATH"/utils/nginx/locations-available/*.conf; do
filename=$(basename $f)
if [[ ! -f /etc/nginx/locations-available/$filename ]]; then
if cp $f "/etc/nginx/locations-available/$filename" || \
{ echo -e "${RED}Could not move location file $filename over.$ENDCOLOR"; exit 1; }; then
echo "Location file $filename copied over"
LOCCHANGEREQ=1
fi
else
# Does the file need updating with a new version
NEWFILEVER=$(grep Version $f | cut -d= -f2)
EXISTINGFILEVER=$(grep Version /etc/nginx/locations-available/$filename | cut -d= -f2)
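# vercomp is assumed to be provided elsewhere by the toolkit and to return 2 when
# its second argument is the newer version, which is what the check below relies on.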
vercomp "$EXISTINGFILEVER" "$NEWFILEVER"
if [[ $? = 2 ]]; then
if cp $f "/etc/nginx/locations-available/$filename" || \
{ echo -e "${RED}Could not move location file $filename over.$ENDCOLOR"; exit 1; }; then
echo "Location file $filename copied over"
LOCCHANGEREQ=1
fi
fi
# Reset the variables to be safe.
NEWFILEVER=''
EXISTINGFILEVER=''
fi
done
if [[ -z $LOCCHANGEREQ ]]; then
echo 'No changes required'
LOCCHANGEREQ=''
fi
|
htpcBeginner/AtoMiC-ToolKit
|
utils/nginx/nginx-locations-configurator.sh
|
Shell
|
mit
| 1,421 |
#!/bin/bash
# Add the user
if [[ ! $(id jenkins) ]]; then
useradd jenkins
usermod -a -G sudo jenkins
fi
# Get the package if we don't have the folder ready to go
if [[ ! -e /home/jenkins/jenkins ]]; then
su -l jenkins -c 'wget "https://bintray.com/artifact/download/davidk01/generic/stashbot-unplugged.tar.gz"'
su -l jenkins -c 'tar xf stashbot-unplugged.tar.gz'
su -l jenkins -c 'mkdir bin'
su -l jenkins -c 'cd jenkins && cp jq ~/bin'
su -l jenkins -c 'cd jenkins; export JENKINS_HOME=/home/jenkins/jenkins; nohup java -jar jenkins.war --httpPort=4442 &'
fi
# Make sure it is started in case there is a restart and Jenkins isn't running
if [[ ! $(ps aux | grep jenkins | grep -v grep) ]]; then
su -l jenkins -c 'cd jenkins; export JENKINS_HOME=/home/jenkins/jenkins; nohup java -jar jenkins.war --httpPort=4442 &'
fi
|
davidk01/stashbot-unplugged
|
puppet-module/stashbot_unplugged/files/installer.sh
|
Shell
|
mit
| 835 |
#!/bin/sh -ex
VERSION=`echo $TRAVIS_TAG | sed -e "s/v//g"`
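# e.g. (hypothetical tag) TRAVIS_TAG=v1.2.3 yields VERSION=1.2.3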
echo "*** $VERSION deploy start ***"
goxc \
-arch="386 amd64" \
-os="linux darwin" \
-+tasks=clean,compile,archive \
-o="{{.Dest}}{{.PS}}{{.Version}}{{.PS}}packer-provisioner-mitamae-{{.Os}}-{{.Arch}}{{.Ext}}" \
-resources-exclude=".go,LICENSE,README.md" \
-pv=$VERSION \
publish-github \
-owner=hatappi \
-repository=packer-provisioner-mitamae \
-apikey=$GITHUB_TOKEN \
-include="*"
echo "*** $VERSION deploy end ***"
|
hatappi/packer-provisioner-mitamae
|
script/travisci/deploy.sh
|
Shell
|
mit
| 521 |
#!/bin/bash
#
# Report changed WordPress website files.
#
# It is an hourly cron job.
declare -r DOCUMENT_ROOT="${HOME}/website/code/"
Tripwire_fake() {
# Core
nice /usr/local/bin/wp --no-debug --quiet core verify-checksums 2>&1 \
|| echo "ERROR: '$(/usr/local/bin/wp --no-debug option get blogname)' Core files"
# Theme (child theme)
THEME="$(/usr/local/bin/wp --no-debug --quiet eval 'echo get_stylesheet_directory();')"
(
cd "$THEME"
nice git status --short 2>&1 || echo "ERROR: '${THEME}' Theme files"
)
## Parent theme
#THEME_PARENT="$(/usr/local/bin/wp --no-debug --quiet eval 'echo get_template_directory();')"
#(
# cd "$THEME_PARENT"
# nice git status --short 2>&1 || echo "ERROR: '${THEME_PARENT}' Parent Theme files"
#)
# Site content (excludes core and theme)
# See /webserver/wordpress.gitignore
nice git status --short 2>&1 || echo "ERROR: Site files"
}
set -e
cd "$DOCUMENT_ROOT"
Tripwire_fake | sed -e "1s|^.|[${LOGNAME}] Website has changed.\\n&|" 1>&2
|
szepeviktor/debian-server-tools
|
monitoring/tripwire-fake.sh
|
Shell
|
mit
| 1,067 |
#!/bin/bash
# Announces a release after artifacts have been built:
#
# - Publishes release notes to GitHub
# - Announces in the #announce stream of Zulip
# - Adds a note about the release to LWIP
#
# Tools required in the environment that runs this:
#
# - bash
# - changelog-tool
# - curl
# - git
# - jq
set -o errexit
# Pull in shared configuration specific to this repo
base=$(dirname "$0")
source "${base}/config.bash"
# Verify ENV is set up correctly
# We validate all that need to be set in case, in an absolute emergency,
# we need to run this by hand. Otherwise the GitHub actions environment should
# provide all of these if properly configured
if [[ -z "${LIBRARY_NAME}" ]]; then
echo -e "\e[31mName of the library being announced needs to be set in LIBRARY_NAME."
echo -e "Exiting.\e[0m"
exit 1
fi
if [[ -z "${RELEASE_TOKEN}" ]]; then
echo -e "\e[31mA personal access token needs to be set in RELEASE_TOKEN."
echo -e "\e[31mIt should not be secrets.GITHUB_TOKEN. It has to be a"
echo -e "\e[31mpersonal access token otherwise next steps in the release"
echo -e "\e[31mprocess WILL NOT trigger."
echo -e "\e[31mPersonal access tokens are in the form:"
echo -e "\e[31m TOKEN"
echo -e "\e[31mfor example:"
echo -e "\e[31m 1234567890"
echo -e "\e[31mExiting.\e[0m"
exit 1
fi
if [[ -z "${GITHUB_REF}" ]]; then
echo -e "\e[31mThe release tag needs to be set in GITHUB_REF."
echo -e "\e[31mThe tag should be in the following GitHub specific form:"
echo -e "\e[31m /refs/tags/announce-X.Y.Z"
echo -e "\e[31mwhere X.Y.Z is the version we are announcing"
echo -e "\e[31mExiting.\e[0m"
exit 1
fi
if [[ -z "${GITHUB_REPOSITORY}" ]]; then
echo -e "\e[31mName of this repository needs to be set in GITHUB_REPOSITORY."
echo -e "\e[31mShould be in the form OWNER/REPO, for example:"
echo -e "\e[31m ponylang/ponyup"
echo -e "\e[31mExiting.\e[0m"
exit 1
fi
if [[ -z "${ZULIP_TOKEN}" ]]; then
echo -e "\e[31mA Zulip access token needs to be set in ZULIP_TOKEN."
echo -e "Exiting.\e[0m"
exit 1
fi
# no unset variables allowed from here on out
# allow above so we can display nice error messages for expected unset variables
set -o nounset
# Set up .netrc file with GitHub credentials
git config --global user.name 'Ponylang Main Bot'
git config --global user.email '[email protected]'
git config --global push.default simple
PUSH_TO="https://${RELEASE_TOKEN}@github.com/${GITHUB_REPOSITORY}.git"
# Extract version from tag reference
# Tag ref version: "refs/tags/announce-1.0.0"
# Version: "1.0.0"
VERSION="${GITHUB_REF/refs\/tags\/announce-/}"
# Prepare release notes
echo -e "\e[34mPreparing to update GitHub release notes...\e[0m"
body=$(changelog-tool get "${VERSION}")
jsontemplate="
{
\"tag_name\":\$version,
\"name\":\$version,
\"body\":\$body
}
"
json=$(jq -n \
--arg version "$VERSION" \
--arg body "$body" \
"${jsontemplate}")
# Upload release notes
echo -e "\e[34mUploading release notes...\e[0m"
result=$(curl -s -X POST "https://api.github.com/repos/${GITHUB_REPOSITORY}/releases" \
-H "Content-Type: application/x-www-form-urlencoded" \
-u "${RELEASE_TOKEN}" \
--data "${json}")
rslt_scan=$(echo "${result}" | jq -r '.id')
if [ "$rslt_scan" != null ]; then
echo -e "\e[34mRelease notes uploaded\e[0m"
else
echo -e "\e[31mUnable to upload release notes, here's the curl output..."
echo -e "\e[31m${result}\e[0m"
exit 1
fi
# Send announcement to Zulip
message="
Version ${VERSION} of ${LIBRARY_NAME} has been released.
See the [release notes](https://github.com/${GITHUB_REPOSITORY}/releases/tag/${VERSION}) for more details.
"
curl -s -X POST https://ponylang.zulipchat.com/api/v1/messages \
-u ${ZULIP_TOKEN} \
-d "type=stream" \
-d "to=announce" \
-d "topic=${LIBRARY_NAME}" \
-d "content=${message}"
# Update Last Week in Pony
echo -e "\e[34mAdding release to Last Week in Pony...\e[0m"
result=$(curl https://api.github.com/repos/ponylang/ponylang-website/issues?labels=last-week-in-pony)
lwip_url=$(echo "${result}" | jq -r '.[].url')
if [ "$lwip_url" != "" ]; then
body="
Version ${VERSION} of ${LIBRARY_NAME} has been released.
See the [release notes](https://github.com/${GITHUB_REPOSITORY}/releases/tag/${VERSION}) for more details.
"
jsontemplate="
{
\"body\":\$body
}
"
json=$(jq -n \
--arg body "$body" \
"${jsontemplate}")
result=$(curl -s -X POST "$lwip_url/comments" \
-H "Content-Type: application/x-www-form-urlencoded" \
-u "${RELEASE_TOKEN}" \
--data "${json}")
rslt_scan=$(echo "${result}" | jq -r '.id')
if [ "$rslt_scan" != null ]; then
echo -e "\e[34mRelease notice posted to LWIP\e[0m"
else
echo -e "\e[31mUnable to post to LWIP, here's the curl output..."
echo -e "\e[31m${result}\e[0m"
fi
else
echo -e "\e[31mUnable to post to Last Week in Pony."
echo -e "Can't find the issue.\e[0m"
fi
# delete announce-VERSION tag
echo -e "\e[34mDeleting no longer needed remote tag announce-${VERSION}\e[0m"
git push --delete ${PUSH_TO} "announce-${VERSION}"
|
btab/pony-semver
|
.ci-scripts/release/announce-a-release.bash
|
Shell
|
mit
| 5,067 |
#!/bin/bash
set -eo pipefail -o nounset
if [[ -z $(conda info --envs | grep "*" | grep -o "\/.*") ]]; then
export CONDA_ROOT=$(conda info --root)
env_dir=$CONDA_ROOT
export RECIPE_DIR=$CONDA_ROOT/share/ggd/Homo_sapiens/hg19/hg19-clinically-associated-variants-ensembl-v1/1
elif [[ $(conda info --envs | grep "*" | grep -o "\/.*") == "base" ]]; then
export CONDA_ROOT=$(conda info --root)
env_dir=$CONDA_ROOT
export RECIPE_DIR=$CONDA_ROOT/share/ggd/Homo_sapiens/hg19/hg19-clinically-associated-variants-ensembl-v1/1
else
env_dir=$(conda info --envs | grep "*" | grep -o "\/.*")
export CONDA_ROOT=$env_dir
export RECIPE_DIR=$env_dir/share/ggd/Homo_sapiens/hg19/hg19-clinically-associated-variants-ensembl-v1/1
fi
PKG_DIR=`find "$CONDA_SOURCE_PREFIX/pkgs/" -name "$PKG_NAME-$PKG_VERSION*" | grep -v ".tar.bz2" | grep "$PKG_VERSION.*$PKG_BUILDNUM$"`
if [ -d $RECIPE_DIR ]; then
rm -r $RECIPE_DIR
fi
mkdir -p $RECIPE_DIR
(cd $RECIPE_DIR && bash $PKG_DIR/info/recipe/recipe.sh)
cd $RECIPE_DIR
## Iterate over new files and replace file name with data package name and data version
for f in *; do
ext="${f#*.}"
filename="{f%%.*}"
if [[ ! -f "hg19-clinically-associated-variants-ensembl-v1.$ext" ]]
then
(mv $f "hg19-clinically-associated-variants-ensembl-v1.$ext")
fi
done
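## Illustrative rename (hypothetical file): a fetched "variants.bed.gz" becomes
## "hg19-clinically-associated-variants-ensembl-v1.bed.gz"; only the base name
## changes, the full extension is preserved.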
## Add environment variables
#### File
if [[ `find $RECIPE_DIR -type f -maxdepth 1 | wc -l | sed 's/ //g'` == 1 ]] ## If only one file
then
recipe_env_file_name="ggd_hg19-clinically-associated-variants-ensembl-v1_file"
recipe_env_file_name="$(echo "$recipe_env_file_name" | sed 's/-/_/g' | sed 's/\./_/g')"
file_path="$(find $RECIPE_DIR -type f -maxdepth 1)"
elif [[ `find $RECIPE_DIR -type f -maxdepth 1 | wc -l | sed 's/ //g'` == 2 ]] ## If two files
then
indexed_file=`find $RECIPE_DIR -type f \( -name "*.tbi" -or -name "*.fai" -or -name "*.bai" -or -name "*.crai" -or -name "*.gzi" \) -maxdepth 1`
if [[ ! -z "$indexed_file" ]] ## If index file exists
then
recipe_env_file_name="ggd_hg19-clinically-associated-variants-ensembl-v1_file"
recipe_env_file_name="$(echo "$recipe_env_file_name" | sed 's/-/_/g' | sed 's/\./_/g')"
file_path="$(echo $indexed_file | sed 's/\.[^.]*$//')" ## remove index extension
fi
fi
#### Dir
recipe_env_dir_name="ggd_hg19-clinically-associated-variants-ensembl-v1_dir"
recipe_env_dir_name="$(echo "$recipe_env_dir_name" | sed 's/-/_/g' | sed 's/\./_/g')"
activate_dir="$env_dir/etc/conda/activate.d"
deactivate_dir="$env_dir/etc/conda/deactivate.d"
mkdir -p $activate_dir
mkdir -p $deactivate_dir
echo "export $recipe_env_dir_name=$RECIPE_DIR" >> $activate_dir/env_vars.sh
echo "unset $recipe_env_dir_name">> $deactivate_dir/env_vars.sh
#### File
## If the file env variable exists, set the env file var
if [[ ! -z "${recipe_env_file_name:-}" ]]
then
echo "export $recipe_env_file_name=$file_path" >> $activate_dir/env_vars.sh
echo "unset $recipe_env_file_name">> $deactivate_dir/env_vars.sh
fi
echo 'Recipe successfully built!'
|
gogetdata/ggd-recipes
|
recipes/genomics/Homo_sapiens/hg19/hg19-clinically-associated-variants-ensembl-v1/post-link.sh
|
Shell
|
mit
| 3,092 |
#!/bin/zsh
alias dgres="/bin/grep -e '^\[=*\]' -e '^.*\[.*\(OK\|PASSED\|RUN\|ERROR\).*\]'"
alias dgv="dgres -e '^\[.*DEBUG.*$'"
alias dgvv="dgres -e '^\[.*\(TRACE\|DEBUG\).*$'"
alias dgo="grep -v -e '^.*\(DEBUG\|TRACE\).*'"
|
rkowalewski/dotfiles
|
zsh/scripts/dash.zsh
|
Shell
|
mit
| 226 |
# To install source this file from your .zshrc file
# Change this to reflect your installation directory
export __GIT_PROMPT_DIR=~/.oh-my-zsh/
# Initialize colors.
autoload -U colors
colors
# Allow for functions in the prompt.
setopt PROMPT_SUBST
autoload -U add-zsh-hook
add-zsh-hook chpwd chpwd_update_git_vars
add-zsh-hook preexec preexec_update_git_vars
add-zsh-hook precmd precmd_update_git_vars
## Function definitions
function preexec_update_git_vars() {
case "$2" in
git*)
__EXECUTED_GIT_COMMAND=1
;;
esac
}
function precmd_update_git_vars() {
if [ -n "$__EXECUTED_GIT_COMMAND" ] || [ -n "$ZSH_THEME_GIT_PROMPT_NOCACHE" ]; then
update_current_git_vars
unset __EXECUTED_GIT_COMMAND
fi
}
function chpwd_update_git_vars() {
update_current_git_vars
}
function update_current_git_vars() {
unset __CURRENT_GIT_STATUS
local gitstatus="$__GIT_PROMPT_DIR/gitstatus.py"
_GIT_STATUS=`python ${gitstatus}`
__CURRENT_GIT_STATUS=("${(@f)_GIT_STATUS}")
GIT_BRANCH=$__CURRENT_GIT_STATUS[1]
GIT_REMOTE=$__CURRENT_GIT_STATUS[2]
GIT_STAGED=$__CURRENT_GIT_STATUS[3]
GIT_CONFLICTS=$__CURRENT_GIT_STATUS[4]
GIT_CHANGED=$__CURRENT_GIT_STATUS[5]
GIT_UNTRACKED=$__CURRENT_GIT_STATUS[6]
GIT_CLEAN=$__CURRENT_GIT_STATUS[7]
}
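# gitstatus.py is expected (judging from the assignments above) to print one value
# per line, in order: branch, remote, staged, conflicts, changed, untracked, clean.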
git_super_status() {
precmd_update_git_vars
if [ -n "$__CURRENT_GIT_STATUS" ]; then
STATUS="($GIT_BRANCH"
STATUS="$ZSH_THEME_GIT_PROMPT_PREFIX$ZSH_THEME_GIT_PROMPT_BRANCH$GIT_BRANCH%{${reset_color}%}"
if [ -n "$GIT_REMOTE" ]; then
STATUS="$STATUS$ZSH_THEME_GIT_PROMPT_REMOTE$GIT_REMOTE%{${reset_color}%}"
fi
STATUS="$STATUS$ZSH_THEME_GIT_PROMPT_SEPARATOR"
if [ "$GIT_STAGED" -ne "0" ]; then
STATUS="$STATUS$ZSH_THEME_GIT_PROMPT_STAGED$GIT_STAGED%{${reset_color}%}"
fi
if [ "$GIT_CONFLICTS" -ne "0" ]; then
STATUS="$STATUS$ZSH_THEME_GIT_PROMPT_CONFLICTS$GIT_CONFLICTS%{${reset_color}%}"
fi
if [ "$GIT_CHANGED" -ne "0" ]; then
STATUS="$STATUS$ZSH_THEME_GIT_PROMPT_CHANGED$GIT_CHANGED%{${reset_color}%}"
fi
if [ "$GIT_UNTRACKED" -ne "0" ]; then
STATUS="$STATUS$ZSH_THEME_GIT_PROMPT_UNTRACKED%{${reset_color}%}"
fi
if [ "$GIT_CLEAN" -eq "1" ]; then
STATUS="$STATUS$ZSH_THEME_GIT_PROMPT_CLEAN"
fi
STATUS="$STATUS%{${reset_color}%}$ZSH_THEME_GIT_PROMPT_SUFFIX"
echo "$STATUS"
fi
}
# Default values for the appearance of the prompt. Configure at will.
ZSH_THEME_GIT_PROMPT_PREFIX="("
ZSH_THEME_GIT_PROMPT_SUFFIX=")"
ZSH_THEME_GIT_PROMPT_SEPARATOR="|"
ZSH_THEME_GIT_PROMPT_BRANCH="%{$fg_bold[magenta]%}"
ZSH_THEME_GIT_PROMPT_STAGED="%{$fg[red]%} +"
ZSH_THEME_GIT_PROMPT_CONFLICTS="%{$fg[red]%} !"
ZSH_THEME_GIT_PROMPT_CHANGED="%{$fg[blue]%} *"
ZSH_THEME_GIT_PROMPT_REMOTE=""
ZSH_THEME_GIT_PROMPT_UNTRACKED=" …"
ZSH_THEME_GIT_PROMPT_CLEAN="%{$fg_bold[green]%}✔"
ZSH_THEME_GIT_PROMPT_NOCACHE="fuck yeah"
|
bak1an/oh-my-zsh
|
git_prompt.sh
|
Shell
|
mit
| 2,883 |
#!/bin/sh
set -e
echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
SWIFT_STDLIB_PATH="${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}"
install_framework()
{
if [ -r "${BUILT_PRODUCTS_DIR}/$1" ]; then
local source="${BUILT_PRODUCTS_DIR}/$1"
elif [ -r "${BUILT_PRODUCTS_DIR}/$(basename "$1")" ]; then
local source="${BUILT_PRODUCTS_DIR}/$(basename "$1")"
elif [ -r "$1" ]; then
local source="$1"
fi
local destination="${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
if [ -L "${source}" ]; then
echo "Symlinked..."
source="$(readlink "${source}")"
fi
# Use filter instead of exclude so missing patterns don't throw errors.
echo "rsync -av --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${destination}\""
rsync -av --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${destination}"
local basename
basename="$(basename -s .framework "$1")"
binary="${destination}/${basename}.framework/${basename}"
if ! [ -r "$binary" ]; then
binary="${destination}/${basename}"
fi
# Strip invalid architectures so "fat" simulator / device frameworks work on device
if [[ "$(file "$binary")" == *"dynamically linked shared library"* ]]; then
strip_invalid_archs "$binary"
fi
# Resign the code if required by the build settings to avoid unstable apps
code_sign_if_enabled "${destination}/$(basename "$1")"
# Embed linked Swift runtime libraries. No longer necessary as of Xcode 7.
if [ "${XCODE_VERSION_MAJOR}" -lt 7 ]; then
local swift_runtime_libs
swift_runtime_libs=$(xcrun otool -LX "$binary" | grep --color=never @rpath/libswift | sed -E s/@rpath\\/\(.+dylib\).*/\\1/g | uniq -u && exit ${PIPESTATUS[0]})
for lib in $swift_runtime_libs; do
echo "rsync -auv \"${SWIFT_STDLIB_PATH}/${lib}\" \"${destination}\""
rsync -auv "${SWIFT_STDLIB_PATH}/${lib}" "${destination}"
code_sign_if_enabled "${destination}/${lib}"
done
fi
}
# Signs a framework with the provided identity
code_sign_if_enabled() {
if [ -n "${EXPANDED_CODE_SIGN_IDENTITY}" -a "${CODE_SIGNING_REQUIRED}" != "NO" -a "${CODE_SIGNING_ALLOWED}" != "NO" ]; then
# Use the current code_sign_identity
echo "Code Signing $1 with Identity ${EXPANDED_CODE_SIGN_IDENTITY_NAME}"
echo "/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} ${OTHER_CODE_SIGN_FLAGS} --preserve-metadata=identifier,entitlements \"$1\""
/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} ${OTHER_CODE_SIGN_FLAGS} --preserve-metadata=identifier,entitlements "$1"
fi
}
# Strip invalid architectures
strip_invalid_archs() {
binary="$1"
# Get architectures for current file
archs="$(lipo -info "$binary" | rev | cut -d ':' -f1 | rev)"
stripped=""
for arch in $archs; do
if ! [[ "${VALID_ARCHS}" == *"$arch"* ]]; then
# Strip non-valid architectures in-place
lipo -remove "$arch" -output "$binary" "$binary" || exit 1
stripped="$stripped $arch"
fi
done
if [[ "$stripped" ]]; then
echo "Stripped $binary of architectures:$stripped"
fi
}
if [[ "$CONFIGURATION" == "Debug" ]]; then
install_framework "$BUILT_PRODUCTS_DIR/Alamofire/Alamofire.framework"
install_framework "$BUILT_PRODUCTS_DIR/AlamofireImage/AlamofireImage.framework"
install_framework "$BUILT_PRODUCTS_DIR/Cartography/Cartography.framework"
fi
if [[ "$CONFIGURATION" == "Release" ]]; then
install_framework "$BUILT_PRODUCTS_DIR/Alamofire/Alamofire.framework"
install_framework "$BUILT_PRODUCTS_DIR/AlamofireImage/AlamofireImage.framework"
install_framework "$BUILT_PRODUCTS_DIR/Cartography/Cartography.framework"
fi
|
jeantimex/swift-photo-viewer
|
Pods/Target Support Files/Pods-Swift Photo Viewer/Pods-Swift Photo Viewer-frameworks.sh
|
Shell
|
mit
| 3,929 |
#!/bin/sh
if test ! $(which pyenv)
then
echo " Installing pyenv for you."
git clone git://github.com/yyuu/pyenv.git ~/.pyenv
eval "$(~/.pyenv/bin/pyenv init -)"
fi
if test ! -d ~/.pyenv/plugins/pyenv-update
then
echo " Installing pyenv-update for you."
git clone git://github.com/yyuu/pyenv-update.git ~/.pyenv/plugins/pyenv-update
fi
|
jawshooah/dotfiles
|
python/install.sh
|
Shell
|
mit
| 349 |
# Start new tmux session or reattach to an existing session, but only if not
# already inside a tmux session.
function tm() {
local is_source=; [[ "$1" == "SOURCE" ]] && is_source=1 && shift
local tmux_no_logout=~/.dotfiles/caches/tmux-no-logout
if [[ ! "$TMUX" ]]; then
# Clean up any orphaned "no logout" file.
[[ -e $tmux_no_logout ]] && rm $tmux_no_logout
# Actually start tmux.
tmux ls >/dev/null 2>&1 && tmux attach "$@" || tmux "$@"
# If "no logout" doesn't exist, exit.
[[ -e $tmux_no_logout ]] && rm $tmux_no_logout || exit
elif [[ ! "$is_source" ]]; then
echo "Already in a tmux session!"
fi
}
# Start tmux now (at login), but only if in a login shell and not already
# started (and possibly detached) in this shell.
# if shopt -q login_shell && [[ ! "$TMUX_AUTO_STARTED" ]]; then
# TMUX_AUTO_STARTED=1
# tm SOURCE
# fi
# Run an arbitrary command in the current tmux window (if only one pane)
# otherwise create a new window and run the command there.
function run_in_fresh_tmux_window() {
local panes="$(tmux list-panes | wc -l)"
if [[ "$panes" != 1 ]]; then
tmux new-window "bash --rcfile <(echo '. ~/.bashrc; $*')"
else
"$@"
fi
}
# Open editor and shell in new window using main-vertical layout.
# Usage: qq [num-panes] [working-directory] [...other-args]
function qq() {
local panes=1; [[ "$1" =~ ^[0-9]+$ ]] && panes=$1 && shift
local dir="$PWD"; [[ -d "$1" ]] && dir="$(cd "$1" && pwd)" && shift
local win=$(tmux new-window -P -a -c "$dir" -n "$(basename "$dir")")
n_times $panes tmux split-window -t $win -c "$dir"
tmux select-layout -t $win main-vertical
tmux select-pane -t $win
tmux send-keys -t $win "$EDITOR $@" Enter
}
alias q2='qq 2'
alias q3='qq 3'
alias tls='tmux ls'
|
ricardorsierra/dotfiles
|
source/10_tmux.sh
|
Shell
|
mit
| 1,770 |
#!/bin/bash
set -e
# Exercise 3.3 Find out working set size for the benchmark programs.
for benchmark in equake vortex parser
do
for cache_lines in 64 4096 8192
do
BENCHMARK=$benchmark CACHE_LINES=$cache_lines ./simics -stall -no-stc -c ${benchmark}.conf -no-win -q -p Lab1_3-5/cache_hierarchy.py
done
done
|
myrjola/comparch_labs
|
Lab1_3-5/cache_hierarchy.sh
|
Shell
|
mit
| 330 |
#!/bin/bash
# Copyright (c) 2013 - 2014 blackchip.org
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
BASEDIR=$(dirname "$0")
. $BASEDIR/config
step
|
blackchip-org/step
|
test/fixtures/run_no_step.sh
|
Shell
|
mit
| 1,163 |
#!/usr/bin/env bash
cd target && java -Dspring.profiles.active=DEV -jar *.war
|
xelita/spring-boot-web-mongo-archetype
|
src/main/resources/archetype-resources/launch_jar.sh
|
Shell
|
mit
| 77 |
function git_prompt_info() {
if [[ "$(command git config --get oh-my-zsh.hide-status 2>/dev/null)" != "1" ]]; then
ref=$(command git symbolic-ref HEAD 2> /dev/null) || \
ref=$(command git rev-parse --short HEAD 2> /dev/null) || return 0
ref="$(command echo ${ref#refs/heads/})"
length=${#ref}
maxLength=$(command git config --get oh-my-zsh.max-branch-length 2>/dev/null)
if [[ -z ${maxLength} ]]; then
maxLength=20
fi
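  # When the branch name exceeds maxLength, an optional configured prefix
  # (oh-my-zsh.prefix-regex / oh-my-zsh.prefix-length) is stripped first; if
  # the name is still too long, it is abbreviated to its head plus the last
  # suffix-length characters, joined by "..." so it fits within maxLength.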
if [[ ${length} -gt ${maxLength} ]]; then
regex=$(command git config --get oh-my-zsh.prefix-regex 2>/dev/null)
if [[ -n ${regex} ]]; then
      ref=$(command echo ${ref} | sed "s/${regex}//1" )
fi
prefixLength=$(command git config --get oh-my-zsh.prefix-length 2>/dev/null)
if [[ -z ${prefixLength} ]]; then
prefixLength=0
fi
if [[ ${prefixLength} -gt 0 ]]; then
prefix=$(command echo ${ref} | cut -c ${prefixLength})
ref=$(command echo ${ref} | cut -c `expr ${prefixLength} + 1`-)
length=${#ref}
fi
fi
if [[ ${length} -gt ${maxLength} ]]; then
suffixLength=$(command git config --get oh-my-zsh.suffix-length 2>/dev/null)
if [[ -z ${suffixLength} ]]; then
suffixLength=0
fi
length=${#ref}
suffixStart=`expr ${length} - ${suffixLength} + 1`
separatorLength=3 #3 dots...
nameEnd=`expr ${maxLength} - ${suffixLength} - ${separatorLength}`
ref="$(command echo ${ref} | cut -c 1-${nameEnd})...$(command echo ${ref} | cut -c ${suffixStart}-)"
fi
echo "$ZSH_THEME_GIT_PROMPT_PREFIX${ref}$(parse_git_dirty)$ZSH_THEME_GIT_PROMPT_SUFFIX"
fi
}
|
tnwinc/omz-git
|
omz-git.plugin.zsh
|
Shell
|
mit
| 1,665 |
#!/bin/sh
# base16-shell (https://github.com/chriskempson/base16-shell)
# Base16 Shell template by Chris Kempson (http://chriskempson.com)
# Monokai scheme by Wimer Hazenberg (http://www.monokai.nl)
# This script doesn't support linux console (use 'vconsole' template instead)
if [ "${TERM%%-*}" = 'linux' ]; then
return 2>/dev/null || exit 0
fi
color00="27/28/22" # Base 00 - Black
color01="f9/26/72" # Base 08 - Red
color02="a6/e2/2e" # Base 0B - Green
color03="f4/bf/75" # Base 0A - Yellow
color04="66/d9/ef" # Base 0D - Blue
color05="ae/81/ff" # Base 0E - Magenta
color06="a1/ef/e4" # Base 0C - Cyan
color07="f8/f8/f2" # Base 05 - White
color08="75/71/5e" # Base 03 - Bright Black
color09=$color01 # Base 08 - Bright Red
color10=$color02 # Base 0B - Bright Green
color11=$color03 # Base 0A - Bright Yellow
color12=$color04 # Base 0D - Bright Blue
color13=$color05 # Base 0E - Bright Magenta
color14=$color06 # Base 0C - Bright Cyan
color15="f9/f8/f5" # Base 07 - Bright White
color16="fd/97/1f" # Base 09
color17="cc/66/33" # Base 0F
color18="38/38/30" # Base 01
color19="49/48/3e" # Base 02
color20="a5/9f/85" # Base 04
color21="f5/f4/f1" # Base 06
color_foreground="f8/f8/f2" # Base 05
color_background="27/28/22" # Base 00
if [ -n "$TMUX" ]; then
# Tell tmux to pass the escape sequences through
# (Source: http://permalink.gmane.org/gmane.comp.terminal-emulators.tmux.user/1324)
printf_template='\033Ptmux;\033\033]4;%d;rgb:%s\033\033\\\033\\'
printf_template_var='\033Ptmux;\033\033]%d;rgb:%s\033\033\\\033\\'
printf_template_custom='\033Ptmux;\033\033]%s%s\033\033\\\033\\'
elif [ "${TERM%%-*}" = "screen" ]; then
# GNU screen (screen, screen-256color, screen-256color-bce)
printf_template='\033P\033]4;%d;rgb:%s\033\\'
printf_template_var='\033P\033]%d;rgb:%s\033\\'
printf_template_custom='\033P\033]%s%s\033\\'
else
printf_template='\033]4;%d;rgb:%s\033\\'
printf_template_var='\033]%d;rgb:%s\033\\'
printf_template_custom='\033]%s%s\033\\'
fi
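# Each palette entry below is emitted as an OSC 4 escape sequence
# (ESC ] 4 ; <index> ; rgb:<rr/gg/bb>); the tmux/screen templates above simply
# wrap that sequence so the multiplexer passes it through to the real terminal.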
# 16 color space
printf $printf_template 0 $color00
printf $printf_template 1 $color01
printf $printf_template 2 $color02
printf $printf_template 3 $color03
printf $printf_template 4 $color04
printf $printf_template 5 $color05
printf $printf_template 6 $color06
printf $printf_template 7 $color07
printf $printf_template 8 $color08
printf $printf_template 9 $color09
printf $printf_template 10 $color10
printf $printf_template 11 $color11
printf $printf_template 12 $color12
printf $printf_template 13 $color13
printf $printf_template 14 $color14
printf $printf_template 15 $color15
# 256 color space
printf $printf_template 16 $color16
printf $printf_template 17 $color17
printf $printf_template 18 $color18
printf $printf_template 19 $color19
printf $printf_template 20 $color20
printf $printf_template 21 $color21
# foreground / background / cursor color
if [ -n "$ITERM_SESSION_ID" ]; then
# iTerm2 proprietary escape codes
printf $printf_template_custom Pg f8f8f2 # foreground
printf $printf_template_custom Ph 272822 # background
printf $printf_template_custom Pi f8f8f2 # bold color
printf $printf_template_custom Pj 49483e # selection color
printf $printf_template_custom Pk f8f8f2 # selected text color
printf $printf_template_custom Pl f8f8f2 # cursor
printf $printf_template_custom Pm 272822 # cursor text
else
printf $printf_template_var 10 $color_foreground
if [ "$BASE16_SHELL_SET_BACKGROUND" != false ]; then
printf $printf_template_var 11 $color_background
if [ "${TERM%%-*}" = "rxvt" ]; then
printf $printf_template_var 708 $color_background # internal border (rxvt)
fi
fi
printf $printf_template_custom 12 ";7" # cursor (reverse video)
fi
# clean up
unset printf_template
unset printf_template_var
unset color00
unset color01
unset color02
unset color03
unset color04
unset color05
unset color06
unset color07
unset color08
unset color09
unset color10
unset color11
unset color12
unset color13
unset color14
unset color15
unset color16
unset color17
unset color18
unset color19
unset color20
unset color21
unset color_foreground
unset color_background
|
goude/runcom
|
home/.local/share/base16/templates/shell/scripts/base16-monokai.sh
|
Shell
|
mit
| 4,124 |
#!/bin/bash
function now() {
date +"%m-%d-%Y %H-%M"
}
echo "$(now): backup-all-indexes.sh - Verifying required environment variables"
: ${DATABASE_URL:?"Error: DATABASE_URL environment variable not set"}
: ${S3_BUCKET:?"Error: S3_BUCKET environment variable not set"}
: ${S3_ACCESS_KEY_ID:?"Error: S3_ACCESS_KEY_ID environment variable not set"}
: ${S3_SECRET_ACCESS_KEY:?"Error: S3_SECRET_ACCESS_KEY environment variable not set"}
# Normalize DATABASE_URL by removing the trailing slash.
DATABASE_URL="${DATABASE_URL%/}"
# Set some defaults
REPOSITORY_NAME=${REPOSITORY_NAME:-logstash_snapshots}
WAIT_SECONDS=${WAIT_SECONDS:-1800}
MAX_DAYS_TO_KEEP=${MAX_DAYS_TO_KEEP:-30}
REPOSITORY_URL=${DATABASE_URL}/_snapshot/${REPOSITORY_NAME}
# Ensure that we don't delete indices that are being logged. Using 1 should
# actually be fine here as long as everyone's on the same timezone, but let's
# be safe and require at least 2 days.
if [[ "$MAX_DAYS_TO_KEEP" -lt 2 ]]; then
echo "$(now): MAX_DAYS_TO_KEEP must be an integer >= 2."
echo "$(now): Using lower values may break archiving."
exit 1
fi
ES_VERSION=$(curl -sS $DATABASE_URL?format=yaml | grep number | cut -d'"' -f2)
ES_VERSION_COMPARED_TO_50=$(apk version -t "$ES_VERSION" "4.9")
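# "apk version -t A B" prints "<", "=" or ">" for the version comparison, so a
# result of "<" (an Elasticsearch version below 4.9, i.e. pre-5.x) selects the
# legacy cloud-aws plugin instead of repository-s3.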
if [ $ES_VERSION_COMPARED_TO_50 = '<' ]; then
REPOSITORY_PLUGIN=cloud-aws
else
REPOSITORY_PLUGIN=repository-s3
fi
backup_index ()
{
: ${1:?"Error: expected index name passed as parameter"}
local INDEX_NAME=$1
local SNAPSHOT_URL=${REPOSITORY_URL}/${INDEX_NAME}
local INDEX_URL=${DATABASE_URL}/${INDEX_NAME}
grep -q SUCCESS <(curl -sS ${SNAPSHOT_URL})
if [ $? -ne 0 ]; then
echo "$(now): Scheduling snapshot."
# If the snapshot exists but isn't in a success state, delete it so that we can try again.
grep -qE "FAILED|PARTIAL|IN_PROGRESS" <(curl -sS ${SNAPSHOT_URL}) && curl -sS -XDELETE ${SNAPSHOT_URL}
# Indexes have to be open for snapshots to work.
curl -sS -XPOST "${INDEX_URL}/_open"
curl -H "Content-Type: application/json" --fail -w "\n" -sS -XPUT ${SNAPSHOT_URL} -d "{
\"indices\": \"${INDEX_NAME}\",
\"include_global_state\": false
}" || return 1
echo "$(now): Waiting for snapshot to finish..."
timeout "${WAIT_SECONDS}" bash -c "until grep -q SUCCESS <(curl -sS ${SNAPSHOT_URL}); do sleep 1; done" || return 1
fi
echo "Deleting ${INDEX_NAME} from Elasticsearch."
curl -w "\n" -sS -XDELETE ${INDEX_URL}
}
# Ensure that Elasticsearch has the cloud-aws plugin.
grep -q $REPOSITORY_PLUGIN <(curl -sS ${DATABASE_URL}/_cat/plugins)
if [ $? -ne 0 ]; then
echo "$(now): Elasticsearch server does not have the ${REPOSITORY_PLUGIN} plugin installed. Exiting."
exit 1
fi
echo "$(now): Ensuring Elasticsearch snapshot repository ${REPOSITORY_NAME} exists..."
curl -H "Content-Type: application/json" -w "\n" -sS -XPUT ${REPOSITORY_URL} -d "{
\"type\": \"s3\",
\"settings\": {
\"bucket\" : \"${S3_BUCKET}\",
\"base_path\": \"${S3_BUCKET_BASE_PATH}\",
\"access_key\": \"${S3_ACCESS_KEY_ID}\",
\"secret_key\": \"${S3_SECRET_ACCESS_KEY}\",
\"protocol\": \"https\",
\"server_side_encryption\": true
}
}"
CUTOFF_DATE=$(date --date="${MAX_DAYS_TO_KEEP} days ago" +"%Y.%m.%d")
echo "$(now) Archiving all indexes with logs before ${CUTOFF_DATE}."
SUBSTITUTION='s/.*\(logstash-[0-9\.]\{10\}\).*/\1/'
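# ${index_name:9} below strips the 9-character "logstash-" prefix, leaving the
# YYYY.MM.DD date suffix so it can be compared lexicographically with CUTOFF_DATE.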
for index_name in $(curl -sS ${DATABASE_URL}/_cat/indices | grep logstash- | sed $SUBSTITUTION | sort); do
if [[ "${index_name:9}" < "${CUTOFF_DATE}" ]]; then
echo "$(now): Ensuring ${index_name} is archived..."
backup_index ${index_name}
if [ $? -eq 0 ]; then
echo "$(now): ${index_name} archived."
else
echo "$(now): ${index_name} archival failed."
fi
fi
done
echo "$(now): Finished archiving."
|
aptible/elasticsearch-logstash-s3-backup
|
src/backup-all-indexes.sh
|
Shell
|
mit
| 3,797 |
#!/bin/bash
## Files Address
readonly SOURCES_LIST="/etc/apt/sources.list"
#Get sudo permission from user
get_sudo_permission() {
if [ "$(whoami)" != "root" ];then
echo -e "Hi, I need sudo permission\n"
sudo su
fi
}
#Run "apt-key update apt-get update && apt-get upgrade"
#About true in line 32:
#==> When end "apt-get update" job, all commands after it ignored.So reason using true is continue commands
update_repositories() {
if [ -s $SOURCES_LIST ];then
notification "Updating Keys...";
sudo apt-key update > ../log/update_key.log 2>&1;
warning "For more info see update_key.log\n" &&
notification "Updating Repositories...";
sudo apt-get -y update > ../log/update_repo.log 2>&1;
warning "For more info update_repo.log\n" &&
question "Do you want upgrade your packages? (y/n)"
read ANSWER
if [ "$ANSWER" == "Y" ] || [ "$ANSWER" == "y" ];then
notification "Upgrading all programs...";
sudo apt-get -y upgrade > ../log/upgrade_programs.log 2>&1|| true;
warning "For more info upgrade_programs.log\n"
fi
else
notification "\"source.list\" file is empty.Please add repositories to install softwares."
fi
}
#Make directory with checking exists or not
#make_directory "name"
make_directory() {
DIRECTORY="$1"
if [ ! -d "${DIRECTORY}" ];then
$(mkdir "${DIRECTORY}")
fi
}
|
nimaebrazi/debian-base-packeage-installer
|
src/helpers.sh
|
Shell
|
mit
| 1,465 |
#!/bin/sh
# This install script is intended to download and install the latest available
# release of the dep dependency manager for Golang.
#
# It attempts to identify the current platform and an error will be thrown if
# the platform is not supported.
#
# Environment variables:
# - INSTALL_DIRECTORY (optional): defaults to $GOPATH/bin
# - DEP_RELEASE_TAG (optional): defaults to fetching the latest release
# - DEP_OS (optional): use a specific value for OS (mostly for testing)
# - DEP_ARCH (optional): use a specific value for ARCH (mostly for testing)
#
# You can install using this script:
# $ curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh
set -e
if [ "$(id -u)" -ne 0 ]; then
    echo "This script must be run as root (or with sudo)."
exit 1
fi
DEP_RELEASE_TAG="0.0.1"
RELEASES_URL="https://github.com/testthedocs/redactor/releases"
INSTALL_DIRECTORY="/usr/local/bin"
downloadJSON() {
url="$2"
echo "Fetching $url.."
if test -x "$(command -v curl)"; then
response=$(curl -s -L -w 'HTTPSTATUS:%{http_code}' -H 'Accept: application/json' "$url")
body=$(echo "$response" | sed -e 's/HTTPSTATUS\:.*//g')
code=$(echo "$response" | tr -d '\n' | sed -e 's/.*HTTPSTATUS://')
elif test -x "$(command -v wget)"; then
temp=$(mktemp)
body=$(wget -q --header='Accept: application/json' -O - --server-response "$url" 2> "$temp")
code=$(awk '/^ HTTP/{print $2}' < "$temp" | tail -1)
rm "$temp"
else
echo "Neither curl nor wget was available to perform http requests."
exit 1
fi
if [ "$code" != 200 ]; then
echo "Request failed with code $code"
exit 1
fi
eval "$1='$body'"
}
downloadFile() {
url="$1"
destination="$2"
echo "Fetching $url.."
if test -x "$(command -v curl)"; then
code=$(curl -s -w '%{http_code}' -L "$url" -o "$destination")
elif test -x "$(command -v wget)"; then
code=$(wget -q -O "$destination" --server-response "$url" 2>&1 | awk '/^ HTTP/{print $2}' | tail -1)
else
echo "Neither curl nor wget was available to perform http requests."
exit 1
fi
if [ "$code" != 200 ]; then
echo "Request failed with code $code"
exit 1
fi
}
initArch() {
ARCH=$(uname -m)
if [ -n "$DEP_ARCH" ]; then
echo "Using DEP_ARCH"
ARCH="$DEP_ARCH"
fi
case $ARCH in
amd64) ARCH="amd64";;
x86_64) ARCH="amd64";;
i386) ARCH="386";;
ppc64) ARCH="ppc64";;
ppc64le) ARCH="ppc64le";;
*) echo "Architecture ${ARCH} is not supported by this installation script"; exit 1;;
esac
echo "ARCH = $ARCH"
}
initOS() {
OS=$(uname | tr '[:upper:]' '[:lower:]')
OS_CYGWIN=0
if [ -n "$DEP_OS" ]; then
echo "Using DEP_OS"
OS="$DEP_OS"
fi
case "$OS" in
darwin) OS='darwin';;
linux) OS='linux';;
freebsd) OS='freebsd';;
mingw*) OS='windows';;
msys*) OS='windows';;
cygwin*)
OS='windows'
OS_CYGWIN=1
;;
*) echo "OS ${OS} is not supported by this installation script"; exit 1;;
esac
echo "OS = $OS"
}
# identify platform based on uname output
initArch
initOS
# determine install directory if required
echo "Will install into $INSTALL_DIRECTORY"
# assemble expected release artifact name
if [ "${OS}" != "linux" ] && { [ "${ARCH}" = "ppc64" ] || [ "${ARCH}" = "ppc64le" ];}; then
# ppc64 and ppc64le are only supported on Linux.
echo "${OS}_${ARCH} is not supported by this instalation script"
else
BINARY="redactor_${OS}_${ARCH}"
fi
# add .exe if on windows
if [ "$OS" = "windows" ]; then
BINARY="$BINARY.exe"
fi
# if DEP_RELEASE_TAG was not provided, assume latest
if [ -z "$DEP_RELEASE_TAG" ]; then
downloadJSON LATEST_RELEASE "$RELEASES_URL/latest"
DEP_RELEASE_TAG=$(echo "${LATEST_RELEASE}" | tr -s '\n' ' ' | sed 's/.*"tag_name":"//' | sed 's/".*//' )
fi
echo "Release Tag = $DEP_RELEASE_TAG"
# fetch the real release data to make sure it exists before we attempt a download
downloadJSON RELEASE_DATA "$RELEASES_URL/tag/$DEP_RELEASE_TAG"
BINARY_URL="$RELEASES_URL/download/$DEP_RELEASE_TAG/$BINARY"
DOWNLOAD_FILE=$(mktemp)
downloadFile "$BINARY_URL" "$DOWNLOAD_FILE"
echo "Setting executable permissions."
chmod +x "$DOWNLOAD_FILE"
INSTALL_NAME="redactor"
if [ "$OS" = "windows" ]; then
INSTALL_NAME="$INSTALL_NAME.exe"
fi
echo "Moving executable to $INSTALL_DIRECTORY/$INSTALL_NAME"
mv "$DOWNLOAD_FILE" "$INSTALL_DIRECTORY/$INSTALL_NAME"
|
testthedocs/redactor
|
install.sh
|
Shell
|
mit
| 4,580 |
#!/usr/bin/expect -f
#
# This Expect script was generated by autoexpect on Thu May 29 02:49:46 2014
# Expect and autoexpect were both written by Don Libes, NIST.
#
# Note that autoexpect does not guarantee a working script. It
# necessarily has to guess about certain things. Two reasons a script
# might fail are:
#
# 1) timing - A surprising number of programs (rn, ksh, zsh, telnet,
# etc.) and devices discard or ignore keystrokes that arrive "too
# quickly" after prompts. If you find your new script hanging up at
# one spot, try adding a short sleep just before the previous send.
# Setting "force_conservative" to 1 (see below) makes Expect do this
# automatically - pausing briefly before sending each character. This
# pacifies every program I know of. The -c flag makes the script do
# this in the first place. The -C flag allows you to define a
# character to toggle this mode off and on.
set force_conservative 0 ;# set to 1 to force conservative mode even if
;# script wasn't run conservatively originally
if {$force_conservative} {
set send_slow {1 .1}
proc send {ignore arg} {
sleep .1
exp_send -s -- $arg
}
}
#
# 2) differing output - Some programs produce different output each time
# they run. The "date" command is an obvious example. Another is
# ftp, if it produces throughput statistics at the end of a file
# transfer. If this causes a problem, delete these patterns or replace
# them with wildcards. An alternative is to use the -p flag (for
# "prompt") which makes Expect only look for the last line of output
# (i.e., the prompt). The -P flag allows you to define a character to
# toggle this mode off and on.
#
# Read the man page for more info.
#
# -Don
set timeout -1
spawn vncpasswd
match_max 100000
expect -exact "Using password file /root/.vnc/passwd\r
Password: "
send -- $env(PASSWORD)"\r"
expect -exact "\r
Verify: "
send -- $env(PASSWORD)"\r"
expect -exact "\r
Would you like to enter a view-only password (y/n)? "
send -- "n\r"
expect eof
|
mrl1n/Dockerfiles
|
NoVNC/start-vnc-expect-script.sh
|
Shell
|
mit
| 2,077 |
#!/bin/bash
OUT_FILE="./src/data/brew-packages.json"
PACKAGES=$(brew search)
#PACKAGES="apollo git"
TAPS=$(brew tap)
for TAP in $TAPS
do
brew untap $TAP
done
JSON=""
echo "{\"options\": [" > $OUT_FILE
for PACKAGE in $PACKAGES
do
JSON+="{\"name\": \"$PACKAGE\"},"
echo $PACKAGE
done
echo $JSON >> $OUT_FILE
echo "]}" >> $OUT_FILE
for TAP in $TAPS
do
brew tap $TAP
done
|
osxstrap/osxstrap-web
|
bin/get-brew-packages.sh
|
Shell
|
mit
| 392 |
#!/bin/sh
xrandr --output eDP-1 --mode 1920x1080 --pos 0x0 --rotate normal --output HDMI-1 --off --output DP-1 --off --output HDMI-2 --off --output DP-1-1 --mode 1920x1080 --pos 1920x0 --rotate normal --output DP-1-2 --off --output DP-1-3 --off
|
klaxalk/linux-setup
|
miscellaneous/arandr_scripts/petr/dell_lab.sh
|
Shell
|
mit
| 245 |
#!/bin/bash
date ; sudo service ntp stop ; sudo ntpdate -s time.nist.gov ; sudo service ntp start ; date
|
ThisIs-MyName/vps
|
update_time.sh
|
Shell
|
mit
| 105 |
#!/bin/bash
# ----------------------------------------------------
# Setup
# ----------------------------------------------------
# Some fun colors
COLOREND=$(tty -s && tput sgr0)
GREEN=$(tty -s && tput setaf 2)
RED=$(tty -s && tput setaf 1)
BOLD=$(tty -s && tput bold)
# Get the script dir
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# Executable file
TEST_FILE="${DIR}/../src/01_05_NutTrouble.out"
# Results are saved in this file
TEST_OUTPUT="${DIR}/tests_1_5.txt"
# Were all tests successful?
SUCCESS=true
# Intro!
echo "\n${BOLD}01_05_NutTrouble Tests${COLOREND}"
# ----------------------------------------------------
# Test 1
# ----------------------------------------------------
${TEST_FILE} > ${TEST_OUTPUT} <<EOF
3
PVP
VVV
PVV
EOF
# Compare results of test #1
OUTPUT=`tail -n1 < ${TEST_OUTPUT}`
CORRECT="4"
if [ "${OUTPUT}" != "${CORRECT}" ]; then
echo "${RED}✖︎${COLOREND} Test #1 failed. (Expected result was ${CORRECT}, instead got ${OUTPUT}.)"
SUCCESS=false
fi
# ----------------------------------------------------
# Test 2
# ----------------------------------------------------
${TEST_FILE} > ${TEST_OUTPUT} <<EOF
4
VPPP
VVVV
VVVV
PVVP
EOF
# Compare results of test #2
OUTPUT=`tail -n1 < ${TEST_OUTPUT}`
CORRECT="8"
if [ "${OUTPUT}" != "${CORRECT}" ]; then
echo "${RED}✖︎${COLOREND} Test #2 failed. (Expected result was ${CORRECT}, instead got ${OUTPUT}.)"
SUCCESS=false
fi
# ----------------------------------------------------
# Test 3
# Special case, testing spaces between input numbers...
# ----------------------------------------------------
${TEST_FILE} > ${TEST_OUTPUT} <<EOF
4
V P P P
V V V V
V V V V
P V V P
EOF
# Compare results of test #3
OUTPUT=`tail -n1 < ${TEST_OUTPUT}`
CORRECT="8"
if [ "${OUTPUT}" != "${CORRECT}" ]; then
echo "${RED}✖︎${COLOREND} Test #3 failed. (Expected result was ${CORRECT}, instead got ${OUTPUT}.)\n"
SUCCESS=false
fi
# ----------------------------------------------------
# Finish
# ----------------------------------------------------
# Remove the test temp file
rm -rf "${TEST_OUTPUT}"
if $SUCCESS; then
echo "${GREEN}✔${COLOREND} All tests passed.\n"
else
exit 1
fi
|
nickskull/FIKS-Tasks
|
tests/01_05_NutTrouble.sh
|
Shell
|
mit
| 2,180 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for USN-1717-1
#
# Security announcement date: 2013-02-12 00:00:00 UTC
# Script generation date: 2017-01-01 21:03:08 UTC
#
# Operating System: Ubuntu 12.04 LTS
# Architecture: x86_64
#
# Vulnerable packages fix on version:
# - postgresql-9.1:9.1.8-0ubuntu12.04
#
# Last versions recommended by security team:
# - postgresql-9.1:9.1.23-0ubuntu0.12.04
#
# CVE List:
# - CVE-2013-0255
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo apt-get install --only-upgrade postgresql-9.1=9.1.23-0ubuntu0.12.04 -y
|
Cyberwatch/cbw-security-fixes
|
Ubuntu_12.04_LTS/x86_64/2013/USN-1717-1.sh
|
Shell
|
mit
| 652 |
#!/bin/bash
MAC=[MAC_ADDRESS]
Broadcast=[BRODCAST_OR_TARGET_IP]
PortNumber=9
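# Wake-on-LAN magic packet: 6 bytes of 0xFF followed by the target MAC address
# (without colons) repeated 16 times; printf/sed build the hex string, echo -e
# turns the \x escapes into raw bytes, and nc sends them as a UDP datagram.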
echo -e $(echo $(printf 'f%.0s' {1..12}; printf "$(echo $MAC | sed 's/://g')%.0s" {1..16}) | sed -e 's/../\\x&/g') | nc -u $Broadcast $PortNumber -v
|
ecraciun/VariousScripts
|
Bash/wol.sh
|
Shell
|
mit
| 229 |
#Runs java app
#Limits number of cores used
if [ "$(id -u)" != "0" ]; then
echo "Sorry, you need to be root."
exit 1
fi
set_num_cpus()
{
arg=$1
dec=1
taskset -c -a -p 0-$((arg - dec)) $(pidof java) > /dev/null
#taskset -c -a -p 0-$((arg - dec)) $(pidof java) > /dev/null
}
# Runs java App
set_num_cpus $1
java -classpath ".:/home/OpenMemDb/voltdb/voltdb/*" Run $1
|
Dar13/OpenMemDB
|
database/tests/db_testing/volt_test/run.sh
|
Shell
|
mit
| 393 |
#!/usr/bin/env bash
# Download and install requirements for documentation auto-deployment.
#
# This script does the following:
#
# - Installs mkdocs under the current user.
# - Installs pymdown-extensions under the current user.
# - If the zf-mkdoc-theme is not present under the current directory, downloads
# and installs the latest tarball.
#
# In order to work, it needs the following environment variables defined:
#
# This script should be fetched from the master branch by any project opting
# into the documentation auto-deployment workflow.
#
# @license http://opensource.org/licenses/BSD-3-Clause BSD-3-Clause
# @copyright Copyright (c) 2016 Zend Technologies USA Inc. (http://www.zend.com)
SCRIPT_PATH="$(pwd)"
# Install mkdocs and required extensions.
pip install --user mkdocs
pip install --user pymdown-extensions
pip install --user markdown-fenced-code-tabs
# Conditionally install zf-mkdoc-theme.
if [[ ! -d "zf-mkdoc-theme/theme" ]];then
echo "Downloading zf-mkdoc-theme..." ;
mkdir -p zf-mkdoc-theme ;
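  # The pipeline below scrapes the GitHub "latest release" page for the first
  # x.y.z tarball path and hands it to wget via "-i -", with --base supplying
  # the https://github.com/ prefix.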
curl -s -L https://github.com/zendframework/zf-mkdoc-theme/releases/latest | egrep -o '/zendframework/zf-mkdoc-theme/archive/[0-9]*\.[0-9]*\.[0-9]*\.tar\.gz' | head -n1 | wget -O zf-mkdoc-theme.tgz --base=https://github.com/ -i - ;
(
cd zf-mkdoc-theme ;
tar xzf ../zf-mkdoc-theme.tgz --strip-components=1 ;
);
echo "Finished downloading and installing zf-mkdoc-theme" ;
fi
exit 0;
|
zendframework/zf-mkdoc-theme
|
theme-installer.sh
|
Shell
|
mit
| 1,449 |
#!/bin/bash
# Usage: sync_publish /path/to/crate -f
#
# Publish a crate and wait for it to become available.
#
# https://gist.github.com/Riateche/a1c500fe760a2b9190beb0a7134db82d
set -e
set -o pipefail
TMP_DIR=/tmp/test1
DIR="$1"
FORCE="$2"
NAME=$(grep '^name' "$DIR/Cargo.toml" | head -n 1 | sed 's/name = "\([^"]*\)"/\1/')
cd "$DIR"
VERSION=$(cargo metadata --format-version 1 2>/dev/null | jq -r '.packages[] | select(.name=="'$NAME'").version')
rm -rf "$TMP_DIR"
cargo new "$TMP_DIR" > /dev/null 2>&1
cd "$TMP_DIR"
cargo add "$NAME" --vers "=$VERSION" > /dev/null 2>&1
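# "cargo generate-lockfile" in this scratch crate only succeeds once the exact
# version is resolvable on crates.io, so it serves both as an "already
# published?" check here and as the readiness poll after publishing below.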
if cargo generate-lockfile > /dev/null 2>&1; then
echo "$NAME=$VERSION already exists, skipping."
exit 0
fi
echo "Publishing $NAME=$VERSION"
if [ "$FORCE" != "-f" ]; then
echo "This is a dry run. Run with -f to publish."
exit 0
fi
cd "$DIR"
cargo publish
cd "$TMP_DIR"
while ! cargo generate-lockfile > /dev/null 2>&1; do
echo "Waiting for crate to be published..."
sleep 1
done
|
gluon-lang/gluon
|
scripts/sync_publish.sh
|
Shell
|
mit
| 986 |
ansible-playbook -i kube-inventory.ini kube-docker-registry.yml
./kube-docker.sh
ansible-playbook -i kube-inventory.ini kube-cluster.yml
sleep 15
sudo rm /var/tmp/addon/*
ansible-playbook -i kube-inventory.ini kube-addon.yml
|
reza-rahim/microservice
|
ansible/provision/kube-build.sh
|
Shell
|
mit
| 229 |
#!/bin/bash
emmake make clean
emmake make
|
holland01/assessment-image-library
|
rebuild.sh
|
Shell
|
mit
| 43 |
#!/bin/bash
xcode-select --install
bash < <(curl -s -S -L https://raw.githubusercontent.com/moovweb/gvm/master/binscripts/gvm-installer)
|
Zemnmez/configs
|
macos-gvm.sh
|
Shell
|
mit
| 137 |
#!/bin/sh
mkdir -p /usr/src/vim
curl -SL "ftp://ftp.vim.org/pub/vim/unix/vim-7.4.tar.bz2" | tar -xjC /usr/src/vim --strip-components=1
cd /usr/src/vim
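# Compile in /etc as the location of the system-wide vimrc/gvimrc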
echo '#define SYS_VIMRC_FILE "/etc/vimrc"' >> src/feature.h
echo '#define SYS_GVIMRC_FILE "/etc/gvimrc"' >> src/feature.h
./configure --prefix=/usr --with-features=huge --enable-gui=no --without-x --enable-rubyinterp --enable-pythoninterp
make -j"$(nproc)"
make install
cd /
rm -rf /usr/src/vim
|
jjasonclark/devbox
|
build/vim.sh
|
Shell
|
mit
| 452 |
#! /bin/bash -x
### superscript template to do the preprocessing
### $Id: do_Subaru_0025_V.sh,v 1.4 2008-08-16 22:38:01 dapple Exp $
. progs.ini
REDDIR=`pwd`
export SUBARUDIR=/nfs/slac/g/ki/ki02/xoc/anja/SUBARU
#run=2005-11-30
#filter=W-J-B
run=2007-07-18
filter=W-J-V
STANDARDSTARTYPE=3SEC
IMGSDSS=SUPA0055775
IMGTOCAL=SUPA0055774
export BONN_TARGET=${run}
export BONN_FILTER=${filter}
#only choice of flat
FLAT=SKYFLAT # SKYFLAT or DOMEFLAT
SET=SET3 # sets time period of flat to use
SKYBACK=32 # in case of SKYFLAT: size of background mesh for superflat
# illumination construction
# use 256 if no "blobs" due to stars are visible (in BVR?)
# 16 (or 32) if lots of blobs
FRINGE=NOFRINGE # FRINGE if fringing correction necessary; NOFRINGE otherwise
STANDARDSTARS=1 # process the STANDARD frames, too (1 if yes; 0 if no)
if [ ${FRINGE} == "FRINGE" ]; then
ending="OCFSF"
elif [ ${FRINGE} == "NOFRINGE" ]; then
ending="OCFS"
else
echo "You need to specify FRINGE or NOFRINGE for the fringing correction!"
exit 2;
fi
SCIENCEDIR=SCIENCE_${FLAT}_${SET}
export TEMPDIR='.'
#Comment out the lines as you progress through the script
########################################
### Reset Logger
./BonnLogger.py clear
##################################################################
### create and load the SUBARU.ini file
### !!! DO NOT COMMENT THIS BLOCK !!!
##################################################################
./setup_SUBARU.sh ${SUBARUDIR}/${run}_${filter}/STANDARD/ORIGINALS
export INSTRUMENT=SUBARU
. ${INSTRUMENT:?}.ini
##################################################################
### Capture Variables
./BonnLogger.py config \
run=${run} \
filter=${filter} \
FLAT=${FLAT} \
SET=${SET} \
SKYBACK=${SKYBACK} \
FRINGE=${FRINGE} \
STANDARDSTARS=${STANDARDSTARS} \
config=${config}
#
#################################################################################
#### STANDARD star processing
#################################################################################
#if [ ! -d ${SUBARUDIR}/${run}_${filter}/STANDARD ] && [ ${STANDARDSTARS} -eq 1 ]; then
if [ ${STANDARDSTARS} -eq 1 ]; then
./BonnLogger.py clear
./BonnLogger.py comment "STANDARD star processing"
case ${filter} in
"W-J-B" )
photfilter=B # corresponding Johnson filter
photcolor=BmV # color to use
EXTCOEFF=-0.2104 # guess for the extinction coefficient
COLCOEFF=0.0 # guess for the color coefficient
;;
"W-J-V" )
photfilter=V
photcolor=VmR
EXTCOEFF=-0.1202
COLCOEFF=0.0
;;
"W-C-RC" )
photfilter=R
photcolor=VmR
EXTCOEFF=-0.0925
COLCOEFF=0.0
;;
"W-C-IC" | "W-S-I+" )
photfilter=I
photcolor=RmI
EXTCOEFF=-0.02728
COLCOEFF=0.0
;;
"W-S-Z+" )
photfilter=z
photcolor=Imz
EXTCOEFF=0.0
COLCOEFF=0.0
;;
esac
### OC STANDARD frames
#./process_split_Subaru_eclipse.sh ${SUBARUDIR}/${run}_${filter} STANDARD
#./parallel_manager.sh ./process_standard_eclipse_para.sh ${SUBARUDIR}/${run}_${filter} BIAS ${FLAT} SCIENCE STANDARD RESCALE # BIAS AND FLAT
# IF IMAGES ARE ALREADY OC'd USE THIS ONE:
#./parallel_manager.sh ./process_standard_eclipse_para_OC.sh ${SUBARUDIR}/${run}_${filter} BIAS ${FLAT} SCIENCE STANDARD RESCALE # BIAS AND FLAT
#./create_binnedmosaics.sh ${SUBARUDIR}/${run}_${filter} STANDARD SUPA OCF 8 -32
#./maskBadOverscans.py ${SUBARUDIR}/${run}_${filter} STANDARD SUPA
#./maskAutotracker.py ${SUBARUDIR}/${run}_${filter} STANDARD
#./parallel_manager.sh ./create_illumfringe_stars_para.sh ${SUBARUDIR}/${run}_${filter} STANDARD ${SKYBACK}
#if [ ${FRINGE} == "NOFRINGE" ]; then
# ./parallel_manager.sh ./process_science_illum_eclipse_para.sh ${SUBARUDIR}/${run}_${filter} STANDARD RESCALE ILLUM ${SKYBACK}
#else
# ./parallel_manager.sh ./process_science_illum_fringe_eclipse_para.sh ${SUBARUDIR}/${run}_${filter} STANDARD RESCALE ${SKYBACK}
#fi
#./create_binnedmosaics.sh ${SUBARUDIR}/${run}_${filter} STANDARD SUPA ${ending} 8 -32
# ./parallel_manager.sh ./create_weights_flags_para.sh ${SUBARUDIR}/${run}_${filter} STANDARD ${ending} WEIGHTS_FLAGS
# the three-second calibrating routine
#if [ ${STANDARDTYPE} -eq '3SEC']; then
#print "python retrieve_sdss.py ${SUBARUDIR}/${run}_${filter}/STANDARD/${IMGSDSS}_1OCFS.fits ${IMGSDSS}.cat"
#./parallel_manager.sh ./create_astrom_3SEC_para.sh ${SUBARUDIR}/${run}_${filter} STANDARD ${ending} ${IMGSDSS}.cat ${IMGSDSS}
    ./create_abs_photo_3SEC.sh ${SUBARUDIR}/${run}_${filter} STANDARD SCIENCE \
        ${ending} ${filter} ${photfilter} ${photcolor} ${EXTCOEFF} ${COLCOEFF} ${IMGTOCAL}
./parallel_manager.sh ./create_astrom_3SEC_target_para.sh ${SUBARUDIR}/${run}_${filter} STANDARD ${ending} default ${IMGTOCAL}
exit 0
./create_zp_correct_header.sh ${SUBARUDIR}/${run}_${filter} SCIENCE ${ending}
# now work on 3-second exposure on MACS field
./parallel_manager.sh ./create_astrom_obs_para.sh ${SUBARUDIR}/${run}_${filter} STANDARD ${ending} default
./mk_standard_list.sh
#create a standard star list
./create_abs_photo_info.sh ${SUBARUDIR}/${run}_${filter} STANDARD SCIENCE \
${ending} ${filter} ${photfilter} ${photcolor} ${EXTCOEFF} ${COLCOEFF}
#./create_zp_correct_header.sh ${SUBARUDIR}/${run}_${filter} SCIENCE ${ending}
#fi
fi
exit 0
|
deapplegate/wtgpipeline
|
non_essentials/preprocess_scripts/do_Subaru_0025_V.sh
|
Shell
|
mit
| 5,749 |
#!/usr/bin/env bash
set -x
set -e
# determine the script path
# ref: http://stackoverflow.com/questions/4774054/reliable-way-for-a-bash-script-to-get-the-full-path-to-itself
pushd `dirname $0` > /dev/null
SCRIPTPATH=`pwd -P`
popd > /dev/null
# get the browser version string
TARGET_BROWSER=`$SCRIPTPATH/node_modules/.bin/browser-version $BROWSER $BVER`
TARGET_URL=`echo $TARGET_BROWSER | cut -d'|' -f4`
TARGET_VERSION=`echo $TARGET_BROWSER | cut -d'|' -f3`
TARGET_PATH=$SCRIPTPATH/browsers/$BROWSER/$TARGET_VERSION
# make the local bin directory and include it in the path
BINPATH=./browsers/bin
mkdir -p $BINPATH
# install if required
if [ ! -d $TARGET_PATH ]; then
echo "getting $BROWSER $TARGET_VERSION"
source $SCRIPTPATH/install-$BROWSER.sh "$TARGET_URL" "$TARGET_PATH"
fi
# create the symbolic links
case $BROWSER in
chrome)
ln -sf $TARGET_PATH/chrome $BINPATH/chrome-$BVER
$BINPATH/chrome-$BVER --version
;;
firefox)
ln -sf $TARGET_PATH/firefox $BINPATH/firefox-$BVER
$BINPATH/firefox-$BVER --version
;;
esac
|
hanumesh/video-web
|
node_modules/travis-multirunner/setup.sh
|
Shell
|
mit
| 1,053 |
#!/bin/bash
#
#SBATCH --job-name=matrix_mult
#SBATCH --output=res_matrix_mult.out
#SBATCH --ntasks=1
#SBATCH --cpus-per-task=8
#SBATCH --time=10:00
#SBATCH --mem-per-cpu=100
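# Match the OpenMP thread count to the CPUs SLURM allocated to this task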
export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK
./matrix_mult
|
leiverandres/HPC_assignments
|
openmp/matrix_mult.sh
|
Shell
|
mit
| 234 |
#!/bin/bash
set -eux
apt-get install -y ntp
service ntp stop
|
ggiamarchi/rpi-debian-builder
|
modules/basic/provision/scripts/10-ntp.sh
|
Shell
|
mit
| 63 |
#!/bin/bash
# This program and the accompanying materials are made available under the
# terms of the MIT license (X11 license) which accompanies this distribution.
# author: M. Tasić, C. Bürger
# Array of CJava source files ordered w.r.t. their compilation dependencies:
declare -a cjava_racr_sources=(
exception-api
ast
lexer
parser
unparser
lexer-cl
parser-cl
support-api
name-analysis
compositions
well-formedness
main)
# Store current working directory:
old_pwd=`pwd`
if which javac
then
echo "=========================================>>> Install CJava for Java:"
cd cjava-jastadd
sh build.bash
cd $old_pwd
fi
cd $old_pwd
if which plt-r6rs
then
echo "=========================================>>> Install CJava for Racket:"
cd cjava-racr
rm -rf racket-bin
mkdir racket-bin
for f in ${cjava_racr_sources[@]}
do
plt-r6rs ++path ./../../racr/racr/racket-bin --install --collections ./racket-bin $f.scm
done
fi
cd $old_pwd
if which larceny
then
echo "=========================================>>> Install CJava for Larceny:"
cd cjava-racr
# Delete old binaries:
rm -rf larceny-bin
mkdir larceny-bin
mkdir larceny-bin/cjava-racr
# Copy source files:
for f in *.scm
do
cp -p $f larceny-bin/cjava-racr/${f%.*}.sls
done
# Create compile script
cd larceny-bin
cd cjava-racr
echo "#!r6rs" > compile-stale
echo "(import (larceny compiler))" >> compile-stale
echo "(compile-stale-libraries)" >> compile-stale
# Execute compile script: #TODO: Fix relative path to RACR binaries when merged into RACR git repository!
larceny --r6rs --path "./../../../../racr/racr/larceny-bin:./.." --program compile-stale
# Delete compile script:
rm compile-stale
fi
cd $old_pwd
|
christoff-buerger/racr-boneyard
|
invasive-composition/refactored-case-study-by-Christoff-Bürger/install.sh
|
Shell
|
mit
| 1,728 |
#!/bin/bash
#Author : Hemanth H.M
#Licence : GNU GPLv3
# Usage
show_help(){
echo "Usage is $0 a|m|n|c|h"
echo "-a or --all to plot cpu(c),mem(m) and net(n)"
}
# Make directory to store the results
setdir(){
mkdir -p zip_Stats
cd zip_Stats
}
# Use dstat to get the data set
gendata(){
echo "Collecting stats for sec with an interval of 60sec"
dstat -tmnc 60 > dstat.dat&
[ "$?" -ne 0 ] && echo "Please check if you have installed dstat" && exit 1
sleep 7200s
exec 2>/dev/null
kill $! >/dev/null 2>&1
#save header
head -2 dstat.dat > zip_dstat.dat
#Remove the headers
sed '1,2d' dstat.dat > body.dat
#left file
awk -F '|' '{print $1 "|" $2 "|" $3 "|"}' body.dat > left.dat
awk -F '|' '{print $4}' body.dat > tmp_right.dat
awk -F ' ' '{printf ("%3d %3d %3d %3d %3d %3d\n" ,$1*2 ,$2/2 ,100-$1*2-$2/2 ,$4 ,$5 , $6) }' tmp_right.dat > right.dat
awk 'NR==FNR{a[NR]=$0}NR>FNR{printf("%s%s\n", a[FNR],$0)}' left.dat right.dat >> zip_dstat.dat
awk -F ' ' 'BEGIN {i=0} {printf("%d %d %d %d\n",i ,$1, $2, $3)} {i=i+60}' right.dat > plot_stat.dat
}
#############################################
#MAIN BLOCK
#############################################
# Use GNU plot to plot the graph
graph (){
gnuplot << EOF
set terminal $fileType
set output $output
set title $title
set xlabel $xlabel
set xtics 0,60,7200
#set xdata time
set ylabel $ylabel
#set timefmt "%d-%m %H:%M:%S"
#set format x "%H:%M"
plot ${plot[*]}
EOF
}
# Plot CPU usage
plotcpu(){
fileType="png"
output='"cpu.png"'
title='"cpu-usage"'
xlabel='"time"'
ylabel='"percent"'
# Using an array and preserving the '"quotes"' is very much necessary
plot=( '"plot_stat.dat"' using 1:3 title '"system"' w lines ls 1 ,'"plot_stat.dat"' using 1:2 title '"user"' w lines ls 4 ,'"plot_stat.dat"' using 1:4 title '"idle"' w lines ls 5 )
graph
}
# Plot memory usage
plotmem(){
fileType="png"
output='"memory.png"'
title='"memory-usage"'
xlabel='"time"'
ylabel='"size(Mb)"'
plot=( '"stat.dat"' using 1:8 title '"used"' with lines,'"stat.dat"' using 1:9 title '"buff"' with lines, '"stat.dat"' using 1:10 title '"cach"' with lines,'"stat.dat"' using 1:11 title '"free"' with lines )
graph "png" '"memo.png"' '"cpu-usage"' '"time"' '"percent"' $plot
}
# Plot network usage
plotnet(){
fileType="png"
output='"network.png"'
title='"network-usage"'
xlabel='"time"'
ylabel='"size(k)"'
plot=( '"stat.dat"' using 1:11 title '"sent"' with lines,'"stat.dat"' using 1:12 title '"recvd"' with lines )
graph
}
# Clean up all the collected stats
clean(){
echo "Cleaning"
cd Stats
rm -r *.dat
echo "Done!"
}
#Check for the first input if it's zero show help
[ -z $1 ] && show_help && exit 1;
# Set dir and gen data
setdir
gendata
#wait
#clean
# Loop for different options
while [[ $1 == -* ]]; do
case "$1" in
-h|--help|-\?) show_help; exit 0;;
-a|--all) plotcpu ; plotmem ; plotnet ; exit 0;;
-m|--mem) plotmem ; exit 0 ;;
-n|--net) plotnet ; exit 0 ;;
-c|--cpu) plotcpu ; exit 0 ;;
--) shift; break;;
-*) echo "invalid option: $1"; show_help; exit 1;;
esac
done
|
CharlesZhong/Mobile-Celluar-Measure
|
bin/zip_stat.sh
|
Shell
|
mit
| 3,007 |
#!/bin/bash
cat >/app/config.js << EOF
'use strict';
angular.module('config', [])
.constant('config', {
'endpoint' : "/api",
'provider' : "$PROVIDER",
'client_id' : "$CLIENT_ID",
'gitlab_url' : "$GITLAB_URL",
'colors' : {}
});
EOF
|
satterly/docker-config
|
alerta-snmp/config.js.sh
|
Shell
|
mit
| 272 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for USN-2890-2
#
# Security announcement date: 2016-02-01 00:00:00 UTC
# Script generation date: 2017-01-01 21:05:09 UTC
#
# Operating System: Ubuntu 14.04 LTS
# Architecture: i686
#
# Vulnerable packages fix on version:
# - linux-image-4.2.0-27-lowlatency:4.2.0-27.32~14.04.1
# - linux-image-4.2.0-27-generic-lpae:4.2.0-27.32~14.04.1
# - linux-image-4.2.0-27-powerpc-e500mc:4.2.0-27.32~14.04.1
# - linux-image-4.2.0-27-powerpc64-emb:4.2.0-27.32~14.04.1
# - linux-image-4.2.0-27-powerpc-smp:4.2.0-27.32~14.04.1
# - linux-image-4.2.0-27-powerpc64-smp:4.2.0-27.32~14.04.1
# - linux-image-4.2.0-27-generic:4.2.0-27.32~14.04.1
#
# Last versions recommended by security team:
# - linux-image-4.2.0-27-lowlatency:4.2.0-27.32~14.04.1
# - linux-image-4.2.0-27-generic-lpae:4.2.0-27.32~14.04.1
# - linux-image-4.2.0-27-powerpc-e500mc:4.2.0-27.32~14.04.1
# - linux-image-4.2.0-27-powerpc64-emb:4.2.0-27.32~14.04.1
# - linux-image-4.2.0-27-powerpc-smp:4.2.0-27.32~14.04.1
# - linux-image-4.2.0-27-powerpc64-smp:4.2.0-27.32~14.04.1
# - linux-image-4.2.0-27-generic:4.2.0-27.32~14.04.1
#
# CVE List:
# - CVE-2013-7446
# - CVE-2015-7513
# - CVE-2015-7550
# - CVE-2015-7990
# - CVE-2015-8374
# - CVE-2015-8543
# - CVE-2015-8569
# - CVE-2015-8575
# - CVE-2015-8787
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo apt-get install --only-upgrade linux-image-4.2.0-27-lowlatency=4.2.0-27.32~14.04.1 -y
sudo apt-get install --only-upgrade linux-image-4.2.0-27-generic-lpae=4.2.0-27.32~14.04.1 -y
sudo apt-get install --only-upgrade linux-image-4.2.0-27-powerpc-e500mc=4.2.0-27.32~14.04.1 -y
sudo apt-get install --only-upgrade linux-image-4.2.0-27-powerpc64-emb=4.2.0-27.32~14.04.1 -y
sudo apt-get install --only-upgrade linux-image-4.2.0-27-powerpc-smp=4.2.0-27.32~14.04.1 -y
sudo apt-get install --only-upgrade linux-image-4.2.0-27-powerpc64-smp=4.2.0-27.32~14.04.1 -y
sudo apt-get install --only-upgrade linux-image-4.2.0-27-generic=4.2.0-27.32~14.04.1 -y
|
Cyberwatch/cbw-security-fixes
|
Ubuntu_14.04_LTS/i686/2016/USN-2890-2.sh
|
Shell
|
mit
| 2,130 |
#!/bin/bash
set -e
bundle exec rake db:migrate
echo "postdeploy.sh done"
|
themarshallproject/pony
|
postdeploy.sh
|
Shell
|
mit
| 73 |
#!/bin/sh
# (c) Copyright 2009 - 2010 Xilinx, Inc. All rights reserved.
#
# This file contains confidential and proprietary information
# of Xilinx, Inc. and is protected under U.S. and
# international copyright and other intellectual property
# laws.
#
# DISCLAIMER
# This disclaimer is not a license and does not grant any
# rights to the materials distributed herewith. Except as
# otherwise provided in a valid license issued to you by
# Xilinx, and to the maximum extent permitted by applicable
# law: (1) THESE MATERIALS ARE MADE AVAILABLE "AS IS" AND
# WITH ALL FAULTS, AND XILINX HEREBY DISCLAIMS ALL WARRANTIES
# AND CONDITIONS, EXPRESS, IMPLIED, OR STATUTORY, INCLUDING
# BUT NOT LIMITED TO WARRANTIES OF MERCHANTABILITY, NON-
# INFRINGEMENT, OR FITNESS FOR ANY PARTICULAR PURPOSE; and
# (2) Xilinx shall not be liable (whether in contract or tort,
# including negligence, or under any other theory of
# liability) for any loss or damage of any kind or nature
# related to, arising under or in connection with these
# materials, including for any direct, or any indirect,
# special, incidental, or consequential loss or damage
# (including loss of data, profits, goodwill, or any type of
# loss or damage suffered as a result of any action brought
# by a third party) even if such damage or loss was
# reasonably foreseeable or Xilinx had been advised of the
# possibility of the same.
#
# CRITICAL APPLICATIONS
# Xilinx products are not designed or intended to be fail-
# safe, or for use in any application requiring fail-safe
# performance, such as life-support or safety devices or
# systems, Class III medical devices, nuclear facilities,
# applications related to the deployment of airbags, or any
# other applications that could lead to death, personal
# injury, or severe property or environmental damage
# (individually and collectively, "Critical
# Applications"). Customer assumes the sole risk and
# liability of any use of Xilinx products in Critical
# Applications, subject only to applicable laws and
# regulations governing limitations on product liability.
#
# THIS COPYRIGHT NOTICE AND DISCLAIMER MUST BE RETAINED AS
# PART OF THIS FILE AT ALL TIMES.
#-----------------------------------------------------------------------------
# Script to synthesize and implement the Coregen FIFO Generator
#-----------------------------------------------------------------------------
rm -rf results
mkdir results
cd results
cp ../../../fifo_138x512.ngc .
planAhead -mode batch -source ../planAhead_rdn.tcl
|
kevintownsend/R3
|
coregen/fifo_138x512/implement/planAhead_rdn.sh
|
Shell
|
mit
| 2,529 |
#!/bin/bash
STATUSFILE=$TMPDIR/sleepwatcher-lync-status.txt
function _syslog-sleepwatcher() {
syslog -s -k Facility \
-k Sender com.nilswinkler.sleepwatcher-lync-status \
-k Level notice \
-k Message "Lync Status: $1"
}
function _lync-status() {
osascript 2>/dev/null <<EOF
if application "Microsoft Lync" is running then
-- Save your current application
tell application "System Events"
set currentApp to name of 1st process whose frontmost is true
end tell
-- Bring Lync to the front so we can use the menu
tell application "Microsoft Lync"
activate
end tell
-- Set your status to the provided parameter
tell application "System Events"
tell process "Microsoft Lync"
tell menu bar 1
tell menu bar item "Status"
tell menu "Status"
click menu item "$1"
end tell
end tell
end tell
end tell
end tell
-- Return to your previous application
tell application currentApp
activate
end tell
end if
EOF
_syslog-sleepwatcher "$1"
}
function _lync-store-status() {
osascript 2>/dev/null <<EOF
if application "Microsoft Lync" is running then
-- Save your current application
tell application "System Events"
set currentApp to name of 1st process whose frontmost is true
end tell
-- Bring Lync to the front so we can use the menu
tell application "Microsoft Lync"
activate
end tell
-- Store the status
tell application "System Events"
tell process "Microsoft Lync"
set statusMenu to menu bar item "Status" of menu bar 1
set allUiElements to entire contents of statusMenu
repeat with anElement in allUiElements
try
set checked to value of attribute "AXMenuItemMarkChar" of anElement
if checked is "✓" then
log checked
set menuItemName to name of anElement
log menuItemName
do shell script "echo " & quoted form of menuItemName & " > $STATUSFILE"
exit repeat
end if
end try
end repeat
end tell
end tell
-- Return to your previous application
tell application currentApp
activate
end tell
end if
EOF
}
function _lync-restore-status() {
local status="Reset Status"
if [ -f "$STATUSFILE" ]; then
status=$(cat $STATUSFILE)
rm $STATUSFILE
fi
_lync-status "$status"
}
|
nwinkler/sleepwatcher-lync-status
|
_lync-status.sh
|
Shell
|
mit
| 2,494 |
#!/bin/bash
# This script sets up an openaps environment to work with loop.sh,
# by defining the required devices, reports, and aliases.
#
# Released under MIT license. See the accompanying LICENSE.txt file for
# full terms and conditions
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
die() {
echo "$@"
exit 1
}
if [[ $# -lt 1 ]]; then
openaps device show pump 2>/dev/null >/dev/null || die "Usage: setup.sh <pump serial #> [max_iob] [nightscout_url]"
fi
serial=$1
( ( cd ~/openaps-dev 2>/dev/null && git status ) || ( cd && openaps init openaps-dev ) ) || die "Can't init openaps-dev"
cd ~/openaps-dev || die "Can't cd openaps-dev"
if [[ $# -lt 2 ]]; then
max_iob=0
else
max_iob=$2
fi
( ! grep -q max_iob max_iob.json 2>/dev/null || [[ $max_iob != "0" ]] ) && echo "{ \"max_iob\": $max_iob }" > max_iob.json
cat max_iob.json
git add max_iob.json
if [[ $# -gt 2 ]]; then
nightscout_url=$3
fi
if [[ $# -gt 3 ]]; then
azure_url=$4
fi
sudo cp ~/oref0/logrotate.openaps /etc/logrotate.d/openaps
sudo cp ~/oref0/logrotate.rsyslog /etc/logrotate.d/rsyslog
# don't re-create devices if they already exist
openaps device show 2>/dev/null > /tmp/openaps-devices
# add devices
grep -q pump.ini .gitignore 2>/dev/null || echo pump.ini >> .gitignore
git add .gitignore
grep pump /tmp/openaps-devices || openaps device add pump medtronic $serial || die "Can't add pump"
grep cgm /tmp/openaps-devices || openaps device add cgm dexcom || die "Can't add CGM"
git add cgm.ini
grep oref0 /tmp/openaps-devices || openaps device add oref0 process oref0 || die "Can't add oref0"
git add oref0.ini
grep iob /tmp/openaps-devices || openaps device add iob process --require "pumphistory profile clock" oref0 calculate-iob || die "Can't add iob"
git add iob.ini
grep get-profile /tmp/openaps-devices || openaps device add get-profile process --require "settings bg_targets isf basal_profile max_iob" oref0 get-profile || die "Can't add get-profile"
git add get-profile.ini
grep determine-basal /tmp/openaps-devices || openaps device add determine-basal process --require "iob temp_basal glucose profile" oref0 determine-basal || die "Can't add determine-basal"
git add determine-basal.ini
grep pebble /tmp/openaps-devices || openaps device add pebble process --require "glucose iob basal_profile temp_basal suggested enacted" oref0 pebble || die "Can't add pebble"
git add pebble.ini
grep ns-upload /tmp/openaps-devices || openaps device add ns-upload process --require "pumphistory" ns-upload-entries || die "Can't add ns-upload"
git add ns-upload.ini
grep azure-upload /tmp/openaps-devices || openaps device add azure-upload process --require "iob enactedBasal bgreading webapi" oref0 sendtempbasal-Azure || die "Can't add sendtempbasal-Azure"
git add azure-upload.ini
# don't re-create reports if they already exist
openaps report show 2>/dev/null > /tmp/openaps-reports
# add reports for frequently-refreshed monitoring data
ls monitor 2>/dev/null >/dev/null || mkdir monitor || die "Can't mkdir monitor"
grep monitor/glucose.json /tmp/openaps-reports || openaps report add monitor/glucose.json JSON cgm iter_glucose 5 || die "Can't add glucose.json"
grep model.json /tmp/openaps-reports || openaps report add model.json JSON pump model || die "Can't add model"
grep monitor/clock.json /tmp/openaps-reports || openaps report add monitor/clock.json JSON pump read_clock || die "Can't add clock.json"
grep monitor/temp_basal.json /tmp/openaps-reports || openaps report add monitor/temp_basal.json JSON pump read_temp_basal || die "Can't add temp_basal.json"
grep monitor/reservoir.json /tmp/openaps-reports || openaps report add monitor/reservoir.json JSON pump reservoir || die "Can't add reservoir.json"
grep monitor/pumphistory.json /tmp/openaps-reports || openaps report add monitor/pumphistory.json JSON pump iter_pump_hours 4 || die "Can't add pumphistory.json"
grep monitor/iob.json /tmp/openaps-reports || openaps report add monitor/iob.json text iob shell monitor/pumphistory.json settings/profile.json monitor/clock.json || die "Can't add iob.json"
# add reports for infrequently-refreshed settings data
ls settings 2>/dev/null >/dev/null || mkdir settings || die "Can't mkdir settings"
grep settings/bg_targets.json /tmp/openaps-reports || openaps report add settings/bg_targets.json JSON pump read_bg_targets || die "Can't add bg_targets.json"
grep settings/insulin_sensitivies.json /tmp/openaps-reports || openaps report add settings/insulin_sensitivies.json JSON pump read_insulin_sensitivies || die "Can't add insulin_sensitivies.json"
grep settings/basal_profile.json /tmp/openaps-reports || openaps report add settings/basal_profile.json JSON pump read_selected_basal_profile || die "Can't add basal_profile.json"
grep settings/settings.json /tmp/openaps-reports || openaps report add settings/settings.json JSON pump read_settings || die "Can't add settings.json"
grep settings/profile.json /tmp/openaps-reports || openaps report add settings/profile.json text get-profile shell settings/settings.json settings/bg_targets.json settings/insulin_sensitivies.json settings/basal_profile.json max_iob.json || die "Can't add profile.json"
# add suggest and enact reports
ls enact 2>/dev/null >/dev/null || mkdir enact || die "Can't mkdir enact"
grep enact/suggested.json /tmp/openaps-reports || openaps report add enact/suggested.json text determine-basal shell monitor/iob.json monitor/temp_basal.json monitor/glucose.json settings/profile.json || die "Can't add suggested.json"
grep enact/enacted.json /tmp/openaps-reports || openaps report add enact/enacted.json JSON pump set_temp_basal enact/suggested.json || die "Can't add enacted.json"
# upload results
ls upload 2>/dev/null >/dev/null || mkdir upload || die "Can't mkdir upload"
grep upload/pebble.json /tmp/openaps-reports || openaps report add upload/pebble.json text pebble shell monitor/glucose.json monitor/iob.json settings/basal_profile.json monitor/temp_basal.json enact/suggested.json enact/enacted.json || die "Can't add oref0.json"
#grep upload/azure-upload.json /tmp/openaps-reports || openaps report add upload/azure-upload.json text azure-upload shell monitor/iob.json enact/enacted.json monitor/glucose.json $azure_url || die "Can't add azure-upload.json"
# don't re-create aliases if they already exist
openaps alias show 2>/dev/null > /tmp/openaps-aliases
# add aliases
grep ^invoke /tmp/openaps-aliases || openaps alias add invoke "report invoke" || die "Can't add invoke"
grep ^preflight /tmp/openaps-aliases || openaps alias add preflight '! bash -c "rm -f model.json && openaps report invoke model.json && test -n $(json -f model.json) && echo \"PREFLIGHT OK\" || ( mm-stick warmup fail \"NO PUMP MODEL RESPONDED\" || mm-stick fail \"NO MEDTRONIC CARELINK STICK AVAILABLE\"; sudo oref0-reset-usb)"' || die "Can't add preflight"
grep ^monitor-cgm /tmp/openaps-aliases || openaps alias add monitor-cgm "report invoke monitor/glucose.json" || die "Can't add monitor-cgm"
grep ^monitor-pump /tmp/openaps-aliases || openaps alias add monitor-pump "report invoke monitor/clock.json monitor/temp_basal.json monitor/pumphistory.json monitor/iob.json" || die "Can't add monitor-pump"
grep ^get-settings /tmp/openaps-aliases || openaps alias add get-settings "report invoke settings/bg_targets.json settings/insulin_sensitivies.json settings/basal_profile.json settings/settings.json settings/profile.json" || die "Can't add get-settings"
if [ $nightscout_url ]; then
grep upload/ns-upload.json /tmp/openaps-reports || openaps report add upload/ns-upload.json text ns-upload shell monitor/pumphistory.json $nightscout_url || die "Can't add ns-upload.json"
grep ^ns-upload /tmp/openaps-aliases || openaps alias add ns-upload "report invoke upload/ns-upload.json" || die "Can't add ns-upload"
sgv_url=$nightscout_url/api/v1/entries.json?type=sgv
grep ns-glucose /tmp/openaps-devices || openaps device add ns-glucose process --require nightscout_url "bash -c \"curl -s $sgv_url | json -e 'this.glucose = this.sgv'\"" || die "Can't add ns-glucose"
git add ns-glucose.ini
grep ns-glucose.json /tmp/openaps-reports || openaps report add monitor/ns-glucose.json text ns-glucose shell $sgv_url || die "Can't add ns-glucose.json"
grep ^get-ns-glucose /tmp/openaps-aliases || openaps alias add get-ns-glucose "report invoke monitor/ns-glucose.json" || die "Can't add get-ns-glucose"
grep ^get-bg /tmp/openaps-aliases || openaps alias add get-bg '! bash -c "openaps monitor-cgm 2>/dev/null || ( openaps get-ns-glucose && mv monitor/ns-glucose.json monitor/glucose.json )"'
else
grep ^get-bg /tmp/openaps-aliases || openaps alias add get-bg "monitor-cgm"
fi
grep ^gather /tmp/openaps-aliases || openaps alias add gather '! bash -c "rm monitor/*; openaps get-bg && openaps monitor-pump && openaps get-settings"' || die "Can't add gather"
openaps alias add wait-for-bg '! bash -c "cp monitor/glucose.json monitor/last-glucose.json; while(diff -q monitor/last-glucose.json monitor/glucose.json); do echo -n .; openaps get-bg >/dev/null; sleep 10; done"'
grep ^enact /tmp/openaps-aliases || openaps alias add enact '! bash -c "rm enact/suggested.json; openaps invoke enact/suggested.json && cat enact/suggested.json && grep -q duration enact/suggested.json && ( openaps invoke enact/enacted.json && cat enact/enacted.json ) || echo No action required"' || die "Can't add enact"
grep ^wait-loop /tmp/openaps-aliases || openaps alias add wait-loop '! bash -c "openaps preflight && openaps gather && openaps wait-for-bg && openaps enact"' || die "Can't add wait-loop"
grep ^loop /tmp/openaps-aliases || openaps alias add loop '! bash -c "openaps preflight && openaps gather && openaps enact"' || die "Can't add loop"
grep ^pebble /tmp/openaps-aliases || openaps alias add pebble "report invoke upload/pebble.json" || die "Can't add pebble"
#grep ^azure-upload /tmp/openaps-aliases || openaps alias add azure-upload "report invoke upload/azure-upload.json" || die "Can't add azure-upload"
#grep ^upload /tmp/openaps-aliases || openaps alias add upload '! bash -c "openaps pebble; openaps ns-upload; openaps azure-upload"' || die "Can't add upload"
grep ^upload /tmp/openaps-aliases || openaps alias add upload '! bash -c "openaps pebble; openaps ns-upload"' || die "Can't add upload"
grep ^retry-loop /tmp/openaps-aliases || openaps alias add retry-loop '! bash -c "openaps wait-loop || until( ! mm-stick warmup || openaps loop); do sleep 10; done; openaps upload && openaps monitor-pump && openaps upload"' || die "Can't add retry-loop"
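# Illustrative scheduling (sketch, not part of the original setup): once these
# aliases exist, the retry-loop is typically driven from cron; the directory
# below is a hypothetical openaps instance path.
#   */5 * * * * cd /home/pi/myopenaps && openaps retry-loop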
|
scottleibrand/openaps-sh
|
setup.sh
|
Shell
|
mit
| 10,969 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for RHSA-2013:0821
#
# Security announcement date: 2013-05-14 20:12:38 UTC
# Script generation date: 2017-02-02 21:21:30 UTC
#
# Operating System: Red Hat 6
# Architecture: x86_64
#
# Vulnerable packages fix on version:
# - thunderbird.x86_64:17.0.6-2.el6_4
# - thunderbird-debuginfo.x86_64:17.0.6-2.el6_4
#
# Last versions recommended by the security team:
# - thunderbird.x86_64:45.7.0-1.el6_8
# - thunderbird-debuginfo.x86_64:45.7.0-1.el6_8
#
# CVE List:
# - CVE-2013-0801
# - CVE-2013-1670
# - CVE-2013-1674
# - CVE-2013-1675
# - CVE-2013-1676
# - CVE-2013-1677
# - CVE-2013-1678
# - CVE-2013-1679
# - CVE-2013-1680
# - CVE-2013-1681
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo yum install thunderbird.x86_64-45.7.0 -y
sudo yum install thunderbird-debuginfo.x86_64-45.7.0 -y
|
Cyberwatch/cbw-security-fixes
|
Red_Hat_6/x86_64/2013/RHSA-2013:0821.sh
|
Shell
|
mit
| 954 |
#!/bin/bash
rake build
sudo rvm all do gem install --local pkg/pidx-0.0.1.gem
|
xudeheng/pidx
|
installer.sh
|
Shell
|
mit
| 77 |
#!/usr/bin/env bash
source ~/.rvm/scripts/rvm
## Usage
## verbose_tester.sh ruby ruby2 ruby3
for ruby in $@
do
cd ./rails
rvm $ruby rake test
echo
cd ..
done
|
tsykoduk/Rails-Bugmasher
|
testers/verbose_tester.sh
|
Shell
|
mit
| 170 |
#!/bin/bash
#Do not edit. File automatically generated
#SBATCH --ntasks=128
#SBATCH --nodes=8
#SBATCH -A EvolvingAI
#SBATCH -t 120:00:00 # run time (hh:mm:ss) - 48 hours
#SBATCH [email protected]
#SBATCH --mail-type=begin # email me when the job starts
#SBATCH --mail-type=end # email me when the job finishes
module load gnu/4.8.3
module load openmpi/1.8.4
seed_number="${1}"
experimentDir="${2}"
export OMPI_MCA_orte_tmpdir_base=/gscratch/EvolvingAI/anguyen8/log/$SLURM_JOBID
export TEST_TMPDIR=/gscratch/EvolvingAI/anguyen8/log/$SLURM_JOBID
export TMPDIR=/gscratch/EvolvingAI/anguyen8/log/$SLURM_JOBID
export TMP=/gscratch/EvolvingAI/anguyen8/log/$SLURM_JOBID
mkdir /gscratch/EvolvingAI/anguyen8/log/$SLURM_JOBID
echo "Changing to directory: " $experimentDir
cd $experimentDir
echo " /project/EvolvingAI/anguyen8/x/all_layers/build/default/exp/images/images_sine $seed_num > thisistheoutput 2> err.log" > runToBe
srun /project/EvolvingAI/anguyen8/x/all_layers/build/default/exp/images/images_sine $seed_num > output.log 2> err.log
rm -rf /gscratch/EvolvingAI/anguyen8/log/$SLURM_JOBID
echo "Done with run"
|
Evolving-AI-Lab/innovation-engine
|
sferes/launchScript.sh
|
Shell
|
mit
| 1,151 |
#!/usr/bin/env bash
echo "Start HDFS and Yarn"
# get the script path http://stackoverflow.com/questions/4774054/reliable-way-for-a-bash-script-to-get-the-full-path-to-itself
pushd `dirname $0` > /dev/null
SCRIPTPATH=`pwd -P`
popd > /dev/null
ORIGINAL_WD=${PWD}
cd ${SCRIPTPATH}
source ../common.sh
${HADOOP_HOME}/sbin/start-dfs.sh
${HADOOP_HOME}/sbin/start-yarn.sh
echo "Finish start hdfs"
cd ${ORIGINAL_WD}
|
at15/hadoop-spark-perf
|
provision/single/hadoop/start.sh
|
Shell
|
mit
| 414 |
#!/bin/bash
# script for awk to compute N50 for various contig split thresholds (ie number of consecutive N's)
# also will extend to do the same set of extensions for those records >= 1000 and >= 500
# 03Sep2016 changed to check if input file is a fasta file (by checking if first character is a ">")
# and if it is then calls scaffstruct_ex.py -ALL to create a file with Nbreaks extension
# that then is the input file. If input <fname> is fasta and <fname>.Nbreaks exists use this
# without creating it again
# 24Sep2016 start adding a feature to pass a genome size in so we can calculate NG50
#           along with the regular N50
#
#input is file with each line referring to a scaffold with number of actg consec chars followed by number of N's etc.
# e.g. 1256 23N 4566 12N 233 100N 586
# above would have 2 contigs for C25N split, 3 for C20N split, 4 contigs for C10N split, and other N split arrays
# input file is created from an assembly fasta file by scaffstruct_ex.py -ALL <asm_fasta_file>
# arrays holding contigs are named C_1N, C_5N, C_10N, C_15N, C_20N, C_25N and C_ALL
# we will call this awk script for all scaffolds, then for those scaffs >= 1000, >= 500, >= 300
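# Example invocation (sketch; file name and genome size are hypothetical):
#   ./ScaffSplitN50s.sh assembly.fasta 3100000000
# first argument: fasta file or a pre-made .Nbreaks file; optional second argument: genome size used for NG50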
declare -a arr=(0 1000 500 300)
NbreakFile=$1
first_ch=$(head -1 $1 | cut -c1)
if [ $first_ch == ">" ]; then
NbreakFile=$1.Nbreaks
if [ -e $NbreakFile ]; then
>&2 echo Using $NbreakFile
else
>&2 echo Creating $NbreakFile
scaffstruct_ex.py -ALL $1 > $NbreakFile
fi
fi
genome_size=0
if [[ ! -z $2 && $2 -gt 0 ]]; then
genome_size=$2
fi
for Scaffold_Cutoff_Val in "${arr[@]}"
do
awk -v Scaffold_Cutoff="$Scaffold_Cutoff_Val" -v genome_size="$genome_size" '
BEGIN { FS = " "
prefix = " "; szmsg = ""
if (Scaffold_Cutoff == 0) {
if (genome_size > 0)
szmsg = ". Genome size " genome_size " bp"
print "All Scaffolds" szmsg
} else {
print "Scaffolds " Scaffold_Cutoff " bp or greater "
genome_size = 0
}
}
{ # each line represents a scaffold and its runs of actg and Ns
scaf_sz = 0; for(f=1;f<=NF;f++){ scaf_sz += int($f) }
if (scaf_sz >= Scaffold_Cutoff) {
tot_scaf_sz += scaf_sz
C_ALL[0] = 0; C_25N[0] = 0; C_20N[0] = 0; C_15N[0] = 0; C_10N[0] = 0; C_5N[0] = 0; C_1N[0] = 0; # kludge so can pass array into function
addContigs(C_ALL, 0)
tot_25N_sz += addContigs(C_25N, 25)
tot_20N_sz += addContigs(C_20N, 20)
tot_15N_sz += addContigs(C_15N, 15)
tot_10N_sz += addContigs(C_10N, 10)
tot_5N_sz += addContigs(C_5N, 5)
tot_1N_sz += addContigs(C_1N, 1)
}
}
#NR >= 200000 { exit } # this is easy way to limit number of records for testing purposes
END {
delete C_ALL[0]; delete C_25N[0]; delete C_20N[0]; delete C_15N[0]; delete C_10N[0]; delete C_5N[0]; delete C_1N[0]; # remove kludge value put in to start things off
asort(C_ALL); asort(C_25N); asort(C_20N); asort(C_15N); asort(C_10N); asort(C_5N); asort(C_1N)
Calc_N50_set(tot_scaf_sz)
}
function addContigs(C_Ary, splitAt) {
if (splitAt == 0) { # no splits
C_Ary[length(C_Ary)] = scaf_sz # append size of entire scaffold
return scaf_sz
}
scaffold_contig_len = 0
cur_contig_len = 0 # length of current contig, acgt runs with N runs less than splitAt length
for (f=1; f<=NF; f++) {
cur_contig_len += int( $f ) # odd numbered fields always actg runs
f++
if (f < NF) { # if more fields, then there is an N run after this actg run
num_Ns = int( $f )
if (num_Ns >= splitAt) {
C_Ary[length(C_Ary)] = cur_contig_len
scaffold_contig_len += cur_contig_len
cur_contig_len = 0 # start on next contig
}
else # since Ns did not split it, consider them part of contig
cur_contig_len += num_Ns
}
}
scaffold_contig_len += cur_contig_len
C_Ary[length(C_Ary)] = cur_contig_len
return scaffold_contig_len
}
function Calc_N50_set(total_sz) { # total_sz is different when excluding smaller scaffolds
Calc_N50(C_ALL, length(C_ALL), total_sz) # N50 and L50 for scaffolds is in N50_L50_Values
Prt_Inf("Scaffold N50 ", length(C_ALL), total_sz)
Calc_N50(C_25N, length(C_25N), tot_25N_sz) # N50 and L50 for contigs split at 25Ns
Prt_Contig_Inf("25Ns", length(C_25N), tot_25N_sz)
Calc_N50(C_20N, length(C_20N), tot_20N_sz) # N50 and L50 for contigs split at 20Ns
Prt_Contig_Inf("20Ns", length(C_20N), tot_20N_sz)
Calc_N50(C_15N, length(C_15N), tot_15N_sz) # N50 and L50 for contigs split at 15Ns
Prt_Contig_Inf("15Ns", length(C_15N), tot_15N_sz)
Calc_N50(C_10N, length(C_10N), tot_10N_sz) # N50 and L50 for contigs split at 10Ns
Prt_Contig_Inf("10Ns", length(C_10N), tot_10N_sz)
Calc_N50(C_5N, length(C_5N), tot_5N_sz) # N50 and L50 for contigs split at 5Ns
Prt_Contig_Inf(" 5Ns", length(C_5N), tot_5N_sz)
Calc_N50(C_1N, length(C_1N), tot_1N_sz) # N50 and L50 for contigs split at a single N
Prt_Contig_Inf(" 1N", length(C_1N), tot_1N_sz)
}
function Calc_N50(C_Ary, lngth, tot_size) {
N50_cutoff = int( (tot_size+1) / 2 )
NG50_cutoff = int( (genome_size+1) / 2); NG50 = 0; LG50 = 0 # 24Sep2016
# print "N50 cutoff " N50_cutoff
contigs_lens_sofar = 0; num_contigs = 0; N50_contig = 0
for (i=lngth; i >= 1; i--) { # loop thru biggest contigs to smallest and stop when we are at N50_cutoff
contigs_lens_sofar += C_Ary[i]
num_contigs++
if (genome_size > 0 && contigs_lens_sofar >= NG50_cutoff && NG50==0) {
NG50 = C_Ary[i]
LG50 = num_contigs
}
if (contigs_lens_sofar >= N50_cutoff && N50_contig==0) {
N50_contig = C_Ary[i]
L50_contig = num_contigs
}
if (N50_contig > 0 && (genome_size==0 || NG50 > 0))
break
}
N50_L50_Values[1] = N50_contig; N50_L50_Values[2] = L50_contig
return N50_L50_Values[1]
}
function Prt_Contig_Inf(splitAtStr, lngth, size) {
ngmsg = ""
if (NG50 > 0)
ngmsg = " (NG50 " NG50 " LG50 " LG50 ")"
typ = "Contigs Split at " splitAtStr
print prefix typ ": N50 "N50_L50_Values[1] " L50 " N50_L50_Values[2] " out of " lngth " contigs in " size " bp" ngmsg
}
function Prt_Inf(typ, lngth, size) {
ngmsg = ""
if (NG50 > 0)
ngmsg = " (NG50 " NG50 " LG50 " LG50 ")"
print prefix typ N50_L50_Values[1] " L50 " N50_L50_Values[2] " out of " lngth " scaffolds in " size " bp" ngmsg
}
' $NbreakFile
done
|
calacademy-research/ScaffSplitN50s
|
ScaffSplitN50s.sh
|
Shell
|
mit
| 7,451 |
#!/bin/bash
MPC="/usr/bin/mpc"
if [ "$($MPC status | grep pause | cut -d ' ' -f1)" == "[paused]" ]; then
$MPC play 1>/dev/null
else
$MPC pause 1>/dev/null
fi
|
ggilestro/majordomo
|
bin/mpc_toggle.sh
|
Shell
|
mit
| 165 |
# Sets reasonable OS X defaults.
#
# Or, in other words, set shit how I like in OS X.
#
# The original idea (and a couple settings) were grabbed from:
# https://github.com/mathiasbynens/dotfiles/blob/master/.macos
#
# Run ./set-defaults.sh and you'll be good to go.
# Disable press-and-hold for keys in favor of key repeat.
defaults write -g ApplePressAndHoldEnabled -bool false
# Use AirDrop over every interface. srsly this should be a default.
defaults write com.apple.NetworkBrowser BrowseAllInterfaces 1
# Always open everything in Finder's list view. This is important.
defaults write com.apple.Finder FXPreferredViewStyle Nlsv
# Show the ~/Library folder.
chflags nohidden ~/Library
# Set a really fast key repeat.
defaults write NSGlobalDomain KeyRepeat -int 0
# Set the Finder prefs for showing a few different volumes on the Desktop.
defaults write com.apple.finder ShowExternalHardDrivesOnDesktop -bool true
defaults write com.apple.finder ShowRemovableMediaOnDesktop -bool true
###############################################################################
# Safari & WebKit #
###############################################################################
# Privacy: don’t send search queries to Apple
defaults write com.apple.Safari UniversalSearchEnabled -bool false
defaults write com.apple.Safari SuppressSearchSuggestions -bool true
# Set up Safari for development.
defaults write com.apple.Safari IncludeInternalDebugMenu -bool true
defaults write com.apple.Safari IncludeDevelopMenu -bool true
defaults write com.apple.Safari WebKitDeveloperExtrasEnabledPreferenceKey -bool true
defaults write com.apple.Safari "com.apple.Safari.ContentPageGroupIdentifier.WebKit2DeveloperExtrasEnabled" -bool true
defaults write NSGlobalDomain WebKitDeveloperExtras -bool true
# Set Safari’s home page to `about:blank` for faster loading
defaults write com.apple.Safari HomePage -string "about:blank"
###############################################################################
# Audio
###############################################################################
# Increase sound quality for Bluetooth headphones/headsets
defaults write com.apple.BluetoothAudioAgent "Apple Bitpool Min (editable)" -int 40
###############################################################################
# Terminal & iTerm 2 #
###############################################################################
# Only use UTF-8 in Terminal.app
defaults write com.apple.terminal StringEncodings -array 4
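# Some of these changes only take effect after the affected app or process is
# relaunched; for example (illustrative, not part of the original script):
#   killall Finder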
|
arnold-almeida/dotfiles
|
osx/set-defaults.sh
|
Shell
|
mit
| 2,630 |
#!/bin/bash
#SBATCH --partition=mono
#SBATCH --ntasks=1
#SBATCH --time=4-0:00
#SBATCH --mem-per-cpu=8000
#SBATCH -J Deep-RBM_MLP_6_inc_real_CD1_base
#SBATCH -e Deep-RBM_MLP_6_inc_real_CD1_base.err.txt
#SBATCH -o Deep-RBM_MLP_6_inc_real_CD1_base.out.txt
source /etc/profile.modules
module load gcc
module load matlab
cd ~/deepLearn && srun ./deepFunction 6 'RBM' 'MLP' '128 500 1000 1500 2000 10' '0 0 0 0 0 0' '6_inc_real' 'CD1_base' "'iteration.n_epochs', 'learning.lrate', 'learning.cd_k', 'learning.persistent_cd', 'parallel_tempering.use'" '200 1e-3 1 0 0' "'iteration.n_epochs'" '200 0'
|
aciditeam/matlab-ts
|
jobs/deepJobs_RBM_MLP_6_inc_real_CD1_base.sh
|
Shell
|
mit
| 607 |
#!/usr/bin/env bash
# installs node and yarn
curl -o- https://raw.githubusercontent.com/creationix/nvm/v0.33.0/install.sh | bash
source $NVM_DIR/nvm.sh
nvm install stable
curl -o- -L https://yarnpkg.com/install.sh | bash
source ~/.bashrc
|
vadimadr/generator-djdj
|
generators/app/templates/scripts/install_node.sh
|
Shell
|
mit
| 240 |
#!/bin/bash
echo "INFO: Init SSH for ${USER} user."
if ( id ${USER} ); then
echo "INFO: User ${USER} already exists"
echo "INFO: Change ${USER} password with environment:"
echo "${USER}:${PASS}"|chpasswd
else
echo "INFO: User ${USER} does not exist, we create it"
ENC_PASS=$(perl -e 'print crypt($ARGV[0], "password")' ${PASS})
groupadd --gid ${USER_UID} ${USER}
useradd -d /data -m -p ${ENC_PASS} --uid ${USER_UID} --gid ${USER_UID} -s /bin/bash ${USER}
fi
if [ ! -f /etc/ssh/ssh_host_rsa_key ]; then
echo "INFO: Host SSH keys do not exist, we create it"
ssh-keygen -A
fi
echo "INFO: Launching SSH Daemon"
exec /usr/sbin/sshd -D
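# Illustrative container run (sketch; image name and values are hypothetical,
# the script only relies on the USER, PASS and USER_UID environment variables):
#   docker run -d -e USER=alice -e PASS=secret -e USER_UID=1000 -p 2222:22 light-ssh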
|
ambroisemaupate/docker
|
light-ssh/start.sh
|
Shell
|
mit
| 671 |
# Copyright (C) 2017 by
# Thomas E. Gorochowski <[email protected]>, Voigt Lab, MIT
# All rights reserved
# Released under MIT license (see LICENSE.txt)
# Count mapped reads for each "gene" feature
BIN_PATH=../bin
python $BIN_PATH/count_reads.py -settings ./data/settings.txt -samples tube_1 -feature gene -attribute Name -strand_opt reverse
python $BIN_PATH/count_reads.py -settings ./data/settings.txt -samples tube_2 -feature gene -attribute Name -strand_opt reverse
python $BIN_PATH/count_reads.py -settings ./data/settings.txt -samples tube_3 -feature gene -attribute Name -strand_opt reverse
python $BIN_PATH/count_reads.py -settings ./data/settings.txt -samples tube_4 -feature gene -attribute Name -strand_opt reverse
python $BIN_PATH/count_reads.py -settings ./data/settings.txt -samples tube_5 -feature gene -attribute Name -strand_opt reverse
python $BIN_PATH/count_reads.py -settings ./data/settings.txt -samples tube_6 -feature gene -attribute Name -strand_opt reverse
python $BIN_PATH/count_reads.py -settings ./data/settings.txt -samples tube_7 -feature gene -attribute Name -strand_opt reverse
python $BIN_PATH/count_reads.py -settings ./data/settings.txt -samples tube_8 -feature gene -attribute Name -strand_opt reverse
python $BIN_PATH/count_reads.py -settings ./data/settings.txt -samples flask_1 -feature gene -attribute Name -strand_opt reverse
python $BIN_PATH/count_reads.py -settings ./data/settings.txt -samples flask_2 -feature gene -attribute Name -strand_opt reverse
python $BIN_PATH/count_reads.py -settings ./data/settings.txt -samples flask_3 -feature gene -attribute Name -strand_opt reverse
python $BIN_PATH/count_reads.py -settings ./data/settings.txt -samples flask_4 -feature gene -attribute Name -strand_opt reverse
python $BIN_PATH/count_reads.py -settings ./data/settings.txt -samples flask_5 -feature gene -attribute Name -strand_opt reverse
python $BIN_PATH/count_reads.py -settings ./data/settings.txt -samples flask_6 -feature gene -attribute Name -strand_opt reverse
python $BIN_PATH/count_reads.py -settings ./data/settings.txt -samples flask_7 -feature gene -attribute Name -strand_opt reverse
python $BIN_PATH/count_reads.py -settings ./data/settings.txt -samples flask_8 -feature gene -attribute Name -strand_opt reverse
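# Equivalent loop form of the calls above (sketch; same samples and options):
#   for s in tube_{1..8} flask_{1..8}; do
#       python $BIN_PATH/count_reads.py -settings ./data/settings.txt -samples $s -feature gene -attribute Name -strand_opt reverse
#   done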
|
VoigtLab/MIT-BroadFoundry
|
genetic-analyzer/circuit_example/02_count_reads.sh
|
Shell
|
mit
| 2,275 |
#!/bin/bash
# Edit this if needed
BOWER=$HOME/node_modules/bower/bin/bower
#################
# Python stuff
echo "Installing Python modules."
pip3 install --user routes gunicorn ppp_core nltk ppp_questionparsing_grammatical
pip3 install --user ppp_datamodel_notation_parser ppp_oeis ppp_oracle ppp_hal
pip3 install --user ppp_natural_math ppp_french_parser
pip3 install --user git+https://github.com/WojciechMula/aspell-python.git
pip3 install --user sympy ply ppp_cas ppp_spell_checker
pip3 install --user git+https://github.com/ProjetPP/ExamplePPPModule-Python.git
pip3 install --user ppp_logger
###################
# QP ML standalone
pip3 install --user ppp_questionparsing_ml_standalone
cd Deployment/
export PPP_ML_STANDALONE_CONFIG=qp_ml_standalone_config.json
DATA_DIR=`/usr/bin/env python3 -c "print(__import__('ppp_questionparsing_ml_standalone.config').config.Config().data_dir)"`
mkdir -p $DATA_DIR
if [ ! -f $DATA_DIR/embeddings-scaled.EMBEDDING_SIZE=25.txt ]
then
wget http://metaoptimize.s3.amazonaws.com/cw-embeddings-ACL2010/embeddings-scaled.EMBEDDING_SIZE=25.txt.gz -c
gzip -d embeddings-scaled.EMBEDDING_SIZE=25.txt.gz
mv -v embeddings-scaled.EMBEDDING_SIZE=25.txt $DATA_DIR
fi
python3 -m ppp_questionparsing_ml_standalone bootstrap
cd ..
##################
# Web UI
echo "Installing WebUI."
git clone https://github.com/ProjetPP/PPP-WebUI.git
cd PPP-WebUI
$BOWER install
cd ..
cp Deployment/webui_config.js PPP-WebUI/config.js
#################
# PHP modules
for module in "Wikidata"
do
echo "Installing $module module."
git clone https://github.com/ProjetPP/PPP-${module}.git
cd PPP-${module}/
curl -sS https://getcomposer.org/installer | php
php composer.phar install
cd ..
done
|
ProjetPP/Deployment
|
bootstrap_modules.sh
|
Shell
|
cc0-1.0
| 1,745 |
#!/bin/bash
# $Id: sqlbackup.sh,v 2.0 2006/10/17 17:32:05 mistur Exp $
# ----------------------------------------------------------------------
# AlternC - Web Hosting System
# Copyright (C) 2002 by the AlternC Development Team.
# http://alternc.org/
# ----------------------------------------------------------------------
# Based on:
# Valentin Lacambre's web hosting softwares: http://altern.org/
# ----------------------------------------------------------------------
# LICENSE
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License (GPL)
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# To read the license please visit http://www.gnu.org/copyleft/gpl.html
# ----------------------------------------------------------------------
# Original Author of file: Benjamin Sonntag - 2003-03-23
# Purpose of file: MySQL Database backup shell script for AlternC
# ----------------------------------------------------------------------
# Changed by Yoann Moulin : 2006-10-16
#    * Adding another possibility for naming the backup files which
#      avoids renaming old backup files (name rotation method):
#      this method includes the date of the backup day in the name of the
#      file.
#      Useful for people who use rsync, rsnapshot, etc.; this method
#      avoids syncing old files which have just been renamed but appear
#      changed to the sync script
set -e
# Get mysql user and password :
. /etc/alternc/local.sh
# get the date of the day
DATE=`date +"%Y%m%d"`
# echo function, used for output wrapping when run in daemon
# mode.
# usage: print [option] <message>
# without option, print <message> in any case on the stdout
#
# options :
# error : print <message> in any case and indicate that it is an error message
# debug : print <message> if debug mode is active
# info : print <message> if verbose mode is active
#
# notes :
# if the backup is running in daemon mode, also print to the log file when an option
# is given to the function
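#
# Illustrative calls (sketch; the messages are placeholders):
#   print "starting sql backup"            # always printed
#   print info "dumping database foo"      # printed only when VERBOSE=ON
#   print error "dump failed for foo"      # printed in any case, flagged as an error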
print() {
# if a log level is given to the print function
# 'error', 'info' or 'debug'
log_level=""
if [ "$1" == "error" ] || [ "$1" == "info" ] || [ "$1" == "debug" ];
then
# read it and remove it from the arg list
log_level="$1"
shift
fi
# if
# - No log level is specified
# - Log level equal to 'error'
# => print in any case on stdout
# => add to log file as well if $DAEMON set to 'ON'
# - Log level equal to 'debug' and $DEBUG is set to on
# - Log level equal to 'info' and $VERBOSE set to 'ON'
# => print on log file if $DAEMON set to 'ON', on stdout if not
if [ -z "$log_level" ] ||
[ "$log_level" == "error" ] ||
[ "$DEBUG" == "ON" -a "$log_level" == "debug" ] ||
[ "$log_level" == "info" -a "$VERBOSE" == "ON" ] ;
then
if [ "$DAEMON" == "ON" ] ; then
# calls without a log level must be printed on stdout in any case
# even if also printed in the log file
if [ -z "$log_level" ] || [ "$log_level" == "error" ];
then
echo "$EXEC_CMD $log_level: $*"
fi
logger -p local0.$log_level -t sqlbackup "$*"
else
if [ -z "$log_level" ];
then
echo "$*"
else
echo "$log_level: $*"
fi
fi
fi
}
error() {
print "error" $*
}
info() {
print "info" $*
}
debug() {
print "debug" $*
}
function dobck() {
local ext
local i
local old_ifs
# mysql -B uses tab as a separator between fields, so we have to mess
# with IFS in order to get the correct behaviour
old_ifs="$IFS"
IFS=" "
# read parameter given by mysql
while read login pass db count compressed target_dir; do
debug "read $login \$pass $db $count $compressed $target_dir"
# restore $IFS after read parameter
IFS="$old_ifs"
# by default : DO_BACKUP set to yes
DO_BACKUP="YES"
if [ "$compressed" -eq 1 ]; then
ext=".gz"
else
ext=""
fi
# The target directory must exist
test -d "$target_dir" || mkdir -p "$target_dir"
# if $SQLBACKUP_TYPE is set to "rotate", the classical file rotation method will be used:
# an incremental number in the file names, where the highest number indicates
# the oldest file
# if the rotate type is not set or set to "date", the name of the export file will contain the date
# of the backup and won't be rotated by the classic rotation number
# useful if you're using rsync, rsnapshot or anything based on rsync, to avoid copying
# rotated files which just changed name
#
# ------------------------------------------------------------------ #
# the variable SQLBACKUP_TYPE must be set in /etc/alternc/local.sh #
# ------------------------------------------------------------------ #
if [ "$SQLBACKUP_TYPE" == "rotate" ]; then
i="$count"
# rotate all backup
while [ $i -gt 1 ] ; do
next_i=$(($i - 1))
if [ -e "${target_dir}/${db}.sql.${next_i}${ext}" ]; then
mv -f "${target_dir}/${db}.sql.${next_i}${ext}" \
"${target_dir}/${db}.sql.${i}${ext}" 2>/dev/null || true
fi
i=$next_i # loop should end here
done
# move the most recent backup to a rotated file name
if [ -e "${target_dir}/${db}.sql${ext}" ]; then
mv -f "${target_dir}/${db}.sql${ext}" \
"${target_dir}/${db}.sql.${i}${ext}" 2>/dev/null || true
fi
name_backup_file="${db}"
else
# ---------------
# default methode
# ---------------
# calculate the mtime parameter for find
# $count is the number of backups to keep
# daily  : if we are keeping X backups, delete the file whose mtime is X + 1 days
# weekly : if we are keeping X backups, delete the file whose mtime is (X + 1) * 7 days
# echo "last2del=( $count + 1 ) * $coef "
#
last2del=$(( ( $count + 1 ) * $coef ))
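# e.g. keeping 3 backups: daily gives last2del = (3 + 1) * 1 = 4 days, weekly gives (3 + 1) * 7 = 28 days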
# find the oldest backup file need to be delete
# find ${target_dir} : in the target_dir
# -name \"${db}.*sql.*\" : All files like <db_name>.*sql.*
# -maxdepth 0 : only in the target dir (on not in the subdirectory)
# -mtime $last2del : files with the exact mtime set to $last2del
# daily : ( number of backup to keep + 1 ) days
# weekly : ( number of backup to keep + 1 ) * 7 days
# -exec rm -f {} \; : remove all files found
#
debug "find ${target_dir} -name \"${db}.*sql${ext}\" -maxdepth 1 -mtime +$last2del -exec rm -f {} \; -ls"
find ${target_dir} -name "${db}.*sql${ext}" -maxdepth 1 -mtime +${last2del} -exec rm -f {} \; -ls || true
# set the name of the backup file with the date of the day
name_backup_file="${db}.${DATE}"
fi
# if the backup exists and SQLBACKUP_OVERWRITE is set to NO, cancel the backup
if [ -f "${target_dir}/${name_backup_file}.sql${ext}" ] && [ "$SQLBACKUP_OVERWRITE" == "no" ] ; then
info "sqlbackup.sh: ${target_dir}/${name_backup_file}.sql${ext}: already exist"
info " => no backup done as specify in allow-overwrite = $SQLBACKUP_OVERWRITE"
DO_BACKUP="NO"
# if the backup exists and SQLBACKUP_OVERWRITE is set to RENAME, add a time suffix to the new file's name
elif [ -f "${target_dir}/${name_backup_file}.sql${ext}" ] && [ "$SQLBACKUP_OVERWRITE" == "rename" ] ; then
info "sqlbackup.sh: ${target_dir}/${name_backup_file}.sql${ext}: already exist"
info " => renaming the new file as specify in allow-overwrite = $SQLBACKUP_OVERWRITE"
hours=`date +"%H%M"`
name_backup_file="${name_backup_file}.${hours}"
# if the backup exists and SQLBACKUP_OVERWRITE is set to OVERWRITE, overwrite it
elif [ -f "${target_dir}/${name_backup_file}.sql${ext}" ] && [ "$SQLBACKUP_OVERWRITE" == "overwrite" ] ; then
info "sqlbackup.sh: ${target_dir}/${name_backup_file}.sql${ext}: already exist"
info " => overwrite file as specify in allow-overwrite = $SQLBACKUP_OVERWRITE"
fi
###
# mysqldump Option :
# --add-drop-table : Add a 'drop table' before each create.
# useful if you want to overwrite the database without deleting tables first;
# this is needed to use restore from the alternc interface
# --allow-keywords : Allow creation of column names that are keywords.
#
# --quote-names : Quote table and column names with `
# Useful if you have spaces in table or column names
# --force : Continue even if we get an sql-error.
# To avoid ending the script during backup execution;
# allows the script to back up the other databases if one of them has an error
# --quick : Don't buffer query, dump directly to stdout.
# optimisation option
# --extended-insert : Allows utilization of the new, much faster INSERT syntax.
# optimization option
# --add-locks : Add locks around insert statements.
# --lock-tables : Lock all tables for read.
# those 2 options avoid inserts during the dump, which could create an inconsistent
# state of the database backup
if [ "$DO_BACKUP" == "YES" ]; then
command="mysqldump --defaults-file=/etc/alternc/my.cnf --add-drop-table --allow-keywords --quote-names --force --quick --add-locks --lock-tables --extended-insert $db"
if [ "$compressed" -eq 1 ] ; then
debug "$command > ${target_dir}/${name_backup_file}.sql${ext}"
$command | gzip -c > "${target_dir}/${name_backup_file}.sql${ext}" || echo "backup failed for ${name_backup_file}"
else
debug "$command > ${target_dir}/${name_backup_file}.sql${ext}"
$command > "${target_dir}/${name_backup_file}.sql${ext}" || echo "backup failed for ${name_backup_file}"
fi
fi
IFS=" "
done
IFS="$old_ifs"
}
# read_parameters gets all command-line arguments and analyzes them
#
# return:
read_parameters() {
# for all parameter give to the script
while [ "$1" != "" ] ; do
case "$1" in
-h|--help) usage; exit ;;
-v|--verbose) VERBOSE="ON" ;;
-d|--debug) DEBUG="ON" ;;
-t|--type) shift; TYPE="$1";;
-n|--name-method|--name-methode) shift; SQLBACKUP_TYPE="$1";;
-a|--allow-overwrite|--allow-ovewrite) shift; SQLBACKUP_OVERWRITE="$1" ;;
daily|weekly) TYPE="$1";; # backwards compatibility
*)
error "invalid option -- $1"
error "Try \`sqlbackup.sh --help' for more information."
exit ;;
esac
# in case no argument is given to an option,
# shift would exit if the argument list is already empty;
# add a test to avoid this, at least so the error message gets printed
[ "$1" != "" ] && shift
done
debug "TYPE = $TYPE"
debug "SQLBACKUP_TYPE = $SQLBACKUP_TYPE"
debug "SQLBACKUP_OVERWRITE = $SQLBACKUP_OVERWRITE"
# check options
if [ "$TYPE" == "daily" ]; then
# Daily :
mode=2
coef=1
elif [ "$TYPE" == "weekly" ] ; then
# Weekly:
mode=1
coef=7
elif [ -n "$TYPE" ] ; then
error "missing argument: type"
error "Try \`sqlbackup.sh --help' for more information."
exit
else
error "invalid argument: type -- $TYPE"
error "Try \`sqlbackup.sh --help' for more information."
exit
fi
if ! ( [ -z "$SQLBACKUP_TYPE" ] ||
[ "$SQLBACKUP_TYPE" == "date" ] ||
[ "$SQLBACKUP_TYPE" == "rotate" ] ) ; then
error "invalid argument: name-methode -- $SQLBACKUP_TYPE"
error "Try \`sqlbackup.sh --help' for more information."
exit
fi
if ! ( [ -z "$SQLBACKUP_OVERWRITE" ] ||
[ "$SQLBACKUP_OVERWRITE" == "no" ] ||
[ "$SQLBACKUP_OVERWRITE" == "rename" ] ||
[ "$SQLBACKUP_OVERWRITE" == "overwrite" ] ); then
error "invalid argument: allow-ovewrite -- $SQLBACKUP_OVERWRITE"
error "Try \`sqlbackup.sh --help' for more information."
exit
fi
}
# a quick intro to the software, displayed when no params found
usage() {
echo "Usage: sqlbackup.sh [OPTION] -t TYPE
sqlbackup.sh is a script used by alternc for sql backup
Mandatory arguments to long options are mandatory for short options too.
-v, --verbose set verbose mode on
-d, --debug set debug mode on
-n, --name-method METHOD set the naming method for backup files
-a, --allow-overwrite OVERWRITE specify the behaviour if a backup file already exists
-t, --type TYPE set backup type
-h, --help display this help and exit
the TYPE argument specifies the type of backup. Here are the values:
daily Execute a daily backup on all databases set to daily backup
weekly Execute a weekly backup on all databases set to weekly backup
the METHOD argument specifies the naming method for backup files. Here are the values:
date insert the date of the backup in the backup file's name
(default value)
rotate rename file as file.<number><extension> where <number>
is incremented
the OVERWRITE argument specifies the behaviour of the script if a backup file already exists.
Here are the values:
no if a backup file already exists, no backup done
rename if a backup file already exists, add an extension to the new
backup file
overwrite if a backup file already exists, overwrite it with the new
backup"
}
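# Example invocation (illustrative values only):
#   sqlbackup.sh --verbose --type daily --name-method date --allow-overwrite rename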
debug begin $@
# read all paramter before doing anything before
read_parameters $@
debug end
###
# select backup information from the alternc database in the db table
# all backups for the specified mode (daily or weekly)
# option :
# --batch : Print results with a tab as separator, each row on a new line.
# avoids separators like "|" which are not useful in a shell script;
# we need to set the IFS environment variable to "\t" (tabulation) for
# the `read' command (to indicate the field separator; by default `read'
# uses space)
# tail -n '+2' permits skipping the first line (header line)
# execute dobck on all databases found by the sql request
#
# the "<< EOF" means: send data to the command until EOF (end of file)
#
debug /usr/bin/mysql --defaults-file=/etc/alternc/my.cnf --batch
/usr/bin/mysql --defaults-file=/etc/alternc/my.cnf --batch << EOF | tail -n '+2' | dobck
SELECT login, pass, db, bck_history, bck_gzip, bck_dir
FROM db
WHERE bck_mode=$mode;
EOF
# vim: et sw=4
|
AlternC/AlternC
|
src/sqlbackup.sh
|
Shell
|
gpl-2.0
| 15,964 |
#!/bin/bash
# old testing setup
# Test configurations with TestBaseDecider as Decider
./decider > out.tmp 2>&1
diff -I '^Assigned runID=' -I '^Loading NED files from' -w exp-output out.tmp
rm -f out.tmp
|
olafur/mixim
|
tests/decider/runTest.sh
|
Shell
|
gpl-2.0
| 208 |
#!/usr/bin/env bash
################################################################################
# THIS FILE IS 100% GENERATED BY ZPROJECT; DO NOT EDIT EXCEPT EXPERIMENTALLY #
# READ THE ZPROJECT/README.MD FOR INFORMATION ABOUT MAKING PERMANENT CHANGES. #
################################################################################
set -x
set -e
if [ "$BUILD_TYPE" == "default" ]; then
# Tell travis to deploy all files in dist
mkdir dist
export FTY_METRIC_SNMP_DEPLOYMENT=dist/*
# Move archives to dist
mv *.tar.gz dist
mv *.zip dist
# Generate hash sums
cd dist
md5sum *.zip *.tar.gz > MD5SUMS
sha1sum *.zip *.tar.gz > SHA1SUMS
cd -
elif [ "$BUILD_TYPE" == "bindings" ] && [ "$BINDING" == "jni" ]; then
( cd bindings/jni && TERM=dumb PKG_CONFIG_PATH=/tmp/lib/pkgconfig ./gradlew clean bintrayUpload )
cp bindings/jni/android/fty_metric_snmp-android.jar fty_metric_snmp-android-1.0.0.jar
export FTY_METRIC_SNMP_DEPLOYMENT=fty_metric_snmp-android-1.0.0.jar
else
export FTY_METRIC_SNMP_DEPLOYMENT=""
fi
|
thalman/fty-metric-snmp
|
ci_deploy.sh
|
Shell
|
gpl-2.0
| 1,077 |
#!/bin/sh
# Setup development environment on Mac OS X (tested with 10.6.8 and Xcode 3.2.6)
#
# $Id$
#
# Copyright 2011 Michael Tuexen, Joerg Mayer, Guy Harris (see AUTHORS file)
#
# Wireshark - Network traffic analyzer
# By Gerald Combs <[email protected]>
# Copyright 1998 Gerald Combs
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# To build cmake
# CMAKE=1
#
# To build all libraries as 32-bit libraries uncomment the following three lines.
#
# export CFLAGS="$CFLAGS -arch i386"
# export CXXFLAGS="$CXXFLAGS -arch i386"
# export LDFLAGS="$LDFLAGS -arch i386"
#
# and change "macx-clang" to "macx-clang-32" in the line below.
#
# Note: when building against the 10.6 SDK, clang fails, because there's
# a missing libstdc++.dylib in the SDK; this does not bother g++, however.
#
#TARGET_PLATFORM=macx-g++
TARGET_PLATFORM=macx-clang
#
# Versions of packages to download and install.
#
#
# Some packages need xz to unpack their current source.
# xz is not yet provided with OS X.
#
XZ_VERSION=5.0.4
#
# In case we want to build with cmake.
#
CMAKE_VERSION=2.8.10.2
#
# The following libraries and tools are required even to build only TShark.
#
GETTEXT_VERSION=0.18.2
GLIB_VERSION=2.36.0
PKG_CONFIG_VERSION=0.28
#
# One or more of the following libraries are required to build Wireshark.
#
# If you don't want to build with Qt, comment out the QT_VERSION= line.
#
# If you want to build with GTK+ 2, comment out the GTK_VERSION=3.* line
# and un-comment the GTK_VERSION=2.* line.
#
# If you don't want to build with GTK+ at all, comment out both lines.
#
#QT_VERSION=5.1.1
QT_VERSION=5.2.0
GTK_VERSION=2.24.17
#GTK_VERSION=3.5.2
if [ "$GTK_VERSION" ]; then
#
# We'll be building GTK+, so we need some additional libraries.
#
GTK_MAJOR_VERSION="`expr $GTK_VERSION : '\([0-9][0-9]*\).*'`"
GTK_MINOR_VERSION="`expr $GTK_VERSION : '[0-9][0-9]*\.\([0-9][0-9]*\).*'`"
GTK_DOTDOT_VERSION="`expr $GTK_VERSION : '[0-9][0-9]*\.[0-9][0-9]*\.\([0-9][0-9]*\).*'`"
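    # e.g. with GTK_VERSION=2.24.17 the three expr calls above yield 2, 24 and 17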
ATK_VERSION=2.8.0
PANGO_VERSION=1.30.1
PNG_VERSION=1.5.17
PIXMAN_VERSION=0.26.0
CAIRO_VERSION=1.12.2
GDK_PIXBUF_VERSION=2.28.0
fi
#
# The following libraries are optional.
# Comment them out if you don't want them, but note that some of
# the optional libraries are required by other optional libraries.
#
LIBSMI_VERSION=0.4.8
#
# libgpg-error is required for libgcrypt.
#
LIBGPG_ERROR_VERSION=1.10
#
# libgcrypt is required for GnuTLS.
# XXX - the link for "Libgcrypt source code" at
# http://www.gnupg.org/download/#libgcrypt is for 1.5.0, and is a bzip2
# file, but http://directory.fsf.org/project/libgcrypt/ lists only
# 1.4.6.
#
LIBGCRYPT_VERSION=1.5.0
GNUTLS_VERSION=2.12.19
# Stay with Lua 5.1 when updating until the code has been changed
# to support 5.2
LUA_VERSION=5.1.5
PORTAUDIO_VERSION=pa_stable_v19_20111121
#
# XXX - they appear to have an unversioned gzipped tarball for the
# current version; should we just download that, with some other
# way of specifying whether to download the GeoIP API?
#
GEOIP_VERSION=1.4.8
CARES_VERSION=1.10.0
DARWIN_MAJOR_VERSION=`uname -r | sed 's/\([0-9]*\).*/\1/'`
#
# GNU autotools; they're provided with releases up to Snow Leopard, but
# not in later releases.
#
if [[ $DARWIN_MAJOR_VERSION -gt 10 ]]; then
AUTOCONF_VERSION=2.69
AUTOMAKE_VERSION=1.13.3
LIBTOOL_VERSION=2.4.2
fi
uninstall() {
if [ -d macosx-support-libs ]
then
cd macosx-support-libs
#
# Uninstall items in the reverse order from the order in which they're
# installed. Only uninstall if the download/build/install process
# completed; uninstall the version that appears in the name of
# the -done file.
#
# We also do a "make distclean", so that we don't have leftovers from
# old configurations.
#
installed_cares_version=`ls cares-*-done 2>/dev/null | sed 's/cares-\(.*\)-done/\1/'`
if [ ! -z "$installed_cares_version" ] ; then
echo "Uninstalling C-Ares API:"
cd cares-$installed_cares_version
$DO_MAKE_UNINSTALL || exit 1
make distclean || exit 1
cd ..
rm cares-$installed_cares_version-done
fi
installed_geoip_version=`ls geoip-*-done 2>/dev/null | sed 's/geoip-\(.*\)-done/\1/'`
if [ ! -z "$installed_geoip_version" ] ; then
echo "Uninstalling GeoIP API:"
cd GeoIP-$installed_geoip_version
$DO_MAKE_UNINSTALL || exit 1
make distclean || exit 1
cd ..
rm geoip-$installed_geoip_version-done
fi
if [ "$PORTAUDIO_VERSION" -a -f portaudio-done ] ; then
echo "Uninstalling PortAudio:"
cd portaudio
$DO_MAKE_UNINSTALL || exit 1
make distclean || exit 1
cd ..
rm portaudio-done
fi
installed_lua_version=`ls lua-*-done 2>/dev/null | sed 's/lua-\(.*\)-done/\1/'`
if [ ! -z "$installed_lua_version" ] ; then
echo "Uninstalling Lua:"
#
# Lua has no "make uninstall", so just remove stuff manually.
# There's no configure script, so there's no need for
# "make distclean", either; just do "make clean".
#
(cd /usr/local/bin; $DO_RM -f lua luac)
(cd /usr/local/include; $DO_RM -f lua.h luaconf.h lualib.h lauxlib.h lua.hpp)
(cd /usr/local/lib; $DO_RM -f liblua.a)
(cd /usr/local/man/man1; $DO_RM -f lua.1 luac.1)
cd lua-$installed_lua_version
make clean || exit 1
cd ..
rm lua-$installed_lua_version-done
fi
installed_gnutls_version=`ls gnutls-*-done 2>/dev/null | sed 's/gnutls-\(.*\)-done/\1/'`
if [ ! -z "$installed_gnutls_version" ] ; then
echo "Uninstalling GnuTLS:"
cd gnutls-$installed_gnutls_version
$DO_MAKE_UNINSTALL || exit 1
make distclean || exit 1
cd ..
rm gnutls-$installed_gnutls_version-done
fi
installed_libgcrypt_version=`ls libgcrypt-*-done 2>/dev/null | sed 's/libgcrypt-\(.*\)-done/\1/'`
if [ ! -z "$installed_libgcrypt_version" ] ; then
echo "Uninstalling libgcrypt:"
cd libgcrypt-$installed_libgcrypt_version
$DO_MAKE_UNINSTALL || exit 1
make distclean || exit 1
cd ..
rm libgcrypt-$installed_libgcrypt_version-done
fi
installed_libgpg_error_version=`ls libgpg-error-*-done 2>/dev/null | sed 's/libgpg-error-\(.*\)-done/\1/'`
if [ ! -z "$installed_libgpg_error_version" ] ; then
echo "Uninstalling libgpg-error:"
cd libgpg-error-$installed_libgpg_error_version
$DO_MAKE_UNINSTALL || exit 1
make distclean || exit 1
cd ..
rm libgpg-error-$installed_libgpg_error_version-done
fi
installed_libsmi_version=`ls libsmi-*-done 2>/dev/null | sed 's/libsmi-\(.*\)-done/\1/'`
if [ ! -z "$installed_libsmi_version" ] ; then
echo "Uninstalling libsmi:"
cd libsmi-$installed_libsmi_version
$DO_MAKE_UNINSTALL || exit 1
make distclean || exit 1
cd ..
rm libsmi-$installed_libsmi_version-done
fi
installed_gtk_version=`ls gtk+-*-done 2>/dev/null | sed 's/gtk+-\(.*\)-done/\1/'`
if [ ! -z "$installed_gtk_version" ] ; then
echo "Uninstalling GTK+:"
cd gtk+-$installed_gtk_version
$DO_MAKE_UNINSTALL || exit 1
make distclean || exit 1
cd ..
rm gtk+-$installed_gtk_version-done
fi
installed_gdk_pixbuf_version=`ls gdk-pixbuf-*-done 2>/dev/null | sed 's/gdk-pixbuf-\(.*\)-done/\1/'`
if [ ! -z "$installed_gdk_pixbuf_version" ] ; then
echo "Uninstalling gdk-pixbuf:"
cd gdk-pixbuf-$installed_gdk_pixbuf_version
$DO_MAKE_UNINSTALL || exit 1
make distclean || exit 1
cd ..
rm gdk-pixbuf-$installed_gdk_pixbuf_version-done
fi
installed_pango_version=`ls pango-*-done 2>/dev/null | sed 's/pango-\(.*\)-done/\1/'`
if [ ! -z "$installed_pango_version" ] ; then
echo "Uninstalling Pango:"
cd pango-$installed_pango_version
$DO_MAKE_UNINSTALL || exit 1
make distclean || exit 1
cd ..
rm pango-$installed_pango_version-done
fi
installed_atk_version=`ls atk-*-done 2>/dev/null | sed 's/atk-\(.*\)-done/\1/'`
if [ ! -z "$installed_atk_version" ] ; then
echo "Uninstalling ATK:"
cd atk-$installed_atk_version
$DO_MAKE_UNINSTALL || exit 1
make distclean || exit 1
cd ..
rm atk-$installed_atk_version-done
fi
installed_cairo_version=`ls cairo-*-done 2>/dev/null | sed 's/cairo-\(.*\)-done/\1/'`
if [ ! -z "$installed_cairo_version" ] ; then
echo "Uninstalling Cairo:"
cd cairo-$installed_cairo_version
$DO_MAKE_UNINSTALL || exit 1
make distclean || exit 1
cd ..
rm cairo-$installed_cairo_version-done
fi
installed_pixman_version=`ls pixman-*-done 2>/dev/null | sed 's/pixman-\(.*\)-done/\1/'`
if [ ! -z "$installed_pixman_version" ] ; then
echo "Uninstalling pixman:"
cd pixman-$installed_pixman_version
$DO_MAKE_UNINSTALL || exit 1
make distclean || exit 1
cd ..
rm pixman-$installed_pixman_version-done
fi
installed_libpng_version=`ls libpng-*-done 2>/dev/null | sed 's/libpng-\(.*\)-done/\1/'`
if [ ! -z "$installed_libpng_version" ] ; then
echo "Uninstalling libpng:"
cd libpng-$installed_libpng_version
$DO_MAKE_UNINSTALL || exit 1
make distclean || exit 1
cd ..
rm libpng-$installed_libpng_version-done
fi
installed_qt_version=`ls qt-*-done 2>/dev/null | sed 's/qt-\(.*\)-done/\1/'`
if [ ! -z "$installed_qt_version" ] ; then
echo "Uninstalling Qt:"
cd qt-everywhere-opensource-src-$installed_qt_version
$DO_MAKE_UNINSTALL || exit 1
#
# XXX - "make distclean" doesn't work. qmake sure does a
# good job of constructing Makefiles that work correctly....
#
#make distclean || exit 1
cd ..
rm qt-$installed_qt_version-done
fi
installed_glib_version=`ls glib-*-done 2>/dev/null | sed 's/glib-\(.*\)-done/\1/'`
if [ ! -z "$installed_glib_version" ] ; then
echo "Uninstalling GLib:"
cd glib-$installed_glib_version
$DO_MAKE_UNINSTALL || exit 1
make distclean || exit 1
cd ..
rm glib-$installed_glib_version-done
fi
installed_pkg_config_version=`ls pkg-config-*-done 2>/dev/null | sed 's/pkg-config-\(.*\)-done/\1/'`
if [ ! -z "$installed_pkg_config_version" ] ; then
echo "Uninstalling pkg-config:"
cd pkg-config-$installed_pkg_config_version
$DO_MAKE_UNINSTALL || exit 1
make distclean || exit 1
cd ..
rm pkg-config-$installed_pkg_config_version-done
fi
installed_gettext_version=`ls gettext-*-done 2>/dev/null | sed 's/gettext-\(.*\)-done/\1/'`
if [ ! -z "$installed_gettext_version" ] ; then
echo "Uninstalling GNU gettext:"
cd gettext-$installed_gettext_version
$DO_MAKE_UNINSTALL || exit 1
make distclean || exit 1
cd ..
rm gettext-$installed_gettext_version-done
fi
installed_cmake_version=`ls cmake-*-done 2>/dev/null | sed 's/cmake-\(.*\)-done/\1/'`
if [ ! -z "$installed_cmake_version" ]; then
echo "Uninstalling CMake:"
cd cmake-$installed_cmake_version
$DO_MAKE_UNINSTALL || exit 1
make distclean || exit 1
cd ..
rm cmake-$installed_cmake_version-done
fi
installed_libtool_version=`ls libtool-*-done 2>/dev/null | sed 's/libtool-\(.*\)-done/\1/'`
if [ ! -z "$installed_libtool_version" ] ; then
echo "Uninstalling GNU libtool:"
cd libtool-$installed_libtool_version
mv /usr/local/bin/glibtool /usr/local/bin/libtool
mv /usr/local/bin/glibtoolize /usr/local/bin/libtoolize
$DO_MAKE_UNINSTALL || exit 1
make distclean || exit 1
cd ..
rm libtool-$installed_libtool_version-done
fi
installed_automake_version=`ls automake-*-done 2>/dev/null | sed 's/automake-\(.*\)-done/\1/'`
if [ ! -z "$installed_automake_version" ] ; then
echo "Uninstalling GNU automake:"
cd automake-$installed_automake_version
$DO_MAKE_UNINSTALL || exit 1
make distclean || exit 1
cd ..
rm automake-$installed_automake_version-done
fi
installed_autoconf_version=`ls autoconf-*-done 2>/dev/null | sed 's/autoconf-\(.*\)-done/\1/'`
if [ ! -z "$installed_autoconf_version" ] ; then
echo "Uninstalling GNU autoconf:"
cd autoconf-$installed_autoconf_version
$DO_MAKE_UNINSTALL || exit 1
make distclean || exit 1
cd ..
rm autoconf-$installed_autoconf_version-done
fi
installed_xz_version=`ls xz-*-done 2>/dev/null | sed 's/xz-\(.*\)-done/\1/'`
if [ ! -z "$installed_xz_version" ] ; then
echo "Uninstalling xz:"
cd xz-$installed_xz_version
$DO_MAKE_UNINSTALL || exit 1
make distclean || exit 1
cd ..
rm xz-$installed_xz_version-done
fi
fi
}
#
# Do we have permission to write in /usr/local?
#
# If so, assume we have permission to write in its subdirectories.
# (If that's not the case, this test needs to check the subdirectories
# as well.)
#
# If not, do "make install", "make uninstall", and the removes for Lua
# with sudo.
#
if [ -w /usr/local ]
then
DO_MAKE_INSTALL="make install"
DO_MAKE_UNINSTALL="make uninstall"
DO_RM="rm"
else
DO_MAKE_INSTALL="sudo make install"
DO_MAKE_UNINSTALL="sudo make uninstall"
DO_RM="sudo rm"
fi
#
# If we have SDKs available, the default target OS is the major version
# of the one we're running; get that and strip off the third component.
#
for i in /Developer/SDKs \
/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs \
/Library/Developer/CommandLineTools/SDKs
do
if [ -d "$i" ]
then
min_osx_target=`sw_vers -productVersion | sed 's/\([0-9]*\)\.\([0-9]*\)\.[0-9]*/\1.\2/'`
break
fi
done
#
# Parse command-line flags:
#
# -h - print help.
# -t <target> - build libraries so that they'll work on the specified
# version of OS X and later versions.
# -u - do an uninstall.
#
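# Example (illustrative target version):
#   ./macosx-setup.sh -t 10.8
#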
while getopts ht:u name
do
case $name in
u)
do_uninstall=yes
;;
t)
min_osx_target="$OPTARG"
;;
h|?)
echo "Usage: macosx-setup.sh [ -t <target> ] [ -u ]" 1>&1
exit 0
;;
esac
done
if [ "$do_uninstall" = "yes" ]
then
uninstall
exit 0
fi
#
# Configure scripts tend to set CFLAGS and CXXFLAGS to "-g -O2" if
# invoked without CFLAGS or CXXFLAGS being set in the environment.
#
# However, we *are* setting them in the environment, for our own
# nefarious purposes, so start them out as "-g -O2".
#
CFLAGS="-g -O2"
CXXFLAGS="-g -O2"
#
# To make this work on Leopard (rather than working *on* Snow Leopard
# when building *for* Leopard) will take more work.
#
# For one thing, Leopard's /usr/X11/lib/libXdamage.la claims, at least
# with all software updates applied, that the Xdamage shared library
# is libXdamage.1.0.0.dylib, but it is, in fact, libXdamage.1.1.0.dylib.
# This causes problems when building GTK+, so the script would have to
# fix that file.
#
if [[ $DARWIN_MAJOR_VERSION -le 9 ]]; then
echo "This script does not support any versions of OS X before Snow Leopard" 1>&2
exit 1
fi
# if no make options are present, set default options
if [ -z "$MAKE_BUILD_OPTS" ] ; then
# by default use 1.5x number of cores for parallel build
MAKE_BUILD_OPTS="-j $(( $(sysctl -n hw.logicalcpu) * 3 / 2))"
fi
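# e.g. on a machine with 8 logical cores the default above works out to MAKE_BUILD_OPTS="-j 12"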
#
# If we have a target release, look for its SDK, and build libraries
# against it rather than against the headers and, more importantly,
# libraries that come with the OS, so that we don't end up with
# support libraries that only work on the OS version on which
# we built them, not earlier versions of the same release, or
# earlier releases if the minimum is earlier.
#
if [ ! -z "$min_osx_target" ]
then
for i in /Developer/SDKs \
/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs \
/Library/Developer/CommandLineTools/SDKs
do
if [ -d "$i"/"MacOSX$min_osx_target.sdk" ]
then
SDKPATH="$i"/"MacOSX$min_osx_target.sdk"
break
fi
done
if [ -z "$SDKPATH" ]
then
echo "macosx-setup.sh: Couldn't find the SDK for OS X $min_osx_target" 1>&2
exit 1
fi
#
# Make sure there are links to /usr/local/include and /usr/local/lib
# in the SDK's usr/local.
#
if [ ! -e $SDKPATH/usr/local/include ]
then
if [ ! -d $SDKPATH/usr/local ]
then
sudo mkdir $SDKPATH/usr/local
fi
sudo ln -s /usr/local/include $SDKPATH/usr/local/include
fi
if [ ! -e $SDKPATH/usr/local/lib ]
then
if [ ! -d $SDKPATH/usr/local ]
then
sudo mkdir $SDKPATH/usr/local
fi
sudo ln -s /usr/local/lib $SDKPATH/usr/local/lib
fi
#
# Set the minimum OS version for which to build to the specified
# minimum target OS version, so we don't, for example, end up using
# linker features supported by the OS version on which we're building
# but not by the target version.
#
VERSION_MIN_FLAGS="-mmacosx-version-min=$min_osx_target"
#
# Compile and link against the SDK.
#
SDKFLAGS="-isysroot $SDKPATH"
if [[ "$min_osx_target" == "10.5" ]]
then
#
# Cairo is part of Mac OS X 10.6 and later.
# The *headers* are supplied by 10.5, but the *libraries*
# aren't, so we have to build it if we're building for 10.5.
#
cairo_not_in_the_os=yes
#
# Build with older versions of the support libraries, as
# were used on the Wireshark Leopard buildbot at one
# point. (Most of these versions come from the About page
# from Wireshark 1.8.6, the last build done on that buildbot;
# the ATK version isn't reported, so this is a guess.)
#
# If you want to try building with newer versions of
# the libraries, note that:
#
# The version of fontconfig that comes with Leopard doesn't
# support FC_WEIGHT_EXTRABLACK, so we can't use any version
# of Pango newer than 1.22.4.
#
# However, Pango 1.22.4 doesn't work with versions of GLib
# after 2.29.6, because Pango 1.22.4 uses G_CONST_RETURN and
# GLib 2.29.8 and later deprecate it (there doesn't appear to
# be a GLib 2.29.7). That means we'd either have to patch
# Pango not to use it (just use "const"; G_CONST_RETURN was
# there to allow code to choose whether to use "const" or not),
# or use GLib 2.29.6 or earlier.
#
# GLib 2.29.6 includes an implementation of g_bit_lock() that,
# on x86 (32-bit and 64-bit), uses asms in a fashion
# ("asm volatile goto") that requires GCC 4.5 or later, which
# is later than the compilers that come with Leopard and Snow
# Leopard. Recent versions of GLib check for that, but 2.29.6
# doesn't, so, if you want to build GLib 2.29.6 on Leopard or
# Snow Leopard, you would have to patch glib/gbitlock.c to do
# what the newer versions of GLib do:
#
# define a USE_ASM_GOTO macro that indicates whether "asm goto"
# can be used:
# #if (defined (i386) || defined (__amd64__))
# #if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5)
# #define USE_ASM_GOTO 1
# #endif
# #endif
#
# replace all occurrences of
#
# #if defined (__GNUC__) && (defined (i386) || defined (__amd64__))
#
# with
#
# #ifdef USE_ASM_GOTO
#
# Using GLib 2.29.6 or earlier, however, means that we can't
# use a version of ATK later than 2.3.93, as those versions
# don't work with GLib 2.29.6. The same applies to gdk-pixbuf;
# versions of gdk-pixbuf after 2.24.1 won't work with GLib
# 2.29.6.
#
# Then you have to make sure that what you've built doesn't
# cause the X server that comes with Leopard to crash; at
# least one attempt at building for Leopard did.
#
# At least if building on Leopard, you might also find
# that, with various older versions of Cairo, including
# 1.6.4 and at least some 1.8.x versions, when you try to
# build it, the build fails because it can't find
# png_set_longjmp_fn(). I vaguely remember dealing with that,
# ages ago, but don't remember what I did.
#
GLIB_VERSION=2.16.3
CAIRO_VERSION=1.6.4
ATK_VERSION=1.24.0
PANGO_VERSION=1.20.2
GTK_VERSION=2.12.9
#
# That version of GTK+ includes gdk-pixbuf.
# XXX - base this on the version of GTK+ requested.
#
GDK_PIXBUF_VERSION=
#
# Libgcrypt 1.5.0 fails to compile due to some problem with an
# asm in rijndael.c, at least with i686-apple-darwin10-gcc-4.2.1
# (GCC) 4.2.1 (Apple Inc. build 5666) (dot 3) when building
# 32-bit.
#
# We try libgcrypt 1.4.3 instead, as that's what shows up in
# the version from the Leopard buildbot.
LIBGCRYPT_VERSION=1.4.3
#
# Build 32-bit while we're at it; Leopard has a bug that
# causes some BPF functions not to work with 64-bit userland
# code, so capturing won't work.
#
CFLAGS="$CFLAGS -arch i386"
CXXFLAGS="$CXXFLAGS -arch i386"
export LDFLAGS="$LDFLAGS -arch i386"
fi
fi
export CFLAGS
export CXXFLAGS
#
# You need Xcode or the command-line tools installed to get the compilers.
#
if [ ! -x /usr/bin/xcodebuild ]; then
echo "Please install Xcode first (should be available on DVD or from http://developer.apple.com/xcode/index.php)."
exit 1
fi
if [ "$QT_VERSION" ]; then
#
# We need Xcode, not just the command-line tools, installed to build
# Qt.
#
if ! /usr/bin/xcrun -find xcrun >/dev/null 2>&1; then
echo "Please install Xcode first (should be available on DVD or from http://developer.apple.com/xcode/index.php)."
echo "The command-line build tools are not sufficient to build Qt."
exit 1
fi
fi
if [ "$GTK_VERSION" ]; then
#
# If we're building with GTK+, you also need the X11 SDK; with at least
# some versions of OS X and Xcode, that is, I think, an optional install.
# (Or it might be installed with X11, but I think *that* is an optional
# install on at least some versions of OS X.)
#
if [ ! -d /usr/X11/include ]; then
echo "Please install X11 and the X11 SDK first."
exit 1
fi
fi
export PKG_CONFIG_PATH=/usr/local/lib/pkgconfig:/usr/X11/lib/pkgconfig
#
# Do all the downloads and untarring in a subdirectory, so all that
# stuff can be removed once we've installed the support libraries.
#
if [ ! -d macosx-support-libs ]
then
mkdir macosx-support-libs || exit 1
fi
cd macosx-support-libs
# Start with xz: It is the sole download format of glib later than 2.31.2
#
if [ "$XZ_VERSION" -a ! -f xz-$XZ_VERSION-done ] ; then
echo "Downloading, building, and installing xz:"
[ -f xz-$XZ_VERSION.tar.bz2 ] || curl -O http://tukaani.org/xz/xz-$XZ_VERSION.tar.bz2 || exit 1
bzcat xz-$XZ_VERSION.tar.bz2 | tar xf - || exit 1
cd xz-$XZ_VERSION
CFLAGS="$CFLAGS -D_FORTIFY_SOURCE=0" ./configure || exit 1
make $MAKE_BUILD_OPTS || exit 1
$DO_MAKE_INSTALL || exit 1
cd ..
touch xz-$XZ_VERSION-done
fi
if [ "$AUTOCONF_VERSION" -a ! -f autoconf-$AUTOCONF_VERSION-done ] ; then
echo "Downloading, building and installing GNU autoconf..."
[ -f autoconf-$AUTOCONF_VERSION.tar.xz ] || curl -O ftp://ftp.gnu.org/gnu/autoconf/autoconf-$AUTOCONF_VERSION.tar.xz || exit 1
xzcat autoconf-$AUTOCONF_VERSION.tar.xz | tar xf - || exit 1
cd autoconf-$AUTOCONF_VERSION
./configure || exit 1
make $MAKE_BUILD_OPTS || exit 1
$DO_MAKE_INSTALL || exit 1
cd ..
touch autoconf-$AUTOCONF_VERSION-done
fi
if [ "$AUTOMAKE_VERSION" -a ! -f automake-$AUTOMAKE_VERSION-done ] ; then
echo "Downloading, building and installing GNU automake..."
[ -f automake-$AUTOMAKE_VERSION.tar.xz ] || curl -O ftp://ftp.gnu.org/gnu/automake/automake-$AUTOMAKE_VERSION.tar.xz || exit 1
xzcat automake-$AUTOMAKE_VERSION.tar.xz | tar xf - || exit 1
cd automake-$AUTOMAKE_VERSION
./configure || exit 1
make $MAKE_BUILD_OPTS || exit 1
$DO_MAKE_INSTALL || exit 1
cd ..
touch automake-$AUTOMAKE_VERSION-done
fi
if [ "$LIBTOOL_VERSION" -a ! -f libtool-$LIBTOOL_VERSION-done ] ; then
echo "Downloading, building and installing GNU libtool..."
[ -f libtool-$LIBTOOL_VERSION.tar.xz ] || curl -O ftp://ftp.gnu.org/gnu/libtool/libtool-$LIBTOOL_VERSION.tar.xz || exit 1
xzcat libtool-$LIBTOOL_VERSION.tar.xz | tar xf - || exit 1
cd libtool-$LIBTOOL_VERSION
./configure || exit 1
make $MAKE_BUILD_OPTS || exit 1
$DO_MAKE_INSTALL || exit 1
mv /usr/local/bin/libtool /usr/local/bin/glibtool
mv /usr/local/bin/libtoolize /usr/local/bin/glibtoolize
cd ..
touch libtool-$LIBTOOL_VERSION-done
fi
if [ -n "$CMAKE" -a ! -f cmake-$CMAKE_VERSION-done ]; then
echo "Downloading, building, and installing CMAKE:"
cmake_dir=`expr $CMAKE_VERSION : '\([0-9][0-9]*\.[0-9][0-9]*\).*'`
[ -f cmake-$CMAKE_VERSION.tar.gz ] || curl -O http://www.cmake.org/files/v$cmake_dir/cmake-$CMAKE_VERSION.tar.gz || exit 1
gzcat cmake-$CMAKE_VERSION.tar.gz | tar xf - || exit 1
cd cmake-$CMAKE_VERSION
./bootstrap || exit 1
make $MAKE_BUILD_OPTS || exit 1
$DO_MAKE_INSTALL || exit 1
cd ..
touch cmake-$CMAKE_VERSION-done
fi
#
# Start with GNU gettext; GLib requires it, and OS X doesn't have it
# or a BSD-licensed replacement.
#
# At least on Lion with Xcode 4, _FORTIFY_SOURCE gets defined as 2
# by default, which causes, for example, stpncpy to be defined as
# a hairy macro that collides with the GNU gettext configure script's
# attempts to workaround AIX's lack of a declaration for stpncpy,
# with the result being a huge train wreck. Define _FORTIFY_SOURCE
# as 0 in an attempt to keep the trains on separate tracks.
#
if [ ! -f gettext-$GETTEXT_VERSION-done ] ; then
echo "Downloading, building, and installing GNU gettext:"
[ -f gettext-$GETTEXT_VERSION.tar.gz ] || curl -O http://ftp.gnu.org/pub/gnu/gettext/gettext-$GETTEXT_VERSION.tar.gz || exit 1
gzcat gettext-$GETTEXT_VERSION.tar.gz | tar xf - || exit 1
cd gettext-$GETTEXT_VERSION
CFLAGS="$CFLAGS -D_FORTIFY_SOURCE=0 $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" ./configure || exit 1
make $MAKE_BUILD_OPTS || exit 1
$DO_MAKE_INSTALL || exit 1
cd ..
touch gettext-$GETTEXT_VERSION-done
fi
#
# GLib depends on pkg-config.
# By default, pkg-config depends on GLib; we break the dependency cycle
# by configuring pkg-config to use its own internal version of GLib.
#
if [ ! -f pkg-config-$PKG_CONFIG_VERSION-done ] ; then
echo "Downloading, building, and installing pkg-config:"
[ -f pkg-config-$PKG_CONFIG_VERSION.tar.gz ] || curl -O http://pkgconfig.freedesktop.org/releases/pkg-config-$PKG_CONFIG_VERSION.tar.gz || exit 1
gzcat pkg-config-$PKG_CONFIG_VERSION.tar.gz | tar xf - || exit 1
cd pkg-config-$PKG_CONFIG_VERSION
./configure --with-internal-glib || exit 1
make $MAKE_BUILD_OPTS || exit 1
$DO_MAKE_INSTALL || exit 1
cd ..
touch pkg-config-$PKG_CONFIG_VERSION-done
fi
if [ ! -f glib-$GLIB_VERSION-done ] ; then
echo "Downloading, building, and installing GLib:"
glib_dir=`expr $GLIB_VERSION : '\([0-9][0-9]*\.[0-9][0-9]*\).*'`
GLIB_MAJOR_VERSION="`expr $GLIB_VERSION : '\([0-9][0-9]*\).*'`"
GLIB_MINOR_VERSION="`expr $GLIB_VERSION : '[0-9][0-9]*\.\([0-9][0-9]*\).*'`"
GLIB_DOTDOT_VERSION="`expr $GLIB_VERSION : '[0-9][0-9]*\.[0-9][0-9]*\.\([0-9][0-9]*\).*'`"
if [[ $GLIB_MAJOR_VERSION -gt 2 ||
$GLIB_MINOR_VERSION -gt 28 ||
($GLIB_MINOR_VERSION -eq 28 && $GLIB_DOTDOT_VERSION -ge 8) ]]
then
#
# Starting with GLib 2.28.8, xz-compressed tarballs are available.
#
[ -f glib-$GLIB_VERSION.tar.xz ] || curl -L -O http://ftp.gnome.org/pub/gnome/sources/glib/$glib_dir/glib-$GLIB_VERSION.tar.xz || exit 1
xzcat glib-$GLIB_VERSION.tar.xz | tar xf - || exit 1
else
[ -f glib-$GLIB_VERSION.tar.bz2 ] || curl -L -O http://ftp.gnome.org/pub/gnome/sources/glib/$glib_dir/glib-$GLIB_VERSION.tar.bz2 || exit 1
bzcat glib-$GLIB_VERSION.tar.bz2 | tar xf - || exit 1
fi
cd glib-$GLIB_VERSION
#
# OS X ships with libffi, but doesn't provide its pkg-config file;
# explicitly specify LIBFFI_CFLAGS and LIBFFI_LIBS, so the configure
# script doesn't try to use pkg-config to get the appropriate
# C flags and loader flags.
#
# And, what's worse, at least with the version of Xcode that comes
# with Leopard, /usr/include/ffi/fficonfig.h doesn't define MACOSX,
# which causes the build of GLib to fail. If we don't find
# "#define.*MACOSX" in /usr/include/ffi/fficonfig.h, explicitly
# define it.
#
# While we're at it, suppress -Wformat-nonliteral to avoid a case
# where clang's stricter rules on when not to complain about
# non-literal format arguments cause it to complain about code
# that's safe but it wasn't told that. See my comment #25 in
# GNOME bug 691608:
#
# https://bugzilla.gnome.org/show_bug.cgi?id=691608#c25
#
# First, determine where the system include files are. (It's not
# necessarily /usr/include.) There's a bit of a greasy hack here;
# pre-5.x versions of the developer tools don't support the
# --show-sdk-path option, and will produce no output, so includedir
# will be set to /usr/include (in those older versions of the
# developer tools, there is a /usr/include directory).
#
includedir=`xcrun --show-sdk-path 2>/dev/null`/usr/include
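# For illustration (the exact path depends on the installed developer
# tools): on a 5.x-era Xcode this typically expands to something like
# /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.9.sdk/usr/include,
# while on older tools the xcrun call prints nothing and includedir
# degenerates to plain /usr/include, as described above.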
if grep -qs '#define.*MACOSX' $includedir/ffi/fficonfig.h
then
# It's defined, nothing to do
LIBFFI_CFLAGS="-I $includedir/ffi" LIBFFI_LIBS="-lffi" CFLAGS="$CFLAGS -Wno-format-nonliteral $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" ./configure || exit 1
else
LIBFFI_CFLAGS="-I $includedir/ffi" LIBFFI_LIBS="-lffi" CFLAGS="$CFLAGS -DMACOSX -Wno-format-nonliteral $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" ./configure || exit 1
fi
#
# Apply the fix to GNOME bug 529806:
#
# https://bugzilla.gnome.org/show_bug.cgi?id=529806
#
# if we have a version of GLib prior to 2.30.
#
if [[ $GLIB_MAJOR_VERSION -eq 2 && $GLIB_MINOR_VERSION -le 30 ]]
then
patch -p0 <../../macosx-support-lib-patches/glib-gconvert.c.patch || exit 1
fi
make $MAKE_BUILD_OPTS || exit 1
$DO_MAKE_INSTALL || exit 1
cd ..
touch glib-$GLIB_VERSION-done
fi
#
# Now we have reached a point where we can build everything but
# the GUI (Wireshark).
#
if [ "$QT_VERSION" -a ! -f qt-$QT_VERSION-done ]; then
echo "Downloading, building, and installing Qt:"
QT_MAJOR_VERSION="`expr $QT_VERSION : '\([0-9][0-9]*\).*'`"
QT_MINOR_VERSION="`expr $QT_VERSION : '[0-9][0-9]*\.\([0-9][0-9]*\).*'`"
QT_DOTDOT_VERSION="`expr $QT_VERSION : '[0-9][0-9]*\.[0-9][0-9]*\.\([0-9][0-9]*\).*'`"
QT_MAJOR_MINOR_VERSION=$QT_MAJOR_VERSION.$QT_MINOR_VERSION
#
# What you get for this URL might just be a 302 Found reply, so use
# -L so we get redirected.
#
curl -L -O http://download.qt-project.org/official_releases/qt/$QT_MAJOR_MINOR_VERSION/$QT_VERSION/single/qt-everywhere-opensource-src-$QT_VERSION.tar.gz
#
# Qt 5.1.x sets QMAKE_MACOSX_DEPLOYMENT_TARGET = 10.6
# in qtbase/mkspecs/$TARGET_PLATFORM/qmake.conf
# We may need to adjust this manually in the future.
#
# The -no-c++11 flag is needed to work around
# https://bugreports.qt-project.org/browse/QTBUG-30487
#
tar xf qt-everywhere-opensource-src-$QT_VERSION.tar.gz
cd qt-everywhere-opensource-src-$QT_VERSION
#
# We don't build Qt in its Full Shining Glory, as we don't need all
# of its components, and it takes *forever* to build in that form.
#
# Qt 5.2.0 beta1 fails to build on OS X without -no-xcb due to bug
# QTBUG-34382.
#
# Qt 5.x fails to build on OS X with -no-opengl due to bug
# QTBUG-31151.
#
./configure -v -sdk macosx$min_osx_target -platform $TARGET_PLATFORM \
-opensource -confirm-license -no-c++11 -no-dbus \
-no-sql-sqlite -no-xcb -nomake examples \
-skip qtdoc -skip qtquickcontrols -skip qtwebkit \
-skip qtwebkit-examples -skip qtxmlpatterns
make || exit 1
$DO_MAKE_INSTALL || exit 1
cd ..
touch qt-$QT_VERSION-done
fi
if [ "$GTK_VERSION" ]; then
#
# GTK+ 3 requires a newer Cairo build than the one that comes with
# 10.6, so we build Cairo if we are using GTK+ 3.
#
# In 10.6 and 10.7, it's an X11 library; if we build with "native" GTK+
# rather than X11 GTK+, we might have to build and install Cairo.
# In 10.8 and later, there is no X11, but it's included in Xquartz;
# again, if we build with "native" GTK+, we'd have to build and install
# it.
#
if [[ "$GTK_MAJOR_VERSION" -eq 3 || "$cairo_not_in_the_os" = yes ]]; then
#
# Requirements for Cairo first
#
# The libpng that comes with the X11 for Leopard has a bogus
# pkg-config file that lies about where the header files are,
# which causes other packages not to be able to find its
# headers.
#
# The libpng in later versions is not what the version of
# libpixman we build below wants - it wants libpng15.
#
if [ ! -f libpng-$PNG_VERSION-done ] ; then
echo "Downloading, building, and installing libpng:"
#
# The FTP site puts libpng x.y.* into a libpngxy directory.
#
subdir=`echo $PNG_VERSION | sed 's/\([1-9][0-9]*\)\.\([1-9][0-9]*\).*/libpng\1\2'/`
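# For illustration: with PNG_VERSION=1.5.17 the sed expression above
# produces subdir=libpng15, so the tarball is fetched from
# .../png/src/libpng15/libpng-1.5.17.tar.xz.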
[ -f libpng-$PNG_VERSION.tar.xz ] || curl -O ftp://ftp.simplesystems.org/pub/libpng/png/src/$subdir/libpng-$PNG_VERSION.tar.xz
xzcat libpng-$PNG_VERSION.tar.xz | tar xf - || exit 1
cd libpng-$PNG_VERSION
CFLAGS="$CFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" ./configure || exit 1
make $MAKE_BUILD_OPTS || exit 1
$DO_MAKE_INSTALL || exit 1
cd ..
touch libpng-$PNG_VERSION-done
fi
#
# The libpixman versions that come with the X11s for Leopard,
# Snow Leopard, and Lion are too old to support Cairo's image
# surface backend feature (which requires pixman-1 >= 0.22.0).
#
# XXX - what about the one that comes with the latest version
# of Xquartz?
#
if [ ! -f pixman-$PIXMAN_VERSION-done ] ; then
echo "Downloading, building, and installing pixman:"
[ -f pixman-$PIXMAN_VERSION.tar.gz ] || curl -O http://www.cairographics.org/releases/pixman-$PIXMAN_VERSION.tar.gz
gzcat pixman-$PIXMAN_VERSION.tar.gz | tar xf - || exit 1
cd pixman-$PIXMAN_VERSION
CFLAGS="$CFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" ./configure || exit 1
make V=1 $MAKE_BUILD_OPTS || exit 1
$DO_MAKE_INSTALL || exit 1
cd ..
touch pixman-$PIXMAN_VERSION-done
fi
#
# And now Cairo itself.
# XXX - with the libxcb that comes with 10.6, xcb_discard_reply()
# is missing, and the build fails.
#
if [ ! -f cairo-$CAIRO_VERSION-done ] ; then
echo "Downloading, building, and installing Cairo:"
CAIRO_MAJOR_VERSION="`expr $CAIRO_VERSION : '\([0-9][0-9]*\).*'`"
CAIRO_MINOR_VERSION="`expr $CAIRO_VERSION : '[0-9][0-9]*\.\([0-9][0-9]*\).*'`"
CAIRO_DOTDOT_VERSION="`expr $CAIRO_VERSION : '[0-9][0-9]*\.[0-9][0-9]*\.\([0-9][0-9]*\).*'`"
if [[ $CAIRO_MAJOR_VERSION -gt 1 ||
$CAIRO_MINOR_VERSION -gt 12 ||
($CAIRO_MINOR_VERSION -eq 12 && $CAIRO_DOTDOT_VERSION -ge 2) ]]
then
#
# Starting with Cairo 1.12.2, the tarballs are compressed with
# xz rather than gzip.
#
[ -f cairo-$CAIRO_VERSION.tar.xz ] || curl -O http://cairographics.org/releases/cairo-$CAIRO_VERSION.tar.xz || exit 1
xzcat cairo-$CAIRO_VERSION.tar.xz | tar xf - || exit 1
else
[ -f cairo-$CAIRO_VERSION.tar.gz ] || curl -O http://cairographics.org/releases/cairo-$CAIRO_VERSION.tar.gz || exit 1
gzcat cairo-$CAIRO_VERSION.tar.gz | tar xf - || exit 1
fi
cd cairo-$CAIRO_VERSION
# CFLAGS="$CFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" ./configure --enable-quartz=no || exit 1
# Maybe follow http://cairographics.org/end_to_end_build_for_mac_os_x/
CFLAGS="$CFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" ./configure --enable-quartz=yes || exit 1
#
# We must avoid the version of libpng that comes with X11; the
# only way I've found to force that is to forcibly set INCLUDES
# when we do the build, so that this comes before CAIRO_CFLAGS,
# which has -I/usr/X11/include added to it before anything
# connected to libpng is.
#
INCLUDES="-I/usr/local/include/libpng15" make $MAKE_BUILD_OPTS || exit 1
$DO_MAKE_INSTALL || exit 1
cd ..
touch cairo-$CAIRO_VERSION-done
fi
fi
if [ ! -f atk-$ATK_VERSION-done ] ; then
echo "Downloading, building, and installing ATK:"
atk_dir=`expr $ATK_VERSION : '\([0-9][0-9]*\.[0-9][0-9]*\).*'`
ATK_MAJOR_VERSION="`expr $ATK_VERSION : '\([0-9][0-9]*\).*'`"
ATK_MINOR_VERSION="`expr $ATK_VERSION : '[0-9][0-9]*\.\([0-9][0-9]*\).*'`"
ATK_DOTDOT_VERSION="`expr $ATK_VERSION : '[0-9][0-9]*\.[0-9][0-9]*\.\([0-9][0-9]*\).*'`"
if [[ $ATK_MAJOR_VERSION -gt 2 ||
($ATK_MAJOR_VERSION -eq 2 && $ATK_MINOR_VERSION -gt 0) ||
($ATK_MAJOR_VERSION -eq 2 && $ATK_MINOR_VERSION -eq 0 && $ATK_DOTDOT_VERSION -ge 1) ]]
then
#
# Starting with ATK 2.0.1, xz-compressed tarballs are available.
#
[ -f atk-$ATK_VERSION.tar.xz ] || curl -O http://ftp.gnome.org/pub/gnome/sources/atk/$atk_dir/atk-$ATK_VERSION.tar.xz || exit 1
xzcat atk-$ATK_VERSION.tar.xz | tar xf - || exit 1
else
[ -f atk-$ATK_VERSION.tar.bz2 ] || curl -O http://ftp.gnome.org/pub/gnome/sources/atk/$atk_dir/atk-$ATK_VERSION.tar.bz2 || exit 1
bzcat atk-$ATK_VERSION.tar.bz2 | tar xf - || exit 1
fi
cd atk-$ATK_VERSION
CFLAGS="$CFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" ./configure || exit 1
make $MAKE_BUILD_OPTS || exit 1
$DO_MAKE_INSTALL || exit 1
cd ..
touch atk-$ATK_VERSION-done
fi
if [ ! -f pango-$PANGO_VERSION-done ] ; then
echo "Downloading, building, and installing Pango:"
pango_dir=`expr $PANGO_VERSION : '\([0-9][0-9]*\.[0-9][0-9]*\).*'`
PANGO_MAJOR_VERSION="`expr $PANGO_VERSION : '\([0-9][0-9]*\).*'`"
PANGO_MINOR_VERSION="`expr $PANGO_VERSION : '[0-9][0-9]*\.\([0-9][0-9]*\).*'`"
if [[ $PANGO_MAJOR_VERSION -gt 1 ||
$PANGO_MINOR_VERSION -ge 29 ]]
then
#
# Starting with Pango 1.29, the tarballs are compressed with
# xz rather than bzip2.
#
[ -f pango-$PANGO_VERSION.tar.xz ] || curl -L -O http://ftp.gnome.org/pub/gnome/sources/pango/$pango_dir/pango-$PANGO_VERSION.tar.xz || exit 1
xzcat pango-$PANGO_VERSION.tar.xz | tar xf - || exit 1
else
[ -f pango-$PANGO_VERSION.tar.bz2 ] || curl -L -O http://ftp.gnome.org/pub/gnome/sources/pango/$pango_dir/pango-$PANGO_VERSION.tar.bz2 || exit 1
bzcat pango-$PANGO_VERSION.tar.bz2 | tar xf - || exit 1
fi
cd pango-$PANGO_VERSION
CFLAGS="$CFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" ./configure || exit 1
make $MAKE_BUILD_OPTS || exit 1
$DO_MAKE_INSTALL || exit 1
cd ..
touch pango-$PANGO_VERSION-done
fi
if [ "$GDK_PIXBUF_VERSION" -a ! -f gdk-pixbuf-$GDK_PIXBUF_VERSION-done ] ; then
echo "Downloading, building, and installing gdk-pixbuf:"
gdk_pixbuf_dir=`expr $GDK_PIXBUF_VERSION : '\([0-9][0-9]*\.[0-9][0-9]*\).*'`
[ -f gdk-pixbuf-$GDK_PIXBUF_VERSION.tar.xz ] || curl -L -O http://ftp.gnome.org/pub/gnome/sources/gdk-pixbuf/$gdk_pixbuf_dir/gdk-pixbuf-$GDK_PIXBUF_VERSION.tar.xz || exit 1
xzcat gdk-pixbuf-$GDK_PIXBUF_VERSION.tar.xz | tar xf - || exit 1
cd gdk-pixbuf-$GDK_PIXBUF_VERSION
#
# If we're building for 10.6, use libpng12; if you have 10.7.5, including
# X11, and Xcode 4.3.3, the system has libpng15, complete with pkg-config
# files, as part of X11, but 10.6's X11 has only libpng12, and the 10.6
# SDK in Xcode 4.3.3 also has only libpng12, and has no pkg-config files
# of its own, so we have to explicitly set LIBPNG to override the
# configure script, and also force the CFLAGS to look for the header
# files for libpng12 (note that -isysroot doesn't affect the arguments
# to -I, so we need to include the SDK path explicitly).
#
if [[ "$min_osx_target" = 10.6 ]]
then
LIBPNG="-L/usr/X11/lib -lpng12" CFLAGS="$CFLAGS $VERSION_MIN_FLAGS $SDKFLAGS -I$SDKPATH/usr/X11/include/libpng12" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" ./configure --without-libtiff --without-libjpeg || exit 1
else
CFLAGS="$CFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" ./configure --without-libtiff --without-libjpeg || exit 1
fi
make $MAKE_BUILD_OPTS || exit 1
$DO_MAKE_INSTALL || exit 1
cd ..
touch gdk-pixbuf-$GDK_PIXBUF_VERSION-done
fi
if [ ! -f gtk+-$GTK_VERSION-done ] ; then
echo "Downloading, building, and installing GTK+:"
gtk_dir=`expr $GTK_VERSION : '\([0-9][0-9]*\.[0-9][0-9]*\).*'`
if [[ $GTK_MAJOR_VERSION -gt 2 ||
$GTK_MINOR_VERSION -gt 24 ||
($GTK_MINOR_VERSION -eq 24 && $GTK_DOTDOT_VERSION -ge 5) ]]
then
#
# Starting with GTK+ 2.24.5, the tarballs are compressed with
# xz rather than gzip, in addition to bzip2; use xz, as we've
# built and installed it, and as xz compresses better than
# bzip2 so the tarballs take less time to download.
#
[ -f gtk+-$GTK_VERSION.tar.xz ] || curl -L -O http://ftp.gnome.org/pub/gnome/sources/gtk+/$gtk_dir/gtk+-$GTK_VERSION.tar.xz || exit 1
xzcat gtk+-$GTK_VERSION.tar.xz | tar xf - || exit 1
else
[ -f gtk+-$GTK_VERSION.tar.bz2 ] || curl -L -O http://ftp.gnome.org/pub/gnome/sources/gtk+/$gtk_dir/gtk+-$GTK_VERSION.tar.bz2 || exit 1
bzcat gtk+-$GTK_VERSION.tar.bz2 | tar xf - || exit 1
fi
cd gtk+-$GTK_VERSION
if [ $DARWIN_MAJOR_VERSION -ge "12" ]
then
#
# GTK+ 2.24.10, at least, doesn't build on Mountain Lion with the
# CUPS printing backend - either the CUPS API changed incompatibly
# or the backend was depending on non-API implementation details.
#
# Configure it out, on Mountain Lion and later, for now.
# (12 is the Darwin major version number in Mountain Lion.)
#
# Also, configure out libtiff and libjpeg; configure scripts
# just ignore unknown --enable/--disable and --with/--without
# options (at least they've always done so up to now).
#
CFLAGS="$CFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" ./configure --disable-cups --without-libtiff --without-libjpeg || exit 1
else
CFLAGS="$CFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" ./configure --without-libtiff --without-libjpeg || exit 1
fi
make $MAKE_BUILD_OPTS || exit 1
$DO_MAKE_INSTALL || exit 1
cd ..
touch gtk+-$GTK_VERSION-done
fi
fi
#
# Now we have reached a point where we can build everything including
# the GUI (Wireshark), but not with any optional features such as
# SNMP OID resolution, some forms of decryption, Lua scripting, playback
# of audio, or GeoIP mapping of IP addresses.
#
# We now conditionally download optional libraries to support them;
# the default is to download them all.
#
if [ "$LIBSMI_VERSION" -a ! -f libsmi-$LIBSMI_VERSION-done ] ; then
echo "Downloading, building, and installing libsmi:"
[ -f libsmi-$LIBSMI_VERSION.tar.gz ] || curl -L -O ftp://ftp.ibr.cs.tu-bs.de/pub/local/libsmi/libsmi-$LIBSMI_VERSION.tar.gz || exit 1
gzcat libsmi-$LIBSMI_VERSION.tar.gz | tar xf - || exit 1
cd libsmi-$LIBSMI_VERSION
CFLAGS="$CFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" ./configure || exit 1
make $MAKE_BUILD_OPTS || exit 1
$DO_MAKE_INSTALL || exit 1
cd ..
touch libsmi-$LIBSMI_VERSION-done
fi
if [ "$LIBGPG_ERROR_VERSION" -a ! -f libgpg-error-$LIBGPG_ERROR_VERSION-done ] ; then
echo "Downloading, building, and installing libgpg-error:"
[ -f libgpg-error-$LIBGPG_ERROR_VERSION.tar.bz2 ] || curl -L -O ftp://ftp.gnupg.org/gcrypt/libgpg-error/libgpg-error-$LIBGPG_ERROR_VERSION.tar.bz2 || exit 1
bzcat libgpg-error-$LIBGPG_ERROR_VERSION.tar.bz2 | tar xf - || exit 1
cd libgpg-error-$LIBGPG_ERROR_VERSION
CFLAGS="$CFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" ./configure || exit 1
make $MAKE_BUILD_OPTS || exit 1
$DO_MAKE_INSTALL || exit 1
cd ..
touch libgpg-error-$LIBGPG_ERROR_VERSION-done
fi
if [ "$LIBGCRYPT_VERSION" -a ! -f libgcrypt-$LIBGCRYPT_VERSION-done ] ; then
#
# libgpg-error is required for libgcrypt.
#
if [ -z "$LIBGPG_ERROR_VERSION" ]
then
echo "libgcrypt requires libgpg-error, but you didn't install libgpg-error." 1>&2
exit 1
fi
echo "Downloading, building, and installing libgcrypt:"
[ -f libgcrypt-$LIBGCRYPT_VERSION.tar.gz ] || curl -L -O ftp://ftp.gnupg.org/gcrypt/libgcrypt/libgcrypt-$LIBGCRYPT_VERSION.tar.gz || exit 1
gzcat libgcrypt-$LIBGCRYPT_VERSION.tar.gz | tar xf - || exit 1
cd libgcrypt-$LIBGCRYPT_VERSION
#
# The assembler language code is not compatible with the OS X
# x86 assembler (or is it an x86-64 vs. x86-32 issue?).
#
# libgcrypt expects gnu89, not c99/gnu99, semantics for
# "inline". See, for example:
#
# http://lists.freebsd.org/pipermail/freebsd-ports-bugs/2010-October/198809.html
#
CFLAGS="$CFLAGS -std=gnu89 $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" ./configure --disable-asm || exit 1
make $MAKE_BUILD_OPTS || exit 1
$DO_MAKE_INSTALL || exit 1
cd ..
touch libgcrypt-$LIBGCRYPT_VERSION-done
fi
if [ "$GNUTLS_VERSION" -a ! -f gnutls-$GNUTLS_VERSION-done ] ; then
#
# GnuTLS requires libgcrypt (or nettle, in newer versions).
#
if [ -z "$LIBGCRYPT_VERSION" ]
then
echo "GnuTLS requires libgcrypt, but you didn't install libgcrypt" 1>&2
exit 1
fi
echo "Downloading, building, and installing GnuTLS:"
[ -f gnutls-$GNUTLS_VERSION.tar.bz2 ] || curl -L -O http://ftp.gnu.org/gnu/gnutls/gnutls-$GNUTLS_VERSION.tar.bz2 || exit 1
bzcat gnutls-$GNUTLS_VERSION.tar.bz2 | tar xf - || exit 1
cd gnutls-$GNUTLS_VERSION
#
# Use libgcrypt, not nettle.
# XXX - is there some reason to prefer nettle? Or does
# Wireshark directly use libgcrypt routines?
#
CFLAGS="$CFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" ./configure --with-libgcrypt --without-p11-kit || exit 1
make $MAKE_BUILD_OPTS || exit 1
#
# The pkgconfig file for GnuTLS says "requires zlib", but OS X,
# while it supplies zlib, doesn't supply a pkgconfig file for
# it.
#
# Patch the GnuTLS pkgconfig file not to require zlib.
# (If the capabilities of GnuTLS that Wireshark uses don't
# depend on building GnuTLS with zlib, an alternative would be
# to configure it not to use zlib.)
#
patch -p0 lib/gnutls.pc.in <../../macosx-support-lib-patches/gnutls-pkgconfig.patch || exit 1
$DO_MAKE_INSTALL || exit 1
cd ..
touch gnutls-$GNUTLS_VERSION-done
fi
if [ "$LUA_VERSION" -a ! -f lua-$LUA_VERSION-done ] ; then
echo "Downloading, building, and installing Lua:"
[ -f lua-$LUA_VERSION.tar.gz ] || curl -L -O http://www.lua.org/ftp/lua-$LUA_VERSION.tar.gz || exit 1
gzcat lua-$LUA_VERSION.tar.gz | tar xf - || exit 1
cd lua-$LUA_VERSION
make $MAKE_BUILD_OPTS macosx || exit 1
$DO_MAKE_INSTALL || exit 1
cd ..
touch lua-$LUA_VERSION-done
fi
if [ "$PORTAUDIO_VERSION" -a ! -f portaudio-done ] ; then
echo "Downloading, building, and installing PortAudio:"
[ -f $PORTAUDIO_VERSION.tgz ] || curl -L -O http://www.portaudio.com/archives/$PORTAUDIO_VERSION.tgz || exit 1
gzcat $PORTAUDIO_VERSION.tgz | tar xf - || exit 1
cd portaudio
#
# Un-comment an include that's required on Lion.
#
patch -p0 include/pa_mac_core.h <../../macosx-support-lib-patches/portaudio-pa_mac_core.h.patch
#
# Fix a bug that showed up with clang (but is a bug with any
# compiler).
#
patch -p0 src/hostapi/coreaudio/pa_mac_core.c <../../macosx-support-lib-patches/portaudio-pa_mac_core.c.patch
#
# Disable fat builds - the configure script doesn't work right
# with Xcode 4 if you leave them enabled, and we don't build
# any other libraries fat (GLib, for example, would be very
# hard to build fat), so there's no advantage to having PortAudio
# built fat.
#
# Set the minimum OS X version to 10.4, to suppress some
# deprecation warnings. (Good luck trying to make any of
# this build on an OS+Xcode with a pre-10.4 SDK; we don't
# worry about the user requesting that.)
#
CFLAGS="$CFLAGS -mmacosx-version-min=10.4 $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" ./configure --disable-mac-universal || exit 1
make $MAKE_BUILD_OPTS || exit 1
$DO_MAKE_INSTALL || exit 1
cd ..
touch portaudio-done
fi
if [ "$GEOIP_VERSION" -a ! -f geoip-$GEOIP_VERSION-done ]
then
echo "Downloading, building, and installing GeoIP API:"
[ -f GeoIP-$GEOIP_VERSION.tar.gz ] || curl -L -O http://geolite.maxmind.com/download/geoip/api/c/GeoIP-$GEOIP_VERSION.tar.gz || exit 1
gzcat GeoIP-$GEOIP_VERSION.tar.gz | tar xf - || exit 1
cd GeoIP-$GEOIP_VERSION
CFLAGS="$CFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" ./configure || exit 1
#
# Grr. Their man pages "helpfully" have an ISO 8859-1
# copyright symbol in the copyright notice, but OS X's
# default character encoding is UTF-8. sed on Mountain
# Lion barfs at the "illegal character sequence" represented
# by an ISO 8859-1 copyright symbol, as it's not a valid
# UTF-8 sequence.
#
# iconv the relevant man pages into UTF-8.
#
for i in geoipupdate.1.in geoiplookup6.1.in geoiplookup.1.in
do
iconv -f iso8859-1 -t utf-8 man/"$i" >man/"$i".tmp &&
mv man/"$i".tmp man/"$i"
done
make $MAKE_BUILD_OPTS || exit 1
$DO_MAKE_INSTALL || exit 1
cd ..
touch geoip-$GEOIP_VERSION-done
fi
if [ "$CARES_VERSION" -a ! -f geoip-$CARES_VERSION-done ]
then
echo "Downloading, building, and installing C-Ares API:"
[ -f c-ares-$CARES_VERSION.tar.gz ] || curl -L -O http://c-ares.haxx.se/download/c-ares-$CARES_VERSION.tar.gz || exit 1
gzcat c-ares-$CARES_VERSION.tar.gz | tar xf - || exit 1
cd c-ares-$CARES_VERSION
CFLAGS="$CFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" LDFLAGS="$LDFLAGS $VERSION_MIN_FLAGS $SDKFLAGS" ./configure || exit 1
make $MAKE_BUILD_OPTS || exit 1
$DO_MAKE_INSTALL || exit 1
cd ..
touch c-ares-$CARES_VERSION-done
fi
echo ""
echo "You are now prepared to build Wireshark. To do so do:"
echo "export PKG_CONFIG_PATH=/usr/local/lib/pkgconfig:/usr/X11/lib/pkgconfig"
echo ""
if [ -n "$CMAKE" ]; then
echo "mkdir build; cd build"
echo "cmake .."
echo
echo "or"
echo
fi
echo "./autogen.sh"
echo "mkdir build; cd build"
echo "../configure"
echo ""
echo "make $MAKE_BUILD_OPTS"
echo "make install"
echo ""
echo "Make sure you are allowed capture access to the network devices"
echo "See: http://wiki.wireshark.org/CaptureSetup/CapturePrivileges"
echo ""
exit 0
|
hashbrowncipher/wireshark
|
macosx-setup.sh
|
Shell
|
gpl-2.0
| 55,583 |
#!/usr/bin/env bash
status=""
counter=0
checkcounter=0
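# Hypothetical usage example (the host name is only illustrative):
#   ./wait_for_server.sh cloud.example.com
# The loop below polls http://$1/status.php and extracts the
# "maintenance" field from the JSON reply with jq until it reads false.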
until [[ $status = "false" ]]; do
status=$(curl 2>/dev/null "http://$1/status.php" | jq .maintenance)
echo "($checkcounter) $status"
if [[ "$status" =~ "false" || "$status" = "" ]]; then
let "counter += 1"
if [[ $counter -gt 50 ]]; then
echo "Failed to wait for server"
exit 1
fi
fi
let "checkcounter += 1"
sleep 10
done
echo "($checkcounter) Done"
|
nextcloud/android
|
scripts/wait_for_server.sh
|
Shell
|
gpl-2.0
| 482 |
#!/bin/bash
# sat6-updates.sh updates our hosts automatically it is
# called from cron everyday but only runs according the
# following schedule:
# Content View | Environments | Date
# -------------------------------------------------------------------
# Crash | Library --> Dev --> Prod | 1st Monday of every month
# day of the week today
dow=$( date +"%u%d" )
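# For illustration: %u is the day of the week (1 = Monday) and %d the
# zero-padded day of the month, so a Monday falling on the 3rd gives
# dow=103, which the 101..107 range below recognises as the first Monday.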
# if first Monday
if [[ "$dow" -ge 101 && "$dow" -le 107 ]]
then
## Update Crash
# Generate new content view version
/root/api/Sat6APIUpdateHC.py -o HO -cv Crash --create-new-version
# Promote dev to prod
/root/api/Sat6APIUpdateHC.py -o HO -cv Crash --promote-from-env dev --promote-to-env prod
# Promote Library to dev
/root/api/Sat6APIUpdateHC.py -o HO -cv Crash --promote-from-env Library --promote-to-env dev
# remove old versions
/root/api/Sat6APIUpdateHC.py -o HO -cv Crash --cleanup --keep 1
# if first Tuesday
elif [[ "$dow" -ge 201 && "$dow" -le 207 ]]
then
## Update Infra
# Generate new content view version
/root/api/Sat6APIUpdateHC.py -o HO -cv Infra --create-new-version
# Promote Library to prod
/root/api/Sat6APIUpdateHC.py -o HO -cv Infra --promote-from-env Library --promote-to-env prod
# remove old versions
/root/api/Sat6APIUpdateHC.py -o HO -cv Infra --cleanup --keep 1
# if first Wednesday
elif [[ "$dow" -ge 301 && "$dow" -le 307 ]]
then
echo "its wednesday"
# if first Thursday
elif [[ "$dow" -ge 401 && "$dow" -le 407 ]]
then
echo "its thursday"
# if first Friday
elif [[ "$dow" -ge 501 && "$dow" -le 507 ]]
then
echo "its friday"
# if not any of the above (weekend) do nothing.
else
echo "It is a weekend - nothing to do"
fi
|
sean797/sat6
|
api/sat6-wrapper.sh
|
Shell
|
gpl-2.0
| 1,675 |
#!/bin/ksh
# Had bug in not handling when errexit was set.
# We'll also test set -u.
set -o errexit
### FIXME: a bug in ksh prevents this, I think.
# set -u
print one
|
rocky/kshdb
|
test/example/bug-errexit.sh
|
Shell
|
gpl-2.0
| 167 |
#!/bin/bash
## FUNCTIONS ##
noroot() {
sudo -EH -u vagrant HTTP_HOST="${SITE_HOST}" "$@";
}
get_config() {
local key=$1
local s='[[:space:]]*'
local w='[a-zA-Z0-9_]*'
sed -n "s/^$s$key$s:$s\($w\)$s$/\1/ p" "wp-cli.yml"
}
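# For illustration (assuming wp-cli.yml contains a line such as
# "dbname: wordpress", which is only a hypothetical value):
#   get_config dbname   ->  wordpress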
## PROVISIONING ##
DATABASE=$(get_config dbname)
echo "Setting up a local WordPress project for development..."
noroot composer update
if [ ! -f "index.php" ]; then
noroot cat >"index.php" <<PHP
<?php require dirname( __FILE__ ) . '/wp/index.php';
PHP
fi
if ! noroot wp core is-installed; then
echo " * Creating database schema ${DATABASE}"
mysql -u root --password=root -e "CREATE DATABASE IF NOT EXISTS ${DATABASE}"
mysql -u root --password=root -e "GRANT ALL PRIVILEGES ON ${DATABASE}.* TO wp@localhost IDENTIFIED BY 'wp';"
echo " * Configuring WordPress"
WP_CACHE_KEY_SALT=`date +%s | sha256sum | head -c 64`
noroot wp core config --extra-php <<PHP
define( 'WP_CACHE', true );
define( 'WP_CACHE_KEY_SALT', '$WP_CACHE_KEY_SALT' );
define( 'WP_DEBUG', true );
define( 'WP_DEBUG_LOG', true );
define( 'WP_DEBUG_DISPLAY', false );
define( 'SAVEQUERIES', false );
define( 'JETPACK_DEV_DEBUG', true );
@ini_set( 'display_errors', 0 );
define( 'WP_LOCAL_DEV', true );
define( 'WP_ENV', 'development' );
define( 'WP_CONTENT_DIR', dirname( __FILE__ ) . '/content' );
if ( defined( 'WP_HOME' ) ) {
define( 'WP_CONTENT_URL', WP_HOME . '/content' );
} else {
define( 'WP_CONTENT_URL', 'http://' . \$_SERVER['HTTP_HOST'] . '/content' );
}
if ( ! defined( 'ABSPATH' ) ) {
define( 'ABSPATH', dirname( __FILE__ ) . '/wp/' );
}
PHP
noroot mv wp/wp-config.php .
noroot wp core install
echo " * Setting additional options"
HOMEURL=$(noroot wp option get home)
noroot wp option update siteurl "$HOMEURL/wp"
noroot wp option update permalink_structure "/%postname%/"
echo " * Importing test content"
noroot curl -OLs https://raw.githubusercontent.com/manovotny/wptest/master/wptest.xml
noroot wp plugin activate wordpress-importer
noroot wp import wptest.xml --authors=create
noroot rm wptest.xml
fi
echo "All done!"
|
goblindegook/vvv-composer
|
vvv-init.sh
|
Shell
|
gpl-2.0
| 2,147 |
#!/bin/bash
threeDFile1=$1
threeDFile2=$2
finalName=$3
fileName1=${threeDFile1/.ply/}
fileName2=${threeDFile2/.ply/}
name_finale=${finalName/.ply/.txt}
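# For illustration, with hypothetical arguments
#   ./merge.sh cave_a.ply cave_b.ply merged.ply
# the parameter expansions above give fileName1=cave_a, fileName2=cave_b
# and name_finale=merged.txt.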
if [ ! -d $fileName1 ]; then
./get_data.sh $threeDFile1
else
:
fi
if [ ! -d $fileName2 ]; then
./get_data.sh $threeDFile2
else
:
fi
while [ ! -d $fileName1 ]
do
:
done
while [ ! -d $fileName2 ]
do
:
done
R --no-save --no-restore --quiet --args $fileName1/$fileName1.vertex.txt $fileName2/$fileName2.vertex.txt $name_finale < matching.R > $name_finale.log 2>&1
while [ ! -f $name_finale ]
do
:
done
./clean.sh $name_finale
|
naredokitsch/cavesmerge
|
merge.sh
|
Shell
|
gpl-2.0
| 584 |
#!/bin/bash
patch_files () {
pushd $1
if [ ! -f vss_patch_files.done ]; then
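# For illustration: the first sed expression below rewrites "[gstreamer-"
# to "[wpe-gstreamer-", so e.g. "[gstreamer-1.0]" becomes
# "[wpe-gstreamer-1.0]", and the second rewrites any plugindir=...
# assignment to plugindir="$(libdir)/gstreamer-${GST_MAJORMINOR}-wpe",
# presumably so the patched build installs its plugins into a -wpe
# suffixed directory instead of the stock one.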
for f in `find -name Makefile.in -o -name Makefile.am -o -name configure.ac`; do \
sed -i \
-e "s;\[gstreamer-;\[wpe-gstreamer-;g" \
-e "s;plugindir=.*;plugindir=\"\\\\$\(libdir\)/gstreamer\-\$\{GST_MAJORMINOR\}\-wpe\";g" \
$f; \
done
touch vss_patch_files.done
fi
popd
}
patch_files $1
|
Metrological/buildroot-wpe
|
package/vss-sdk/gst1/brcm.fix.sh
|
Shell
|
gpl-2.0
| 498 |