code (stringlengths 2 – 1.05M) | repo_name (stringlengths 5 – 110) | path (stringlengths 3 – 922) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 2 – 1.05M)
---|---|---|---|---|---|
#!/bin/sh
# Copyright (C) 2006 OpenWrt.org
# DEBUG="echo"
find_config() {
local iftype device iface ifaces ifn
for ifn in $interfaces; do
config_get iftype "$ifn" type
config_get iface "$ifn" ifname
case "$iftype" in
bridge) config_get ifaces "$ifn" ifnames;;
esac
config_get device "$ifn" device
for ifc in $device $iface $ifaces; do
[ ."$ifc" = ."$1" ] && {
echo "$ifn"
return 0
}
done
done
return 1;
}
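# Hypothetical usage sketch: resolve the config section that owns a
# physical interface, e.g.
#   cfg="$(find_config eth0)" && echo "eth0 is managed by section '$cfg'"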
scan_interfaces() {
local cfgfile="$1"
local mode iftype iface ifname device
interfaces=
config_cb() {
case "$1" in
interface)
config_set "$2" auto 1
;;
esac
config_get iftype "$CONFIG_SECTION" TYPE
case "$iftype" in
interface)
config_get proto "$CONFIG_SECTION" proto
append interfaces "$CONFIG_SECTION"
config_get iftype "$CONFIG_SECTION" type
config_get ifname "$CONFIG_SECTION" ifname
config_get device "$CONFIG_SECTION" device
config_set "$CONFIG_SECTION" device "${device:-$ifname}"
case "$iftype" in
bridge)
config_set "$CONFIG_SECTION" ifnames "${device:-$ifname}"
config_set "$CONFIG_SECTION" ifname br-"$CONFIG_SECTION"
;;
esac
( type "scan_$proto" ) >/dev/null 2>/dev/null && eval "scan_$proto '$CONFIG_SECTION'"
;;
esac
}
config_load "${cfgfile:-network}"
}
add_vlan() {
local vif="${1%\.*}"
[ "$1" = "$vif" ] || ifconfig "$1" >/dev/null 2>/dev/null || {
ifconfig "$vif" up 2>/dev/null >/dev/null || add_vlan "$vif"
$DEBUG vconfig add "$vif" "${1##*\.}"
return 0
}
return 1
}
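# Illustrative example: "add_vlan eth0.5" brings up eth0 (creating it
# recursively if it is itself a VLAN interface) and then runs
# "vconfig add eth0 5"; it returns 1 if "eth0.5" already exists or has
# no VLAN suffix.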
# sort the device list, drop duplicates
sort_list() {
local arg="$*"
(
for item in $arg; do
echo "$item"
done
) | sort -u
}
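# Illustrative example:
#   sort_list "eth1 eth0 eth1"   # prints "eth0" then "eth1", one per line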
# Create the interface, if necessary.
# Return status 0 indicates that the setup_interface() call should continue
# Return status 1 means that everything is set up already.
prepare_interface() {
local iface="$1"
local config="$2"
# if we're called for the bridge interface itself, don't bother trying
# to create any interfaces here. The scripts have already done that, otherwise
# the bridge interface wouldn't exist.
[ "br-$config" = "$iface" -o -e "$iface" ] && return 0;
ifconfig "$iface" 2>/dev/null >/dev/null && {
# make sure the interface is removed from any existing bridge and deconfigured
ifconfig "$iface" 0.0.0.0
unbridge "$iface"
}
# Setup VLAN interfaces
add_vlan "$iface" && return 1
ifconfig "$iface" 2>/dev/null >/dev/null || return 0
# Setup bridging
config_get iftype "$config" type
config_get stp "$config" stp
case "$iftype" in
bridge)
[ -x /usr/sbin/brctl ] && {
ifconfig "br-$config" 2>/dev/null >/dev/null && {
local newdevs=
config_get devices "$config" device
for dev in $(sort_list "$devices" "$iface"); do
append newdevs "$dev"
done
uci_set_state network "$config" device "$newdevs"
$DEBUG brctl addif "br-$config" "$iface"
# Bridge existed already. No further processing necessary
} || {
$DEBUG brctl addbr "br-$config"
$DEBUG brctl setfd "br-$config" 0
$DEBUG ifconfig "br-$config" up
$DEBUG brctl addif "br-$config" "$iface"
$DEBUG brctl stp "br-$config" ${stp:-off}
# Creating the bridge here will have triggered a hotplug event, which will
# result in another setup_interface() call, so we simply stop processing
# the current event at this point.
}
ifconfig "$iface" up 2>/dev/null >/dev/null
return 1
}
;;
esac
return 0
}
set_interface_ifname() {
local config="$1"
local ifname="$2"
config_get device "$1" device
uci_set_state network "$config" ifname "$ifname"
uci_set_state network "$config" device "$device"
}
setup_interface_none() {
env -i ACTION="ifup" INTERFACE="$2" DEVICE="$1" PROTO=none /sbin/hotplug-call "iface" &
}
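# Note: proto "none" performs no address configuration; it only fires the
# "ifup" hotplug event so listeners can react to the interface.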
setup_interface_static() {
local iface="$1"
local config="$2"
config_get ipaddr "$config" ipaddr
config_get netmask "$config" netmask
config_get ip6addr "$config" ip6addr
[ -z "$ipaddr" -o -z "$netmask" ] && [ -z "$ip6addr" ] && return 1
config_get gateway "$config" gateway
config_get ip6gw "$config" ip6gw
config_get dns "$config" dns
config_get bcast "$config" broadcast
[ -z "$ipaddr" ] || $DEBUG ifconfig "$iface" "$ipaddr" netmask "$netmask" broadcast "${bcast:-+}"
[ -z "$ip6addr" ] || $DEBUG ifconfig "$iface" add "$ip6addr"
[ -z "$gateway" ] || $DEBUG route add default gw "$gateway" dev "$iface"
[ -z "$ip6gw" ] || $DEBUG route -A inet6 add default gw "$ip6gw" dev "$iface"
[ -z "$dns" ] || {
for ns in $dns; do
grep "$ns" /tmp/resolv.conf.auto 2>/dev/null >/dev/null || {
echo "nameserver $ns" >> /tmp/resolv.conf.auto
}
done
}
env -i ACTION="ifup" INTERFACE="$config" DEVICE="$iface" PROTO=static /sbin/hotplug-call "iface" &
}
setup_interface_alias() {
local config="$1"
local parent="$2"
local iface="$3"
config_get cfg "$config" interface
[ "$parent" == "$cfg" ] || return 0
# alias counter
config_get ctr "$parent" alias_count
ctr="$(($ctr + 1))"
config_set "$parent" alias_count "$ctr"
# alias list
config_get list "$parent" aliases
append list "$config"
config_set "$parent" aliases "$list"
set_interface_ifname "$config" "$iface:$ctr"
config_get proto "$config" proto
case "${proto:-static}" in
static)
setup_interface_static "$iface:$ctr" "$config"
;;
*)
echo "Unsupported type '$proto' for alias config '$config'"
return 1
;;
esac
}
setup_interface() {
local iface="$1"
local config="$2"
local proto
local macaddr
[ -n "$config" ] || {
config=$(find_config "$iface")
[ "$?" = 0 ] || return 1
}
proto="${3:-$(config_get "$config" proto)}"
prepare_interface "$iface" "$config" || return 0
[ "$iface" = "br-$config" ] && {
# need to bring up the bridge and wait a second for
# it to switch to the 'forwarding' state, otherwise
# it will lose its routes...
ifconfig "$iface" up
sleep 1
}
# Interface settings
config_get mtu "$config" mtu
config_get macaddr "$config" macaddr
grep "$iface:" /proc/net/dev > /dev/null && \
$DEBUG ifconfig "$iface" ${macaddr:+hw ether "$macaddr"} ${mtu:+mtu $mtu} up
set_interface_ifname "$config" "$iface"
pidfile="/var/run/$iface.pid"
case "$proto" in
static)
setup_interface_static "$iface" "$config"
;;
dhcp)
# prevent udhcpc from starting more than once
lock "/var/lock/dhcp-$iface"
pid="$(cat "$pidfile" 2>/dev/null)"
if [ -d "/proc/$pid" ] && grep udhcpc "/proc/${pid}/cmdline" >/dev/null 2>/dev/null; then
lock -u "/var/lock/dhcp-$iface"
else
config_get ipaddr "$config" ipaddr
config_get netmask "$config" netmask
config_get hostname "$config" hostname
config_get proto1 "$config" proto
config_get clientid "$config" clientid
[ -z "$ipaddr" ] || \
$DEBUG ifconfig "$iface" "$ipaddr" ${netmask:+netmask "$netmask"}
# don't stay running in background if dhcp is not the main proto on the interface (e.g. when using pptp)
[ ."$proto1" != ."$proto" ] && dhcpopts="-n -q"
$DEBUG eval udhcpc -t 0 -i "$iface" ${ipaddr:+-r $ipaddr} ${hostname:+-H $hostname} ${clientid:+-c $clientid} -b -p "$pidfile" ${dhcpopts:- -R &}
lock -u "/var/lock/dhcp-$iface"
fi
;;
none)
setup_interface_none "$iface" "$config"
;;
*)
if ( eval "type setup_interface_$proto" ) >/dev/null 2>/dev/null; then
eval "setup_interface_$proto '$iface' '$config' '$proto'"
else
echo "Interface type $proto not supported."
return 1
fi
;;
esac
config_set "$config" aliases ""
config_set "$config" alias_count 0
config_foreach setup_interface_alias alias "$config" "$iface"
config_get aliases "$config" aliases
[ -z "$aliases" ] || uci_set_state network "$config" aliases "$aliases"
}
unbridge() {
local dev="$1"
local brdev
[ -x /usr/sbin/brctl ] || return 0
brctl show | grep "$dev" >/dev/null && {
# interface is still part of a bridge, correct that
for brdev in $(brctl show | awk '$2 ~ /^[0-9].*\./ { print $1 }'); do
brctl delif "$brdev" "$dev" 2>/dev/null >/dev/null
done
}
}
| houzhenggang/MaRa-a1a0a5aNaL | package/base-files/files/lib/network/config.sh | Shell | gpl-2.0 | 8,040 |
#!/bin/bash
# This script installs the MySQL server
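# Note: $db_root_password is not set in this script; it is assumed to be
# exported by the calling environment before this runs, e.g.
#   export db_root_password='s3cret'   # hypothetical value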
apt-get update
debconf-set-selections <<< "mysql-server mysql-server/root_password password $db_root_password"
debconf-set-selections <<< "mysql-server mysql-server/root_password_again password $db_root_password"
apt-get -y install --fix-missing mysql-server
| openstack/heat-translator | translator/tests/data/artifacts/mysql/mysql_dbms_install.sh | Shell | apache-2.0 | 308 |
#!/bin/bash
cloudmonkey set table default
TEMPOSID=$(cloudmonkey list ostypes keyword="Other PV (64-bit)" filter=id | grep ^id | awk {'print $3'})
# XenServer
cloudmonkey register template displayText=Tiny format=VHD hypervisor=XenServer isextractable=true isfeatured=true ispublic=true isrouting=false name=Tiny osTypeId=$TEMPOSID passwordEnabled=true requireshvm=true zoneid=-1 url=http://dl.openvm.eu/cloudstack/macchinina/x86_64/macchinina-xen.vhd.bz2
# KVM
cloudmonkey register template displayText=Tiny format=Qcow2 hypervisor=KVM isextractable=true isfeatured=true ispublic=true isrouting=false name=Tiny osTypeId=$TEMPOSID passwordEnabled=true requireshvm=true zoneid=-1 url=http://dl.openvm.eu/cloudstack/macchinina/x86_64/macchinina-kvm.qcow2.bz2
| MissionCriticalCloud/cosmic-bubble | helper_scripts/cosmic/add_os_templates.sh | Shell | apache-2.0 | 758 |
#!/bin/bash
export PATH=$PATH:/sbin
rmmod g_file_storage
rmmod net2272
if [ "$1" = "nand" ]; then
mount /mnt/nand || mount -o remount,rw /mnt/nand
elif [ "$1" = "root" ]; then
mount -o remount,rw /
else
mount /mnt/sd || mount -o remount,rw /mnt/sd
fi
| uli/gmenu2x | pandora/scripts/usboff.sh | Shell | gpl-2.0 | 250 |
#!/usr/bin/env bash
set -e
###############################################################################
#
# all-tests.sh
#
# Execute tests for edx-platform. This script is designed to be the
# entry point for various CI systems.
#
###############################################################################
# Violations thresholds for failing the build
export PYLINT_THRESHOLD=4500
export JSHINT_THRESHOLD=9080
doCheckVars() {
if [ -n "$CIRCLECI" ] ; then
SCRIPT_TO_RUN=scripts/circle-ci-tests.sh
elif [ -n "$JENKINS_HOME" ] ; then
source scripts/jenkins-common.sh
SCRIPT_TO_RUN=scripts/generic-ci-tests.sh
fi
}
# Determine the CI system for the environment
doCheckVars
# Run appropriate CI system script
if [ -n "$SCRIPT_TO_RUN" ] ; then
$SCRIPT_TO_RUN
# Exit with the exit code of the called script
exit $?
else
echo "ERROR. Could not detect continuous integration system."
exit 1
fi
| analyseuc3m/ANALYSE-v1 | scripts/all-tests.sh | Shell | agpl-3.0 | 962 |
#!/bin/sh
_do_mdmon_takeover() {
local ret
mdmon --takeover --all
ret=$?
[ $ret -eq 0 ] && info "Taking over mdmon processes."
return $ret
}
if command -v mdmon >/dev/null; then
_do_mdmon_takeover $1
fi
| Calrama/dracut | modules.d/90mdraid/mdmon-pre-shutdown.sh | Shell | gpl-2.0 | 229 |
#!/bin/bash
#
# This script assumes a linux environment
echo "*** uBlock.firefox: Copying files"
DES=dist/build/uBlock.firefox
rm -rf $DES
mkdir -p $DES
cp -R assets $DES/
rm $DES/assets/*.sh
cp -R src/css $DES/
cp -R src/img $DES/
cp -R src/js $DES/
cp -R src/lib $DES/
cp -R src/_locales $DES/
cp src/*.html $DES/
mv $DES/img/icon_128.png $DES/icon.png
cp platform/firefox/vapi-*.js $DES/js/
cp platform/firefox/bootstrap.js $DES/
cp platform/firefox/frame*.js $DES/
cp -R platform/firefox/img $DES/
cp -R platform/firefox/css $DES/
cp platform/firefox/chrome.manifest $DES/
cp platform/firefox/install.rdf $DES/
cp platform/firefox/*.xul $DES/
cp LICENSE.txt $DES/
echo "*** uBlock.firefox: Generating meta..."
python tools/make-firefox-meta.py $DES/
if [ "$1" = all ]; then
echo "*** uBlock.firefox: Creating package..."
pushd $DES/
zip ../uBlock.firefox.xpi -qr *
popd
fi
echo "*** uBlock.firefox: Package done."
| chrisaljoudi/uBlock | tools/make-firefox.sh | Shell | gpl-3.0 | 940 |
#!/bin/bash
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
set -o xtrace
export REPO_DIR=${REPO_DIR:-$(pwd)}
export HOST_ARTIFACTS_DIR=${WORKSPACE}/_artifacts
mkdir -p "${HOST_ARTIFACTS_DIR}"
# Run the kubekins container, mapping in docker (so we can launch containers),
# the repo directory, and the artifacts output directory.
#
# Note: We pass in the absolute path to the repo on the host as an env var in case
# any tests that get run need to launch containers that also map volumes.
# This is required because if you do
#
# $ docker run -v $PATH:/container/path ...
#
# From _inside_ a container that has the host's docker mapped in, the $PATH
# provided must be resolvable on the *HOST*, not the container.
docker run --rm=true \
-v /var/run/docker.sock:/var/run/docker.sock \
-v "$(which docker)":/bin/docker \
-v "${REPO_DIR}":/go/src/k8s.io/kubernetes \
-v "${WORKSPACE}/_artifacts":/workspace/artifacts \
-v /etc/localtime:/etc/localtime:ro \
-e "KUBE_FORCE_VERIFY_CHECKS=${KUBE_FORCE_VERIFY_CHECKS:-}" \
-e "KUBE_VERIFY_GIT_BRANCH=${KUBE_VERIFY_GIT_BRANCH:-}" \
-e "REPO_DIR=${REPO_DIR}" \
-e "HOST_ARTIFACTS_DIR=${HOST_ARTIFACTS_DIR}" \
-i gcr.io/google_containers/kubekins-test:0.11 \
bash -c "cd kubernetes && ./hack/jenkins/test-dockerized.sh"
| rajdeepd/kubernetes | hack/jenkins/gotest-dockerized.sh | Shell | apache-2.0 | 1,891 |
#!/bin/sh
#
# Copyright (C) 2014 OpenWrt.org
#
KIRKWOOD_BOARD_NAME=
KIRKWOOD_MODEL=
kirkwood_board_detect() {
local machine
local name
machine=$(cat /proc/device-tree/model)
case "$machine" in
"Seagate FreeAgent Dockstar")
name="dockstar"
;;
"Seagate GoFlex Net")
name="goflexnet"
;;
"Iomega Iconnect")
name="iconnect"
;;
"RaidSonic ICY BOX IB-NAS62x0 (Rev B)")
name="ib62x0"
;;
"Cloud Engines Pogoplug E02")
name="pogo_e02"
;;
"Linksys EA3500")
name="ea3500"
;;
"Linksys EA4500")
name="ea4500"
;;
"Globalscale Technologies Guruplug Server Plus")
name="guruplug-server-plus"
;;
"Globalscale Technologies SheevaPlug")
name="sheevaplug"
;;
"Globalscale Technologies eSATA SheevaPlug")
name="sheevaplug-esata"
;;
*)
name="generic"
;;
esac
[ -z "$KIRKWOOD_BOARD_NAME" ] && KIRKWOOD_BOARD_NAME="$name"
[ -z "$KIRKWOOD_MODEL" ] && KIRKWOOD_MODEL="$machine"
[ -e "/tmp/sysinfo/" ] || mkdir -p "/tmp/sysinfo/"
echo "$KIRKWOOD_BOARD_NAME" > /tmp/sysinfo/board_name
echo "$KIRKWOOD_MODEL" > /tmp/sysinfo/model
}
kirkwood_board_name() {
local name
[ -f /tmp/sysinfo/board_name ] || kirkwood_board_detect
[ -f /tmp/sysinfo/board_name ] && name=$(cat /tmp/sysinfo/board_name)
[ -z "$name" ] && name="unknown"
echo "$name"
}
| morgenroth/openwrt | target/linux/kirkwood/base-files/lib/kirkwood.sh | Shell | gpl-2.0 | 1,301 |
#!/bin/sh -e
SRC_BASE=configure
SRC="$(dirname $0)/../${SRC_BASE}"
DST="$(pwd)/test/tmp/${SRC_BASE}.bz2"
mkdir -p test/tmp
rm -f "$DST"
bzip2 -c "$SRC" >"$DST"
./test/run_input "$DST/${SRC_BASE}" |diff "$SRC" -
| SimonKagstrom/mpd-streamium | test/test_archive_bzip2.sh | Shell | gpl-2.0 | 213 |
# This hook compresses info(1) files.
hook() {
local f j dirat lnkat newlnk
local fpattern="s|${PKGDESTDIR}||g;s|^\./$||g;/^$/d"
#
# Find out if this package contains info files and compress
# all them with gzip.
#
if [ ! -f ${PKGDESTDIR}/usr/share/info/dir ]; then
return 0
fi
# Always remove this file if curpkg is not texinfo.
if [ "$pkgname" != "texinfo" ]; then
rm -f ${PKGDESTDIR}/usr/share/info/dir
fi
find ${PKGDESTDIR}/usr/share/info -type f -follow | while read f
do
j=$(echo "$f"|sed -e "$fpattern")
[ "$j" = "" ] && continue
[ "$j" = "/usr/share/info/dir" ] && continue
# Ignore compressed files.
if $(echo "$j"|grep -q '.*.gz$'); then
continue
fi
# Ignore non info files.
if ! $(echo "$j"|grep -q '.*.info$') && \
! $(echo "$j"|grep -q '.*.info-[0-9]*$'); then
continue
fi
if [ -h ${PKGDESTDIR}/"$j" ]; then
dirat=$(dirname "$j")
lnkat=$(readlink ${PKGDESTDIR}/"$j")
newlnk="${j##*/}"
rm -f ${PKGDESTDIR}/"$j"
cd ${PKGDESTDIR}/"$dirat"
ln -s "${lnkat}".gz "${newlnk}".gz
continue
fi
echo " Compressing info file: $j..."
gzip -nfq9 ${PKGDESTDIR}/"$j"
done
}
| kulinacs/void-packages | common/hooks/post-install/00-compress-info-files.sh | Shell | bsd-2-clause | 1,148 |
#!/bin/bash
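# Note: getRnaPred presumably exits with status 255 when run without
# arguments, so this smoke test passes as long as the binary executes.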
getRnaPred 2> /dev/null || [[ "$?" == 255 ]]
| JenCabral/bioconda-recipes | recipes/ucsc-getrnapred/run_test.sh | Shell | mit | 57 |
#!/bin/bash
# A little bit of scripting magic so that whatever directory this script is
# run from, we always find the python scripts and data we need.
cd "$(dirname "$0")"
cwd=`pwd`/..
cd ${cwd}
# Activate python virtual environment
source ../../../../virtualenv/bin/activate
# Now do some work
mkdir -p ../../../output_data/cannon
# -----------------------------------------------------------------------------------------
# Loop over both Gaussian and half ellipse convolutions
for convolution_kernel in gaussian half_ellipse
do
# Do convolution for both 4MOST LRS and HRS
for mode in hrs lrs
do
# Loop over two different SNRs
for snr in 20 50
do
# Loop over different convolution widths for the test set
for convolution_width in 1.2 1.3 1.4 1.5 1.6 1.7 1.8 1.9 2.0 2.1 2.2
do
python3 cannon_test.py --train "galah_training_sample_4fs_${convolution_kernel}_1.7_${mode}" \
--test "galah_test_sample_4fs_${convolution_kernel}_${convolution_width}_${mode}_snr${snr}" \
--censor "line_list_filter_2016MNRAS.461.2174R.txt" \
--description "HRS with ${convolution_width}-pixel ${convolution_kernel} convolution; censored - 10 labels." \
--labels "Teff,logg,[Fe/H],[Ca/H],[Mg/H],[Ti/H],[Si/H],[Na/H],[Ni/H],[Cr/H]" \
--assume-scaled-solar \
--output-file "../../../output_data/cannon/cannon_galah_${convolution_kernel}_${convolution_width}_censored_${mode}_10label_snr${snr}"
done
done
done
done
| dcf21/4most-4gp-scripts | src/scripts/test_cannon/examples/examples_galah_20180830.sh | Shell | mit | 1,753 |
install_yarn() {
local dir="$build_dir/.heroku/yarn"
# Look in package.json's engines.yarn field for a semver range
local version=$($bp_dir/vendor/jq -r .engines.yarn $build_dir/package.json)
if needs_resolution "$version"; then
echo "Resolving yarn version ${version:-(latest)} via semver.io..."
local version=$(curl --silent --get --retry 5 --retry-max-time 15 --data-urlencode "range=${version}" https://semver.herokuapp.com/yarn/resolve)
fi
echo "Downloading and installing yarn ($version)..."
local download_url="https://yarnpkg.com/downloads/$version/yarn-v$version.tar.gz"
local code=$(curl "$download_url" -L --silent --fail --retry 5 --retry-max-time 15 -o /tmp/yarn.tar.gz --write-out "%{http_code}")
if [ "$code" != "200" ]; then
echo "Unable to download yarn: $code" && false
fi
rm -rf $dir
mkdir -p "$dir"
# https://github.com/yarnpkg/yarn/issues/770
if tar --version | grep -q 'gnu'; then
tar xzf /tmp/yarn.tar.gz -C "$dir" --strip 1 --warning=no-unknown-keyword
else
tar xzf /tmp/yarn.tar.gz -C "$dir" --strip 1
fi
chmod +x $dir/bin/*
PATH=$build_dir/.heroku/yarn/bin:$PATH
echo "Installed yarn $(yarn --version)"
}
| tonycoco/heroku-buildpack-ember-cli | bin/yarn.sh | Shell | mit | 1,193 |
rm ~/Downloads/K0057_soma_annotation/out/K0057-D31-somas_dsx12y12z4-clean-cut-fit-ellipses.h5
rm ~/Downloads/K0057_soma_annotation/out/K0057-D31-somas_dsx12y12z4-clean-cut-fit-ellipses.0.mesh.h5
python soma_axes_anal.py
dpWriteh5.py --srcfile ~/Downloads/K0057_soma_annotation/out/K0057-D31-somas_dsx12y12z4-clean-cut.h5 --outfile ~/Downloads/K0057_soma_annotation/out/K0057-D31-somas_dsx12y12z4-clean-cut-fit-ellipses.h5 --inraw ~/Downloads/K0057_soma_annotation/out/K0057_D31_dsx12y12z4_somas_clean_cut_fit_ellipses.gipl --dpL --dpW --chunk 0 0 0 --size 1696 1440 640 --dataset labels
python -u $HOME/gits/emdrp/recon/python/dpLabelMesher.py --dataset labels --dpLabelMesher-verbose --set-voxel-scale --dataset-root 0 --reduce-frac 0.1 --smooth 5 5 5 --contour-lvl 0.25 --mesh-outfile /home/watkinspv/Downloads/K0057_soma_annotation/out/K0057-D31-somas_dsx12y12z4-clean-cut-fit-ellipses.0.mesh.h5 --srcfile ~/Downloads/K0057_soma_annotation/out/K0057-D31-somas_dsx12y12z4-clean-cut-fit-ellipses.h5 --chunk 0 0 0 --size 1696 1440 640
python soma_celltype_export.py
| elhuhdron/emdrp | pipeline/K0057_D31_dsx3y3z1-run5-somas/cut_iter_ellipses.sh | Shell | mit | 1,073 |
# ansible local - if only Ansible could be used without
# a local client when run by Packer.
# A more recent version of Ansible is needed than the one
# in the Ubuntu universe repository, so install from the PPA here.
PACKAGES="
software-properties-common
"
apt-get -y install $PACKAGES
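# Note: on some Ubuntu releases apt-add-repository prompts for confirmation;
# passing --yes (not used here) would keep this step non-interactive.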
apt-add-repository ppa:ansible/ansible
apt-get -y update
apt-get -y install ansible
| caffeinate/vap | stage2/scripts/packages.sh | Shell | mit | 349 |
APT_PACKAGES=(nodejs npm)
# -----------------------------------------------------------------------
header "Installing nodejs..."
install_packages "APT"
| mifix/dotfiles | bundles/node.sh | Shell | mit | 157 |
#!/bin/bash
echo "This script is deprecated. Use migrate.sh instead"
set -e
# Let's configure the environment
confd -onetime -backend env
cd /var/www
su-exec www-data php app/console doctrine:migrations:status-check
su-exec www-data php app/console doctrine:migrations:migrate --no-interaction -vvv
su-exec www-data php app/console doctrine:fixtures:load --no-interaction
| ministryofjustice/opg-digi-deps-api | api/scripts/database.sh | Shell | mit | 368 |
#!/bin/sh
if [ -z "$host" ]; then
echo "host is empty"
else
echo "host is $host"
fi
if [ -z "$port" ]; then
echo "port is empty"
else
echo "port is $port"
fi
cd ./selenium-phpunit-test
./phpunit-selenium/vendor/bin/phpunit --log-json ./report/report.json ./test.php --host_ip_user $host --host_port_user $port
#./phpunit-selenium/vendor/bin/phpunit --log-json ./report/report.json ./test-phpunit-muzikOnline.php --host_ip_user $host --host_port_user $port
| gosick/selenium-phpunit-test | run.sh | Shell | mit | 490 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for USN-3139-1
#
# Security announcement date: 2016-11-28 00:00:00 UTC
# Script generation date: 2017-01-01 21:05:46 UTC
#
# Operating System: Ubuntu 14.04 LTS
# Architecture: x86_64
#
# Vulnerable packages fix on version:
# - vim-common:2:7.4.052-1ubuntu3.1
# - vim-gui-common:2:7.4.052-1ubuntu3.1
# - vim-runtime:2:7.4.052-1ubuntu3.1
# - vim-doc:2:7.4.052-1ubuntu3.1
# - vim-tiny:2:7.4.052-1ubuntu3.1
# - vim:2:7.4.052-1ubuntu3.1
# - vim-dbg:2:7.4.052-1ubuntu3.1
# - vim-gnome:2:7.4.052-1ubuntu3.1
#
# Last versions recommended by security team:
# - vim-common:2:7.4.052-1ubuntu3.1
# - vim-gui-common:2:7.4.052-1ubuntu3.1
# - vim-runtime:2:7.4.052-1ubuntu3.1
# - vim-doc:2:7.4.052-1ubuntu3.1
# - vim-tiny:2:7.4.052-1ubuntu3.1
# - vim:2:7.4.052-1ubuntu3.1
# - vim-dbg:2:7.4.052-1ubuntu3.1
# - vim-gnome:2:7.4.052-1ubuntu3.1
#
# CVE List:
# - CVE-2016-1248
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo apt-get install --only-upgrade vim-common=2:7.4.052-1ubuntu3.1 -y
sudo apt-get install --only-upgrade vim-gui-common=2:7.4.052-1ubuntu3.1 -y
sudo apt-get install --only-upgrade vim-runtime=2:7.4.052-1ubuntu3.1 -y
sudo apt-get install --only-upgrade vim-doc=2:7.4.052-1ubuntu3.1 -y
sudo apt-get install --only-upgrade vim-tiny=2:7.4.052-1ubuntu3.1 -y
sudo apt-get install --only-upgrade vim=2:7.4.052-1ubuntu3.1 -y
sudo apt-get install --only-upgrade vim-dbg=2:7.4.052-1ubuntu3.1 -y
sudo apt-get install --only-upgrade vim-gnome=2:7.4.052-1ubuntu3.1 -y
| Cyberwatch/cbw-security-fixes | Ubuntu_14.04_LTS/x86_64/2016/USN-3139-1.sh | Shell | mit | 1,636 |
#!/bin/bash
#
# Get RAW YUV frames from Camera, encode it, decode it and display it
#
# Copyright (C) 2019 Xilinx
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
type vcu-demo-functions.sh > "/dev/null"
if [ $? -ne 0 ]; then
echo "Copy vcu-demo-functions.sh to /usr/bin/ or append it's path to PATH variable and re-run the script" && exit -1
fi
source vcu-demo-functions.sh
scriptName=`basename $0`
declare -a scriptArgs=("inputPath" "videoSize" "codecType" "sinkName" "numFrames" "targetBitrate" "showFps" "audioType" "internalEntropyBuffers" "v4l2Device" "displayDevice" "alsaSrc" "pulseSrc" "audioOutput" "alsaSink" "pulseSink" "frameRate")
declare -a checkEmpty=("codecType" "sinkName" "targetBitrate" "v4l2Device" "displayDevice" "frameRate")
############################################################################
# Name: usage
# Description: To display script's command line argument help
############################################################################
usage () {
echo ' Usage : '$scriptName' -i <device_id_string> -v <video_capture_device> -s <video_size> -c <codec_type> -a <audio_type> -o <sink_name> -n <number_of_frames> -b <target_bitrate> -e <internal_entropy_buffers> -r <capture_device_rate> -d <display_device> -f --use-alsasrc --use-pulsesrc --audio-output <Audio output device> --use-pulsesink --use-alsasink'
DisplayUsage "${scriptArgs[@]}"
echo ' Example :'
echo ' '$scriptName''
echo ' '$scriptName' -a aac'
echo ' '$scriptName' --use-alsasrc -i "hw:1" -d "fd4a0000.display" -a aac'
echo ' '$scriptName' -v "/dev/video1"'
echo ' '$scriptName' -n 500 --use-alsasrc'
echo ' '$scriptName' --use-alsasrc -i "hw:1" -n 500 --use-alsasrc -b 1200 -a aac'
echo ' '$scriptName' --use-pulsesrc -i "alsa_input.usb-046d_C922_Pro_Stream_Webcam_FCD7727F-02.analog-stereo" -n 500 -b 1200 -a aac'
echo ' '$scriptName' -f'
echo ' '$scriptName' -o fakevideosink'
echo ' '$scriptName' --use-alsasrc -i "hw:1" -s 1920x1080 -c avc -a aac'
echo ' '$scriptName' -s 1920x1080 -c avc -e 3'
echo ' '$scriptName' -s 1280x720 -c avc'
echo ' '$scriptName' --use-alsasrc -i "hw:1" -s 1280x720 -c avc -a aac'
echo ' '$scriptName' --use-alsasrc -i "hw:1" -s 1280x720 -c avc -a vorbis'
echo ' '$scriptName' -s 1280x720'
echo ' '$scriptName' --use-alsasrc -i "hw:1" -s 1280x720 -c avc -a aac'
echo ' '$scriptName' --use-alsasrc -i "hw:1" -s 1280x720 -c avc -a aac --audio-output "hw:0"'
echo ' '$scriptName' --use-pulsesrc -i "alsa_input.usb-046d_C922_Pro_Stream_Webcam_FCD7727F-02.analog-stereo" -n 500 -b 1200 -a aac'
echo ' "NOTE: This script depends on vcu-demo-settings.sh to be present in /usr/bin or its path set in $PATH"'
exit
}
############################################################################
# Name: CameraToDisplay
# Description: Get RAW data from camera, encode it, decode and display it
############################################################################
CameraToDisplay() {
if [ $SHOW_FPS ]; then
SINK="fpsdisplaysink name=fpssink text-overlay=false video-sink=\"$SINK_NAME\" sync=true -v"
else
SINK="$SINK_NAME"
fi
if [ $NUM_FRAMES ]; then
V4L2SRC="$V4L2SRC num-buffers=$NUM_FRAMES"
AUDIO_BUFFERS=$(($NUM_FRAMES*100/$FRAME_RATE))
fi
AUDIO_SRC_BASE="$AUDIO_SRC"
AUDIO_SINK_BASE="$AUDIO_SINK"
case $AUDIODEC_TYPE in
"aac")
AUDIODEC="faad"
AUDIOENC="faac";;
"vorbis")
AUDIODEC="vorbisdec"
AUDIOENC="vorbisenc";;
*)
if ! [ -z $AUDIODEC_TYPE ]; then
ErrorMsg "Invalid audio codec type specified, please specify either vorbis or aac"
fi
esac
IFS='x' read WIDTH HEIGHT <<< "$VIDEO_SIZE"
CAMERA_CAPS="video/x-raw,width=$WIDTH,height=$HEIGHT,framerate=$FRAME_RATE/1"
VIDEOCONVERT="videoconvert"
VIDEOCONVERT_CAPS="video/x-raw, format=\(string\)NV12"
if [ -z $SET_ENTROPY_BUF ]; then
INTERNAL_ENTROPY_BUFFERS="6"
fi
OMXH264ENC="omxh264enc num-slices=8 control-rate="low-latency" target-bitrate=$BIT_RATE prefetch-buffer=true"
OMXH265ENC="omxh265enc num-slices=8 control-rate="low-latency" target-bitrate=$BIT_RATE prefetch-buffer=true"
OMXH264DEC="$OMXH264DEC internal-entropy-buffers=$INTERNAL_ENTROPY_BUFFERS"
OMXH265DEC="$OMXH265DEC internal-entropy-buffers=$INTERNAL_ENTROPY_BUFFERS"
case $CODEC_TYPE in
"avc")
PARSER=$H264PARSE
ENCODER=$OMXH264ENC
DECODER=$OMXH264DEC
CAMERA_CAPS_ENC="video/x-h264";;
"hevc")
PARSER=$H265PARSE
ENCODER=$OMXH265ENC
DECODER=$OMXH265DEC
CAMERA_CAPS_ENC="video/x-h265";;
esac
restartPulseAudio
setAudioSrcProps
if ! [ -z $AUDIO_OUTPUT ] && [ $AUDIO_SINK_BASE != "autoaudiosink" ]; then
AUDIO_SINK="$AUDIO_SINK device=\"$AUDIO_OUTPUT\""
fi
if [ -z $AUDIODEC_TYPE ]; then
pipeline="$GST_LAUNCH $V4L2SRC ! $CAMERA_CAPS ! $VIDEOCONVERT ! $VIDEOCONVERT_CAPS ! $ENCODER ! $CAMERA_CAPS_ENC ! $QUEUE ! $DECODER ! $QUEUE max-size-bytes=0 ! $SINK"
else
if [ "$AUDIO_SRC_BASE" == "pulsesrc" ] && [ "$AUDIO_SINK_BASE" == "pulsesink" ]; then
pipeline="$GST_LAUNCH $V4L2SRC ! $CAMERA_CAPS ! $VIDEOCONVERT ! $VIDEOCONVERT_CAPS ! $ENCODER ! $CAMERA_CAPS_ENC ! $QUEUE ! $DECODER ! $QUEUE max-size-bytes=0 ! $SINK $AUDIO_SRC ! $QUEUE ! $AUDIOENC ! $AUDIODEC ! $AUDIO_SINK"
else
pipeline="$GST_LAUNCH $V4L2SRC ! $CAMERA_CAPS ! $VIDEOCONVERT ! $VIDEOCONVERT_CAPS ! $ENCODER ! $CAMERA_CAPS_ENC ! $QUEUE ! $DECODER ! $QUEUE max-size-bytes=0 ! $SINK $AUDIO_SRC ! $QUEUE ! $AUDIOCONVERT ! $AUDIOENC ! $QUEUE ! $AUDIODEC ! $AUDIOCONVERT ! $AUDIORESAMPLE ! $AUDIO_CAPS ! $AUDIO_SINK"
fi
fi
runGstPipeline "$pipeline"
}
# Command Line Argument Parsing
args=$(getopt -o "i:v:d:s:c:o:a:b:n:e:r:fh" --long "input-path:,video-capture-device:,display-device:,video-size:,audio-type:,codec-type:,sink-name:,num-frames:,bit-rate:,internal-entropy-buffers:,audio-output:,frame-rate:,show-fps,help,use-alsasrc,use-pulsesrc,use-alsasink,use-pulsesink" -- "$@")
[ $? -ne 0 ] && usage && exit -1
trap catchCTRL_C SIGINT
parseCommandLineArgs
checkforEmptyVar "${checkEmpty[@]}"
if [ -z $VIDEO_SIZE ]; then
VIDEO_SIZE="640x480"
echo "Video Size is not specified in args hence using 640x480 as default value"
fi
if [ -z $BIT_RATE ];then
BIT_RATE=1000
fi
if ! [ -z $AUDIODEC_TYPE ]; then
audioSetting
fi
RegSetting
DisableDPMS
CameraToDisplay
restoreContext
| Xilinx/meta-petalinux | recipes-multimedia/gstreamer/gstreamer-vcu-examples/vcu-demo-camera-encode-decode-display.sh | Shell | mit | 7,314 |
#!/bin/bash
# This script starts the ROS message log viewer.
rqt_console
| bdjukic/selfdriving-robot-car | scripts/start_log_viewer.sh | Shell | mit | 73 |
#!/bin/bash
# Define colors
GREEN="\033[0;32m"
RED="\033[1;31m"
RESET="\033[0m"
check_dependencies() {
local notfound=0
local deps=( grep awk sed )
for dep in ${deps[*]}
do
if [ -z $(which $dep) ]; then
echo -e "${dep}: \t${RED}not found${RESET}"
notfound=1
else
echo -e "${dep}: \t${GREEN}found${RESET}"
fi
done
return $notfound
}
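# Hypothetical usage: the return value could gate the installer, e.g.
#   check_dependencies || { echo "missing dependencies"; exit 1; }
# (the code below currently ignores the result).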
display_menu() {
IFS=$'\n'
local return_value=$4
local eval items=($1)
local i=0
local count=${#items[*]}
let count=count-1
local length=0
for j in ${items[@]}; do
if [ $length -lt ${#j} ]; then
length=${#j}
fi
done
let length=length+4
echo
echo $2
tput sc
while [ 0 ]; do
tput rc
if [ $i -lt 0 ]; then i=0; fi
if [ $i -eq ${#items[*]} ]; then let i=i-1; fi
for ((a=0; a<=$count; a++)); do
if [ $a -eq $i ]; then
tput rev
else
tput dim
fi
printf "\r%*s" $length ""
echo -en "\r"
if [ $a -eq $i ]; then
echo -en " > "
else
echo -en " "
fi
echo -e "${items[a]} ${RESET}"
done;
read -sn 1 twiddle
case "$twiddle" in
"B")
let i=i+1
;;
"A")
let i=i-1
;;
"")
eval "$3 ${items[$i]}"
read -sn 1 confirm
if [ "$confirm" == "y" -o "$confirm" == "Y" ]; then
break
else
tput cuu1
tput el
tput cuu1
tput el
tput cuu1
tput el
tput rc
fi
;;
esac
done
eval $return_value="'${items[$i]}'"
}
choose_sd() {
local return_value=$1
display_menu "$(df -h | grep "^/")" "Please select the drive you wish to install to" confirm_sd choice
eval $return_value='$choice'
}
confirm_sd() {
local drive=$(echo "$1" | awk '{print $1}')
local dev=$(diskutil info ${drive} | sed -En 's/ *Part Of Whole: *(.*)/\1/p')
local mountpoint=$(diskutil info ${drive} | sed -En 's/ *Mount Point: *(.*)/\1/p')
echo
echo -e "${RED}WARNING: This will wipe all data on the drive.${RESET}"
echo "Are you sure you want to install to ${mountpoint} on ${dev} (y/N)?"
}
search_for_distro() {
local return_value=$1
distros=$(find . -name "*.img" -maxdepth 1)
if [ -z $distros ]; then
echo "ERROR: No distribution image found."
exit
fi
if [ ${#distros[*]} -eq 1 ]; then
dist=$distros
else
display_menu "${distros}" "Please select the distribution you wish to install" confirm_distro dist
fi
eval $return_value='$dist'
}
confirm_distro() {
local dist=$(echo $1 | sed -En 's/.\/(.*)/\1/p')
echo
echo "You've selected to install ${dist}. Correct (y/N)?"
}
flash_card() {
echo "Installing $1 to $2..."
local filesize="$(stat -r $1 | awk '{print $8}')"
# dd the image whilst displaying a progress bar
dd bs=1m if=$1 2>/dev/null | pv -pe -s $filesize | dd bs=1m of=$2 2>/dev/null
return 1
}
# just in case the last program didn't clean up properly
tput sgr0
# OS name
OS=$(uname -s)
# check dependencies
check_dependencies
choose_sd sd
drive=$(echo "${sd}" | awk '{print $1}')
dev=$(diskutil info ${drive} | sed -En 's/ *Part Of Whole: *(.*)/\1/p')
node=$(diskutil info ${dev} | sed -En 's/ *Device Node: *(.*)/\1/p')
# unmount the drive
disktool -u $dev
search_for_distro distro
flash_card $distro $node
# eject the drive
diskutil eject $node
echo "Your Pi is now complete. Thank you for visiting the Pi Factory."
| bytespider/pi_factory | sdflash.sh | Shell | mit | 3,263 |
#!/bin/bash
set -e
cleanup() {
pkill send-events.py || true
rm -rf cavalieri-rules || true
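# Truncate the test mailbox: "cat > file" copies stdin, which is presumably
# empty/closed under CI, so this simply empties /var/mail/ubuntu.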
sudo sh -c "cat > /var/mail/ubuntu"
}
git clone https://github.com/juruen/cavalieri-rules.git
cp rules-mail.cpp cavalieri-rules/rules.cpp
cd cavalieri-rules && mkdir build && cd build && cmake .. && make
export LD_LIBRARY_PATH=$(pwd)
valgrind --error-exitcode=1 --show-possibly-lost=no \
--workaround-gcc296-bugs=yes --log-file=../../valgrind.out \
cavalieri -v 0 -rules_directory . -index_expire_interval 10000 &
valgrind_pid=$!
cd ../..
sleep 5
./send-events.py 100 &
for i in $(seq 0 300); do
EMAILS=$(sudo sh -c 'grep "^To:" /var/mail/ubuntu | wc -l') || true
echo "EMAILS: ${EMAILS}"
if [ "$EMAILS" == "100" ]; then
cleanup
sleep 10
kill -INT $valgrind_pid
if ! wait $valgrind_pid; then
echo "valgrind reported an error"
cat valgrind.out
exit 1
fi
grep -q "definitely lost: 0 bytes" valgrind.out \
|| (cat valgrind.out && false)
exit 0
fi
sleep 1
done
echo "email test failed"
cleanup
exit 1
| juruen/cavalieri | ci/email-test/run-memory-check.sh | Shell | mit | 1,108 |
#!/bin/bash
# Set these variables
PROJECT_NAME="project_name"
PROJECT_URL="$PROJECT_NAME.com"
# Define a timestamp function
timestamp() {
date +"%c"
}
# Set up Drupal SOLR
WRITE_FILE="/home/vagrant/.solr_setup"
SOLR="/var/solr/collection1"
MODULE_CONF="/var/www/$PROJECT_URL/htdocs/sites/all/modules/contrib/search_api_solr/solr-conf"
SOLR_VERSION="3.x"
DVMCONF="/var/solr_files"
if [ ! -e $WRITE_FILE ]; then
sudo cp -a $MODULE_CONF/$SOLR_VERSION/. $SOLR/conf/
sudo chown -R solr:solr $SOLR/conf
sudo service solr restart
touch $WRITE_FILE
echo "$(timestamp): Updated conf from file." >> $WRITE_FILE
else
echo "$(timestamp): SOLR setup has already been run." >> $WRITE_FILE
fi
| vml-jbartlett/JB-DrupalVM | dvm/scripts/post/provision/solr_setup.sh | Shell | mit | 695 |
#!/bin/sh
set -e
echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
SWIFT_STDLIB_PATH="${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}"
install_framework()
{
if [ -r "${BUILT_PRODUCTS_DIR}/$1" ]; then
local source="${BUILT_PRODUCTS_DIR}/$1"
elif [ -r "${BUILT_PRODUCTS_DIR}/$(basename "$1")" ]; then
local source="${BUILT_PRODUCTS_DIR}/$(basename "$1")"
elif [ -r "$1" ]; then
local source="$1"
fi
local destination="${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
if [ -L "${source}" ]; then
echo "Symlinked..."
source="$(readlink "${source}")"
fi
# use filter instead of exclude so missing patterns don't throw errors
echo "rsync -av --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${destination}\""
rsync -av --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${destination}"
local basename
basename="$(basename -s .framework "$1")"
binary="${destination}/${basename}.framework/${basename}"
if ! [ -r "$binary" ]; then
binary="${destination}/${basename}"
fi
# Strip invalid architectures so "fat" simulator / device frameworks work on device
if [[ "$(file "$binary")" == *"dynamically linked shared library"* ]]; then
strip_invalid_archs "$binary"
fi
# Resign the code if required by the build settings to avoid unstable apps
code_sign_if_enabled "${destination}/$(basename "$1")"
# Embed linked Swift runtime libraries. No longer necessary as of Xcode 7.
if [ "${XCODE_VERSION_MAJOR}" -lt 7 ]; then
local swift_runtime_libs
swift_runtime_libs=$(xcrun otool -LX "$binary" | grep --color=never @rpath/libswift | sed -E s/@rpath\\/\(.+dylib\).*/\\1/g | uniq -u && exit ${PIPESTATUS[0]})
for lib in $swift_runtime_libs; do
echo "rsync -auv \"${SWIFT_STDLIB_PATH}/${lib}\" \"${destination}\""
rsync -auv "${SWIFT_STDLIB_PATH}/${lib}" "${destination}"
code_sign_if_enabled "${destination}/${lib}"
done
fi
}
# Signs a framework with the provided identity
code_sign_if_enabled() {
if [ -n "${EXPANDED_CODE_SIGN_IDENTITY}" -a "${CODE_SIGNING_REQUIRED}" != "NO" -a "${CODE_SIGNING_ALLOWED}" != "NO" ]; then
# Use the current code_sign_identity
echo "Code Signing $1 with Identity ${EXPANDED_CODE_SIGN_IDENTITY_NAME}"
local code_sign_cmd="/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} ${OTHER_CODE_SIGN_FLAGS} --preserve-metadata=identifier,entitlements '$1'"
if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
code_sign_cmd="$code_sign_cmd &"
fi
echo "$code_sign_cmd"
eval "$code_sign_cmd"
fi
}
# Strip invalid architectures
strip_invalid_archs() {
binary="$1"
# Get architectures for current file
archs="$(lipo -info "$binary" | rev | cut -d ':' -f1 | rev)"
stripped=""
for arch in $archs; do
if ! [[ "${VALID_ARCHS}" == *"$arch"* ]]; then
# Strip non-valid architectures in-place
lipo -remove "$arch" -output "$binary" "$binary" || exit 1
stripped="$stripped $arch"
fi
done
if [[ "$stripped" ]]; then
echo "Stripped $binary of architectures:$stripped"
fi
}
if [[ "$CONFIGURATION" == "Debug" ]]; then
install_framework "$BUILT_PRODUCTS_DIR/BaseFoundation/BaseFoundation.framework"
install_framework "$BUILT_PRODUCTS_DIR/Masonry/Masonry.framework"
fi
if [[ "$CONFIGURATION" == "Release" ]]; then
install_framework "$BUILT_PRODUCTS_DIR/BaseFoundation/BaseFoundation.framework"
install_framework "$BUILT_PRODUCTS_DIR/Masonry/Masonry.framework"
fi
if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
wait
fi
| liushuorepo/BaseFoundation | Example/Pods/Target Support Files/Pods-BaseFoundation_Example/Pods-BaseFoundation_Example-frameworks.sh | Shell | mit | 3,867 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for USN-2919-1
#
# Security announcement date: 2016-03-03 00:00:00 UTC
# Script generation date: 2017-01-01 21:05:14 UTC
#
# Operating System: Ubuntu 15.10
# Architecture: x86_64
#
# Vulnerable packages fix on version:
# - libjasper1:1.900.1-debian1-2.4ubuntu0.15.10.1
#
# Last versions recommended by security team:
# - libjasper1:1.900.1-debian1-2.4ubuntu0.15.10.1
#
# CVE List:
# - CVE-2016-1577
# - CVE-2016-2116
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo apt-get install --only-upgrade libjasper1=1.900.1-debian1-2.4ubuntu0.15.10.1 -y
| Cyberwatch/cbw-security-fixes | Ubuntu_15.10/x86_64/2016/USN-2919-1.sh | Shell | mit | 698 |
export TIMEOUT_SCALE_FACTOR=15
export TEST_PACKAGES_EXCLUDE="less"
export SELF_TEST_EXCLUDE="^can't publish package with colons|^old cli tests|^logs - logged (in|out)|^mongo - logged (in|out)|^minifiers can't register non-js|^minifiers: apps can't use|^compiler plugins - addAssets"
# Don't print as many progress indicators
export EMACS=t
# PhantomJS has been removed from dev_bundle/lib/node_modules
# (#6905), but self-test still needs it, so install it now.
./meteor npm install -g phantomjs-prebuilt browserstack-webdriver
# Make sure we have initialized and updated submodules such as
# packages/non-core/blaze.
git submodule update --init --recursive
# run different jobs based on CircleCI parallel container index
case $CIRCLE_NODE_INDEX in
0)
echo "Running warehouse self-tests"
./meteor self-test --headless \
--with-tag "custom-warehouse" \
--exclude "$SELF_TEST_EXCLUDE"
;;
1)
echo "Running self-test (1): A-Com"
./meteor self-test --headless \
--file "^[a-b]|^c[a-n]|^co[a-l]|^compiler-plugins" \
--without-tag "custom-warehouse" \
--exclude "$SELF_TEST_EXCLUDE"
;;
2)
echo "Running self-test (2): Con-K"
./meteor self-test --headless \
--file "^co[n-z]|^c[p-z]|^[d-k]" \
--without-tag "custom-warehouse" \
--exclude "$SELF_TEST_EXCLUDE"
;;
3)
echo "Running self-test (3): L-O"
./meteor self-test --headless \
--file "^[l-o]" \
--without-tag "custom-warehouse" \
--exclude "$SELF_TEST_EXCLUDE"
;;
4)
echo "Running self-test (4): P"
./meteor self-test --headless \
--file "^p" \
--without-tag "custom-warehouse" \
--exclude "$SELF_TEST_EXCLUDE"
;;
5)
echo "Running self-test (5): Run"
./meteor self-test --headless \
--file "^run" \
--without-tag "custom-warehouse" \
--exclude "$SELF_TEST_EXCLUDE"
;;
6)
echo "Running self-test (6): R-S"
./meteor self-test --headless \
--file "^r(?!un)|^s" \
--without-tag "custom-warehouse" \
--exclude "$SELF_TEST_EXCLUDE"
;;
7)
echo "Running self-test (7): Sp-Z"
./meteor self-test --headless \
--file "^[t-z]|^command-line" \
--without-tag "custom-warehouse" \
--exclude "$SELF_TEST_EXCLUDE"
;;
esac
| chasertech/meteor | scripts/ci.sh | Shell | mit | 2,243 |
alias fw="sudo firewall-cmd"
alias fwp="sudo firewall-cmd --permanent"
alias fwr="sudo firewall-cmd --reload"
alias fwrp="sudo firewall-cmd --runtime-to-permanent"
| ahmadassaf/oh-my-zsh | _aliases/firewalld.alias.zsh | Shell | mit | 163 |
#!/bin/bash
curl -k \
-o bin/js/phaser.js https://raw.githubusercontent.com/photonstorm/phaser-ce/master/build/phaser.js \
-o bin/js/phaser.map https://raw.githubusercontent.com/photonstorm/phaser-ce/master/build/phaser.map \
-o bin/js/phaser.min.js https://raw.githubusercontent.com/photonstorm/phaser-ce/master/build/phaser.min.js \
-o tsd/box2d.d.ts https://raw.githubusercontent.com/photonstorm/phaser-ce/master/typescript/box2d.d.ts \
-o tsd/p2.d.ts https://raw.githubusercontent.com/photonstorm/phaser-ce/master/typescript/p2.d.ts \
-o tsd/phaser.comments.d.ts https://raw.githubusercontent.com/photonstorm/phaser-ce/master/typescript/phaser.comments.d.ts \
-o tsd/phaser.d.ts https://raw.githubusercontent.com/photonstorm/phaser-ce/master/typescript/phaser.d.ts \
-o tsd/phaser_box2d.d.ts https://raw.githubusercontent.com/photonstorm/phaser-ce/master/typescript/phaser_box2d.d.ts \
-o tsd/pixi.comments.d.ts https://raw.githubusercontent.com/photonstorm/phaser-ce/master/typescript/pixi.comments.d.ts \
-o tsd/pixi.d.ts https://raw.githubusercontent.com/photonstorm/phaser-ce/master/typescript/pixi.d.ts
| djfdat/phaser-typescript-vscode-boilerplate | update_phaser.sh | Shell | mit | 1,111 |
error() {
echo " ! $*" >&2
exit 1
}
status() {
echo "-----> $*"
}
# sed -l basically makes sed replace and buffer through stdin to stdout
# so you get updates while the command runs and don't wait for the end
# e.g. npm install | indent
indent() {
c='s/^/ /'
case $(uname) in
Darwin) sed -l "$c";; # mac/bsd sed: -l buffers on line boundaries
*) sed -u "$c";; # unix/gnu sed: -u unbuffered (arbitrary) chunks of data
esac
}
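# Illustrative usage:
#   status "Installing dependencies"
#   npm install 2>&1 | indent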
export_env_dir() {
env_dir=$1
whitelist_regex=${2:-''}
blacklist_regex=${3:-'^(PATH|GIT_DIR|CPATH|CPPATH|LD_PRELOAD|LIBRARY_PATH)$'}
if [ -d "$env_dir" ]; then
for e in $(ls $env_dir); do
echo "$e" | grep -E "$whitelist_regex" | grep -qvE "$blacklist_regex" &&
export "$e=$(cat $env_dir/$e)"
:
done
fi
}
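# Hypothetical usage from a buildpack compile step, where $3 is the ENV_DIR
# passed by the buildpack API (the whitelist regex here is illustrative):
#   export_env_dir "$3" '^(NODE_ENV|NPM_CONFIG_.*)$'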
| willian/heroku-buildpack-static | bin/common.sh | Shell | mit | 801 |
USER=${USER:-super}
PASS=${PASS:-$(pwgen -s -1 16)}
pre_start_action() {
# Echo out info to later obtain by running `docker logs container_name`
echo "MARIADB_USER=$USER"
echo "MARIADB_PASS=$PASS"
echo "MARIADB_DATA_DIR=$DATA_DIR"
# test if DATA_DIR has content
if [[ ! "$(ls -A $DATA_DIR)" ]]; then
echo "Initializing MariaDB at $DATA_DIR"
# Copy the data that we generated within the container to the empty DATA_DIR.
cp -R /var/lib/mysql/* $DATA_DIR
fi
# Ensure mysql owns the DATA_DIR
chown -R mysql $DATA_DIR
chown root $DATA_DIR/debian*.flag
# test if CONFIG_DIR exists or has no content
if [ ! -d "$CONFIG_DIR" ] || [ ! "$(ls -A $CONFIG_DIR)" ]; then
echo "Initializing config directory at $CONFIG_DIR"
# move config to /config and link the directory, allows mounting & backup of config
mkdir -p $CONFIG_DIR
mv /var/www/html/config/* $CONFIG_DIR
rm -rf /var/www/html/config/
ln -s $CONFIG_DIR /var/www/html
fi
# ensure permission
chmod 755 $CONFIG_DIR
chown -R www-data $CONFIG_DIR
}
post_start_action() {
# The password for 'debian-sys-maint'@'localhost' is auto generated.
# The database inside of DATA_DIR may not have been generated with this password.
# So, we need to set this for our database to be portable.
DB_MAINT_PASS=$(cat /etc/mysql/debian.cnf | grep -m 1 "password\s*=\s*"| sed 's/^password\s*=\s*//')
mysql -u root -e \
"GRANT ALL PRIVILEGES ON *.* TO 'debian-sys-maint'@'localhost' IDENTIFIED BY '$DB_MAINT_PASS';"
# Create the superuser.
mysql -u root <<-EOF
DELETE FROM mysql.user WHERE user = '$USER';
FLUSH PRIVILEGES;
CREATE USER '$USER'@'localhost' IDENTIFIED BY '$PASS';
GRANT ALL PRIVILEGES ON *.* TO '$USER'@'localhost' WITH GRANT OPTION;
CREATE USER '$USER'@'%' IDENTIFIED BY '$PASS';
GRANT ALL PRIVILEGES ON *.* TO '$USER'@'%' WITH GRANT OPTION;
EOF
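# /firstrun is presumably a marker created by the image's entrypoint (it is
# not created in this script); removing it prevents re-running this setup.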
rm /firstrun
}
| chris-rock/docker-piwik | scripts/first_run.sh | Shell | mit | 1,941 |
#!/bin/bash
echo -e "\033[1;36mServer started.\033[0m"
gunicorn manage:app -c gunicorn.conf
| pastgift/seed-website-py | run-deploy.sh | Shell | mit | 90 |
#!/bin/bash
# Load RVM into a shell session *as a function*
if [[ -s "$HOME/.rvm/scripts/rvm" ]] ; then
# First try to load from a user install
source "$HOME/.rvm/scripts/rvm"
elif [[ -s "/usr/local/rvm/scripts/rvm" ]] ; then
# Then try to load from a root install
source "/usr/local/rvm/scripts/rvm"
else
printf "ERROR: An RVM installation was not found.\n"
fi
rvm use jruby@bullring
irb -rubygems -I lib -r bullring/workers/rhino_server.rb
| kindlinglabs/bullring | lib/scripts/server_console.sh | Shell | mit | 459 |
# My awesome bash prompt
#
# Copyright (c) 2012 "Cowboy" Ben Alman
# Licensed under the MIT license.
# http://benalman.com/about/license/
#
# Example:
# [master:!?][cowboy@CowBook:~/.dotfiles]
# [11:14:45] $
#
# Read more (and see a screenshot) in the "Prompt" section of
# https://github.com/cowboy/dotfiles
# ANSI CODES - SEPARATE MULTIPLE VALUES WITH ;
#
# 0 reset 4 underline
# 1 bold 7 inverse
#
# FG BG COLOR FG BG COLOR
# 30 40 black 34 44 blue
# 31 41 red 35 45 magenta
# 32 42 green 36 46 cyan
# 33 43 yellow 37 47 white
AGKOZAK_PROMPT_DIRTRIM=4
AGKOZAK_BLANK_LINES=1
AGKOZAK_LEFT_PROMPT_ONLY=1
AGKOZAK_CUSTOM_RPROMPT='%* (${SPACK_ENV##*/})'
AGKOZAK_PROMPT_CHAR=( ❯ ❯ ❮ )
AGKOZAK_CUSTOM_SYMBOLS=( '⇣⇡' '⇣' '⇡' '+' 'x' '!' '>' '?' 'S')
is_bash || return 1 # if not bash, skip the rest of this file
if [[ ! "${prompt_colors[@]}" ]]; then
prompt_colors=(
"36" # information color
"37" # bracket color
"31" # error color
)
if [[ "$SSH_TTY" ]]; then
# connected via ssh
prompt_colors[0]="32"
elif [[ "$USER" == "root" ]]; then
# logged in as root
prompt_colors[0]="35"
fi
fi
# Inside a prompt function, run this alias to setup local $c0-$c9 color vars.
alias prompt_getcolors='prompt_colors[9]=; local i; for i in ${!prompt_colors[@]}; do local c$i="\[\e[0;${prompt_colors[$i]}m\]"; done'
# Exit code of previous command.
function prompt_exitcode() {
prompt_getcolors
[[ $1 != 0 ]] && echo " $c2$1$c9"
}
# Git status.
function prompt_git() {
prompt_getcolors
local status output flags branch
status="$(git status 2>/dev/null)"
[[ $? != 0 ]] && return;
output="$(echo "$status" | awk '/# Initial commit/ {print "(init)"}')"
[[ "$output" ]] || output="$(echo "$status" | awk '/# On branch/ {print $4}')"
[[ "$output" ]] || output="$(git branch | perl -ne '/^\* \(detached from (.*)\)$/ ? print "($1)" : /^\* (.*)/ && print $1')"
flags="$(
echo "$status" | awk 'BEGIN {r=""} \
/^(# )?Changes to be committed:$/ {r=r "+"}\
/^(# )?Changes not staged for commit:$/ {r=r "!"}\
/^(# )?Untracked files:$/ {r=r "?"}\
END {print r}'
)"
if [[ "$flags" ]]; then
output="$output$c1:$c0$flags"
fi
echo "$c1[$c0$output$c1]$c9"
}
# hg status.
function prompt_hg() {
prompt_getcolors
local summary output bookmark flags
summary="$(hg summary 2>/dev/null)"
[[ $? != 0 ]] && return;
output="$(echo "$summary" | awk '/branch:/ {print $2}')"
bookmark="$(echo "$summary" | awk '/bookmarks:/ {print $2}')"
flags="$(
echo "$summary" | awk 'BEGIN {r="";a=""} \
/(modified)/ {r= "+"}\
/(unknown)/ {a= "?"}\
END {print r a}'
)"
output="$output:$bookmark"
if [[ "$flags" ]]; then
output="$output$c1:$c0$flags"
fi
echo "$c1[$c0$output$c1]$c9"
}
# SVN info.
function prompt_svn() {
prompt_getcolors
local info="$(svn info . 2> /dev/null)"
local last current
if [[ "$info" ]]; then
last="$(echo "$info" | awk '/Last Changed Rev:/ {print $4}')"
current="$(echo "$info" | awk '/Revision:/ {print $2}')"
echo "$c1[$c0$last$c1:$c0$current$c1]$c9"
fi
}
# Maintain a per-execution call stack.
prompt_stack=()
trap 'prompt_stack=("${prompt_stack[@]}" "$BASH_COMMAND")' DEBUG
function prompt_command() {
local exit_code=$?
# If the first command in the stack is prompt_command, no command was run.
# Set exit_code to 0 and reset the stack.
[[ "${prompt_stack[0]}" == "prompt_command" ]] && exit_code=0
prompt_stack=()
# Manually load z here, after $? is checked, to keep $? from being clobbered.
[[ "$(type -t _z)" ]] && _z --add "$(pwd -P 2>/dev/null)" 2>/dev/null
# While the simple_prompt environment var is set, disable the awesome prompt.
[[ "$simple_prompt" ]] && PS1='\n$ ' && return
prompt_getcolors
# http://twitter.com/cowboy/status/150254030654939137
PS1="\n"
# svn: [repo:lastchanged]
#PS1="$PS1$(prompt_svn)"
# git: [branch:flags]
PS1="$PS1$(prompt_git)"
# hg: [branch:flags]
#PS1="$PS1$(prompt_hg)"
# misc: [cmd#:hist#]
# PS1="$PS1$c1[$c0#\#$c1:$c0!\!$c1]$c9"
# path: [user@host:path]
PS1="$PS1$c1[$c0\u$c1@$c0\h$c1:$c0\w$c1]$c9"
PS1="$PS1\n"
# date: [HH:MM:SS]
PS1="$PS1$c1[$c0$(date +"%H$c1:$c0%M$c1:$c0%S")$c1]$c9"
# exit code: 127
PS1="$PS1$(prompt_exitcode "$exit_code")"
PS1="$PS1 \$ "
}
PROMPT_COMMAND="prompt_command"
| SteVwonder/dotfiles | source/50_prompt.sh | Shell | mit | 4,466 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for DSA-3423-1
#
# Security announcement date: 2015-12-16 00:00:00 UTC
# Script generation date: 2017-01-01 21:07:41 UTC
#
# Operating System: Debian 7 (Wheezy)
# Architecture: i386
#
# Vulnerable packages fix on version:
# - cacti:0.8.8a+dfsg-5+deb7u7
#
# Last versions recommended by security team:
# - cacti:0.8.8a+dfsg-5+deb7u10
#
# CVE List:
# - CVE-2015-8369
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo apt-get install --only-upgrade cacti=0.8.8a+dfsg-5+deb7u10 -y
| Cyberwatch/cbw-security-fixes | Debian_7_(Wheezy)/i386/2015/DSA-3423-1.sh | Shell | mit | 626 |
security delete-keychain ios-build.keychain
| nikburnt/easeqs-objc | Scripts/travis/remove-key.sh | Shell | mit | 43 |
#!/usr/bin/env bash
xkbcomp -I. keymap.xkb $DISPLAY 2>/dev/null
| jdudy/dot-files | unix-like/linux/xkb/install.sh | Shell | mit | 65 |
#!/bin/bash
INFOFILE="$2"
FROM=`pwd`
cd "$1"
COUNTER=0
for i in *.wav; do
if [ -e "$i" ]; then
COUNTER=$[$COUNTER+1]
FILE=`basename "$i" .wav`
ROW=$[COUNTER+4]
ARTIST=`sed -n 1p $INFOFILE`
ALBUM=`sed -n 2p $INFOFILE`
RELEASEYEAR=`sed -n 3p $INFOFILE`
GENRE=`sed -n 4p $INFOFILE`
SONG=`sed -n ${ROW}p $INFOFILE`
TRACK=`printf %02d $COUNTER`
NEWFILENAME="$TRACK - $ARTIST - $SONG"
echo $NEWFILENAME
lame -h -b 320 "$i" "$NEWFILENAME.mp3"
id3v2 --artist "$ARTIST" --album "$ALBUM" --year "$RELEASEYEAR" --genre "$GENRE" --song "$SONG" --track "$TRACK" "$NEWFILENAME.mp3"
fi
done
cd $FROM
| dirtylabcoat/sillytilities | wav2mp3.sh | Shell | mit | 613 |
#!/bin/sh
set -e
set -u
set -o pipefail
function on_error {
echo "$(realpath -mq "${0}"):$1: error: Unexpected failure"
}
trap 'on_error $LINENO' ERR
if [ -z ${UNLOCALIZED_RESOURCES_FOLDER_PATH+x} ]; then
# If UNLOCALIZED_RESOURCES_FOLDER_PATH is not set, then there's nowhere for us to copy
# resources to, so exit 0 (signalling the script phase was successful).
exit 0
fi
mkdir -p "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
RESOURCES_TO_COPY=${PODS_ROOT}/resources-to-copy-${TARGETNAME}.txt
> "$RESOURCES_TO_COPY"
XCASSET_FILES=()
# This protects against multiple targets copying the same framework dependency at the same time. The solution
# was originally proposed here: https://lists.samba.org/archive/rsync/2008-February/020158.html
RSYNC_PROTECT_TMP_FILES=(--filter "P .*.??????")
case "${TARGETED_DEVICE_FAMILY:-}" in
1,2)
TARGET_DEVICE_ARGS="--target-device ipad --target-device iphone"
;;
1)
TARGET_DEVICE_ARGS="--target-device iphone"
;;
2)
TARGET_DEVICE_ARGS="--target-device ipad"
;;
3)
TARGET_DEVICE_ARGS="--target-device tv"
;;
4)
TARGET_DEVICE_ARGS="--target-device watch"
;;
*)
TARGET_DEVICE_ARGS="--target-device mac"
;;
esac
install_resource()
{
if [[ "$1" = /* ]] ; then
RESOURCE_PATH="$1"
else
RESOURCE_PATH="${PODS_ROOT}/$1"
fi
if [[ ! -e "$RESOURCE_PATH" ]] ; then
cat << EOM
error: Resource "$RESOURCE_PATH" not found. Run 'pod install' to update the copy resources script.
EOM
exit 1
fi
case $RESOURCE_PATH in
*.storyboard)
echo "ibtool --reference-external-strings-file --errors --warnings --notices --minimum-deployment-target ${!DEPLOYMENT_TARGET_SETTING_NAME} --output-format human-readable-text --compile ${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \"$RESOURCE_PATH\" .storyboard`.storyboardc $RESOURCE_PATH --sdk ${SDKROOT} ${TARGET_DEVICE_ARGS}" || true
ibtool --reference-external-strings-file --errors --warnings --notices --minimum-deployment-target ${!DEPLOYMENT_TARGET_SETTING_NAME} --output-format human-readable-text --compile "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \"$RESOURCE_PATH\" .storyboard`.storyboardc" "$RESOURCE_PATH" --sdk "${SDKROOT}" ${TARGET_DEVICE_ARGS}
;;
*.xib)
echo "ibtool --reference-external-strings-file --errors --warnings --notices --minimum-deployment-target ${!DEPLOYMENT_TARGET_SETTING_NAME} --output-format human-readable-text --compile ${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \"$RESOURCE_PATH\" .xib`.nib $RESOURCE_PATH --sdk ${SDKROOT} ${TARGET_DEVICE_ARGS}" || true
ibtool --reference-external-strings-file --errors --warnings --notices --minimum-deployment-target ${!DEPLOYMENT_TARGET_SETTING_NAME} --output-format human-readable-text --compile "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \"$RESOURCE_PATH\" .xib`.nib" "$RESOURCE_PATH" --sdk "${SDKROOT}" ${TARGET_DEVICE_ARGS}
;;
*.framework)
echo "mkdir -p ${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}" || true
mkdir -p "${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" $RESOURCE_PATH ${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}" || true
rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" "$RESOURCE_PATH" "${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
;;
*.xcdatamodel)
echo "xcrun momc \"$RESOURCE_PATH\" \"${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$RESOURCE_PATH"`.mom\"" || true
xcrun momc "$RESOURCE_PATH" "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$RESOURCE_PATH" .xcdatamodel`.mom"
;;
*.xcdatamodeld)
echo "xcrun momc \"$RESOURCE_PATH\" \"${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$RESOURCE_PATH" .xcdatamodeld`.momd\"" || true
xcrun momc "$RESOURCE_PATH" "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$RESOURCE_PATH" .xcdatamodeld`.momd"
;;
*.xcmappingmodel)
echo "xcrun mapc \"$RESOURCE_PATH\" \"${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$RESOURCE_PATH" .xcmappingmodel`.cdm\"" || true
xcrun mapc "$RESOURCE_PATH" "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$RESOURCE_PATH" .xcmappingmodel`.cdm"
;;
*.xcassets)
ABSOLUTE_XCASSET_FILE="$RESOURCE_PATH"
XCASSET_FILES+=("$ABSOLUTE_XCASSET_FILE")
;;
*)
echo "$RESOURCE_PATH" || true
echo "$RESOURCE_PATH" >> "$RESOURCES_TO_COPY"
;;
esac
}
if [[ "$CONFIGURATION" == "Debug" ]]; then
install_resource "${PODS_ROOT}/Intercom/Intercom/Intercom.framework/Versions/A/Resources/Intercom.bundle"
install_resource "${PODS_ROOT}/Intercom/Intercom/Intercom.framework/Versions/A/Resources/IntercomTranslations.bundle"
fi
if [[ "$CONFIGURATION" == "Release" ]]; then
install_resource "${PODS_ROOT}/Intercom/Intercom/Intercom.framework/Versions/A/Resources/Intercom.bundle"
install_resource "${PODS_ROOT}/Intercom/Intercom/Intercom.framework/Versions/A/Resources/IntercomTranslations.bundle"
fi
mkdir -p "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
rsync -avr --copy-links --no-relative --exclude '*/.svn/*' --files-from="$RESOURCES_TO_COPY" / "${TARGET_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
if [[ "${ACTION}" == "install" ]] && [[ "${SKIP_INSTALL}" == "NO" ]]; then
mkdir -p "${INSTALL_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
rsync -avr --copy-links --no-relative --exclude '*/.svn/*' --files-from="$RESOURCES_TO_COPY" / "${INSTALL_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
fi
rm -f "$RESOURCES_TO_COPY"
if [[ -n "${WRAPPER_EXTENSION}" ]] && [ "`xcrun --find actool`" ] && [ -n "${XCASSET_FILES:-}" ]
then
# Find all other xcassets (this unfortunately includes those of path pods and other targets).
OTHER_XCASSETS=$(find "$PWD" -iname "*.xcassets" -type d)
while read line; do
if [[ $line != "${PODS_ROOT}*" ]]; then
XCASSET_FILES+=("$line")
fi
done <<<"$OTHER_XCASSETS"
if [ -z ${ASSETCATALOG_COMPILER_APPICON_NAME+x} ]; then
printf "%s\0" "${XCASSET_FILES[@]}" | xargs -0 xcrun actool --output-format human-readable-text --notices --warnings --platform "${PLATFORM_NAME}" --minimum-deployment-target "${!DEPLOYMENT_TARGET_SETTING_NAME}" ${TARGET_DEVICE_ARGS} --compress-pngs --compile "${BUILT_PRODUCTS_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
else
printf "%s\0" "${XCASSET_FILES[@]}" | xargs -0 xcrun actool --output-format human-readable-text --notices --warnings --platform "${PLATFORM_NAME}" --minimum-deployment-target "${!DEPLOYMENT_TARGET_SETTING_NAME}" ${TARGET_DEVICE_ARGS} --compress-pngs --compile "${BUILT_PRODUCTS_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}" --app-icon "${ASSETCATALOG_COMPILER_APPICON_NAME}" --output-partial-info-plist "${TARGET_TEMP_DIR}/assetcatalog_generated_info_cocoapods.plist"
fi
fi
|
twobitlabs/AnalyticsKit
|
AnalyticsKit/Pods/Target Support Files/Pods-AnalyticsKit/Pods-AnalyticsKit-resources.sh
|
Shell
|
mit
| 6,992 |
#!/bin/sh
set -e
# TODO: Implement this in terraform https://www.terraform.io/docs/providers/postgresql/index.html
# Check environment variables
export PGPASSWORD=${TF_VAR_secrets_cf_db_master_password:?}
api_pass=${TF_VAR_external_cc_database_password:?}
uaa_pass=${TF_VAR_external_uaa_database_password:?}
bbs_pass=${TF_VAR_external_bbs_database_password:?}
locket_pass=${TF_VAR_external_locket_database_password:?}
network_connectivity_pass=${TF_VAR_external_silk_controller_database_password:?}
network_policy_pass=${TF_VAR_external_policy_server_database_password:?}
app_autoscaler_pass=${TF_VAR_external_app_autoscaler_database_password:?}
db_address=${TF_VAR_cf_db_address:?}
# See: https://github.com/koalaman/shellcheck/wiki/SC2086#exceptions
psql_adm() { psql -h "${db_address}" -U dbadmin "$@"; }
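# psql_adm centralizes the connection flags; arguments pass through "$@"
# verbatim, which sidesteps the SC2086 word-splitting issue linked above.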
# Create roles
psql_adm -d postgres -c "SELECT rolname FROM pg_roles WHERE rolname = 'api'" \
| grep -q 'api' || psql_adm -d postgres -c "CREATE USER api WITH ROLE dbadmin"
psql_adm -d postgres -c "SELECT rolname FROM pg_roles WHERE rolname = 'uaa'" \
| grep -q 'uaa' || psql_adm -d postgres -c "CREATE USER uaa WITH ROLE dbadmin"
psql_adm -d postgres -c "SELECT rolname FROM pg_roles WHERE rolname = 'bbs'" \
| grep -q 'bbs' || psql_adm -d postgres -c "CREATE USER bbs WITH ROLE dbadmin"
psql_adm -d postgres -c "SELECT rolname FROM pg_roles WHERE rolname = 'locket'" \
| grep -q 'locket' || psql_adm -d postgres -c "CREATE USER locket WITH ROLE dbadmin"
psql_adm -d postgres -c "SELECT rolname FROM pg_roles WHERE rolname = 'network_connectivity'" \
| grep -q 'network_connectivity' || psql_adm -d postgres -c "CREATE USER network_connectivity WITH ROLE dbadmin"
psql_adm -d postgres -c "SELECT rolname FROM pg_roles WHERE rolname = 'network_policy'" \
| grep -q 'network_policy' || psql_adm -d postgres -c "CREATE USER network_policy WITH ROLE dbadmin"
psql_adm -d postgres -c "SELECT rolname FROM pg_roles WHERE rolname = 'app_autoscaler'" \
| grep -q 'app_autoscaler' || psql_adm -d postgres -c "CREATE USER app_autoscaler WITH ROLE dbadmin"
# Always update passwords
psql_adm -d postgres -c "ALTER USER api WITH PASSWORD '${api_pass}'"
psql_adm -d postgres -c "ALTER USER uaa WITH PASSWORD '${uaa_pass}'"
psql_adm -d postgres -c "ALTER USER bbs WITH PASSWORD '${bbs_pass}'"
psql_adm -d postgres -c "ALTER USER locket WITH PASSWORD '${locket_pass}'"
psql_adm -d postgres -c "ALTER USER network_connectivity WITH PASSWORD '${network_connectivity_pass}'"
psql_adm -d postgres -c "ALTER USER network_policy WITH PASSWORD '${network_policy_pass}'"
psql_adm -d postgres -c "ALTER USER app_autoscaler WITH PASSWORD '${app_autoscaler_pass}'"
for db in api uaa bbs locket network_connectivity network_policy app_autoscaler; do
# Create database
psql_adm -d postgres -l | grep -q " ${db} " || \
psql_adm -d postgres -c "CREATE DATABASE ${db} OWNER ${db}"
# Enable extensions
for ext in citext pgcrypto pg_stat_statements; do
psql_adm -d "${db}" -c "CREATE EXTENSION IF NOT EXISTS ${ext}"
done
done
|
alphagov/paas-cf
|
manifests/cf-manifest/scripts/create-cf-dbs.sh
|
Shell
|
mit
| 3,051 |
#!/bin/bash
# run the application with maven because of a known bug: https://github.com/spring-projects/spring-boot/issues/6709
RUN="mvn spring-boot:run"
NAME=goeuro-routes-check-service
LOG="spring-neo4j.log"
ACTION=$1
FILE=$2
$RUN -Drun.arguments="$ACTION,$FILE"
|
vezzoni/spring-neo4j
|
service.sh
|
Shell
|
mit
| 263 |
#!/usr/bin/env bash
set -Eeuo pipefail
epoch="$(TZ=UTC date --date "$TIMESTAMP" +%s)"
serial="$(TZ=UTC date --date "@$epoch" +%Y%m%d)"
buildArgs=()
if [ "$SUITE" = 'eol' ]; then
buildArgs+=( '--eol' )
SUITE="$CODENAME"
elif [ -n "${CODENAME:-}" ]; then
buildArgs+=( '--codename-copy' )
fi
if [ -n "${ARCH:-}" ]; then
buildArgs+=( "--arch=${ARCH}" )
if [ "$ARCH" != 'i386' ]; then
if [ "$ARCH" != 'arm64' ]; then
buildArgs+=( '--ports' )
fi
fi
fi
buildArgs+=( validate "$SUITE" "@$epoch" )
checkFile="validate/$serial/${ARCH:-amd64}/${CODENAME:-$SUITE}/rootfs.tar.xz"
mkdir -p validate
set -x
./scripts/debuerreotype-version
./docker-run.sh --pull ./examples/debian.sh "${buildArgs[@]}"
real="$(sha256sum "$checkFile" | cut -d' ' -f1)"
[ -z "$SHA256" ] || [ "$SHA256" = "$real" ]
|
debuerreotype/debuerreotype
|
.validate-debian.sh
|
Shell
|
mit
| 798 |
#!/bin/bash
cd "$(dirname "${BASH_SOURCE[0]}")" \
&& . "../../utils.sh" \
&& . "./utils.sh"
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
print_in_purple "\n Browsers\n"
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
printf "\n"
brew_install "Chrome" "google-chrome" "caskroom/cask" "cask"
brew_install "Chrome Canary" "google-chrome-canary" "caskroom/versions" "cask"
brew_install "Chromium" "chromium" "caskroom/cask" "cask"
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
printf "\n"
brew_install "Firefox" "firefox" "caskroom/cask" "cask"
brew_install "Firefox Developer" "firefoxdeveloperedition" "caskroom/versions" "cask"
brew_install "Firefox Nightly" "firefoxnightly" "caskroom/versions" "cask"
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
printf "\n"
brew_install "Flash" "flash-npapi" "caskroom/cask" "cask"
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
printf "\n"
brew_install "Opera" "opera" "caskroom/cask" "cask"
brew_install "Opera Beta" "opera-beta" "caskroom/versions" "cask"
brew_install "Opera Developer" "opera-developer" "caskroom/versions" "cask"
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# `Safari Technology Preview` requires macOS 10.11.4 or later
# https://github.com/alrra/dotfiles/issues
if is_supported_version "$(get_os_version)" "10.11.4"; then
printf "\n"
brew_install "Safari Technology Preview" "safari-technology-preview" "caskroom/versions" "cask"
fi
brew_install "WebKit" "webkit-nightly" "caskroom/versions" "cask"
|
b-boogaard/dotfiles
|
src/os/install/macos/browsers.sh
|
Shell
|
mit
| 1,647 |
#!/bin/sh
# this file installs a bunch of useful stuff on ubuntu 14.04
# to install, run "sudo <location of this file>" from your "~" directory
# variable used to store actions to take after install script is done
remindVar="reminders:"
apt-get update # To get the latest package lists
# install trash-cli to allow rebind of rm
sudo apt-get install trash-cli
# git version control
apt-get install git -y
# download sites for offline viewing
apt-get install httrack -y
# launching icons does not always give focus, temp fix
dconf write /org/compiz/profiles/unity/plugins/core/focus-prevention-level 0
# vlc media player
apt-get install vlc-nox -y
# GIF creator
add-apt-repository ppa:fossfreedom/byzanz
apt-get update
apt-get install byzanz -y
mkdir ~/.gifs #directory for gifs storage
# snipping tool
add-apt-repository ppa:shutter/ppa
apt-get update
apt-get install shutter -y
# compiz config, allows moving windows from screen to screen with "super key" + z
apt-get install compizconfig-settings-manager -y
apt-get update && sudo apt-get install compiz-plugins -y
remindVar="$remindVar\n launch compiz, and got to themes or modify put key"
# GNOME, GUI for ubuntu
apt-get install ubuntu-gnome-desktop -y
remindVar="$remindVar\n gnome: log-out and back in, install a theme. or see compiz to change to dark theme"
# puush, create and send screenshots quickly, install xclip dependency
apt-get install xclip -y
mkdir ~/puush; cd ~/puush
git clone https://github.com/sunmockyang/puush-linux.git
cd -
cd ~/puush/puush-linux/src
cp config.py.dist config.py
cd -
remindVar="$remindVar\n puush: setup API in PUUSH file inside ~/puush/puush-linux/PUUSH, chmod permission on PUUSH, and create shortcut for 'puush -b to $ and puush -d U' in compiz keboard config"
# gdm, display manager works well with GNOME, allows to set default gnome as GUI shell
apt-get install gdm -y
remindVar="$remindVar\n gdm: run command 'sudo dpkg-reconfigure gdm' and select gnome as default after installing gnome"
# htop, monitoring tool for system resources, useful for servers and desktop
apt-get install htop -y
# un-rar files
apt-get install unrar -y
# redshift (alternative to flux), turns screen to reddish at night (eases strain on eyes)
#apt-get install redshift redshift-gtk -y
# disable for now
#gtk-redshift -l 43:-79 & # setup for toronto, autorun in background using nohup, for new lat:long http://itouchmap.com/latlong.html
remindVar="$remindVar\n click redshift and add autostart functionality"
# notepad++, text editor for notes (good autosave on crash feature)
add-apt-repository ppa:notepadqq-team/notepadqq -y
apt-get update
apt-get install notepadqq -y
# tmux, splitting terminal horizontally/vertically
apt-get install tmux -y
# add slack
remindVar="$remindVar\n slack for messaging"
# anki, memorization cards
remindVar="$remindVar\n install anki for memorization"
# gimp, paint/photoshop for ubuntu
apt-get install gimp -y
# vim file editor, grabbing the gnome version that has xterm support (for copy/paste functionality) and a working auto-completion plugin
apt-get install vim-gnome
remindVar="$remindVar\n open vim and type :PluginInstall to install the bundles"
# nodejs, npm, and then link them to bin. Used to develop node apps
apt-get install nodejs -y
remindVar="$remindVar\n node may be old, try downloading from node website, at least v5.2 at this moment 2015-dec-16"
apt-get install npm -y
ln -s /usr/bin/nodejs /usr/bin/node
# reminders for actions to take after install script is done
remindVar="$remindVar\n setup launcher-panel: notepad++, chrome, cmd/terminal, devtool (webstorm), filesystem, gitter, anki, other "
remindVar="$remindVar\n google chrome, login for extensions"
remindVar="$remindVar\n check if python is installed (was pre-installed on ubuntu 14.4)"
remindVar="$remindVar\n get https://github.com/jerem/psdparse and install using \"sudo python setup.py install\""
# password management system
remindVar="$remindVar\n install lastpass"
# javascript IDE
remindVar="$remindVar\n get webstorm (see this shellscript file for instruction on JDK installation first, install vim plugin"
# get anki, powerful memorization software
remindVar="$remindVar\n download and install anki"
remindVar="$remindVar\n setup terminal default colors, maybe GNOME does this by default, bash can also fix this"
# install JDK for webstorm to work
#sudo add-apt-repository ppa:webupd8team/java
#sudo apt-get update
#sudo mkdir -p /usr/lib/mozilla/plugins #just in case, this will be added to the package in the next version
#sudo apt-get install oracle-jdk7-installer
# also get webstorm, unpack, extract to a folder, then link webstorm/bin/webstorm.sh to bin
printf "$remindVar\n"
|
1dose/devTools
|
install_scripts/ubuntu1404devSetup.sh
|
Shell
|
mit
| 4,674 |
#!/bin/bash
tests=( "test-get-remote-ip-sh.sh" )
exitcode=0
for test in "${tests[@]}"
do
ret=$(tests/$test)
result=$?
echo "result = $result"
[ $result -ne 0 ] && exitcode=$result
done
exit $exitcode
|
patinthehat/get-remote-ip
|
tests/run-tests.sh
|
Shell
|
mit
| 232 |
#!/bin/bash
source configuration
adb uninstall org.tribler.at3.${APPNAME}
adb install app/AT3${APPNAME}-1.0-debug.apk
|
rjagerman/AT3
|
install.sh
|
Shell
|
mit
| 118 |
. /opt/homebrew/opt/asdf/libexec/asdf.sh
|
bolek/dotfiles
|
asdf/init.zsh
|
Shell
|
mit
| 40 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for DSA-3351-1
#
# Security announcement date: 2015-09-03 00:00:00 UTC
# Script generation date: 2017-01-01 21:07:32 UTC
#
# Operating System: Debian 8 (Jessie)
# Architecture: i386
#
# Vulnerable packages fixed in version:
# - chromium-browser:45.0.2454.85-1~deb8u1
#
# Last versions recommended by security team:
# - chromium-browser:45.0.2454.85-1~deb8u1
#
# CVE List:
# - CVE-2015-1291
# - CVE-2015-1292
# - CVE-2015-1293
# - CVE-2015-1294
# - CVE-2015-1295
# - CVE-2015-1296
# - CVE-2015-1297
# - CVE-2015-1298
# - CVE-2015-1299
# - CVE-2015-1300
# - CVE-2015-1301
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo apt-get install --only-upgrade chromium-browser=45.0.2454.85-1~deb8u1 -y
|
Cyberwatch/cbw-security-fixes
|
Debian_8_(Jessie)/i386/2015/DSA-3351-1.sh
|
Shell
|
mit
| 860 |
#!/bin/bash
set -e
cd frontend
npm run build
cd ..
./start_postgres_local.sh
cd backend
python app.py
|
liufuyang/lifeinweeks
|
start_backend_local.sh
|
Shell
|
mit
| 106 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for USN-2438-1
#
# Security announcement date: 2014-12-10 00:00:00 UTC
# Script generation date: 2017-01-01 21:04:08 UTC
#
# Operating System: Ubuntu 12.04 LTS
# Architecture: x86_64
#
# Vulnerable packages fixed in version:
# - nvidia-331-updates:331.113-0ubuntu0.0.0.3
# - nvidia-331:331.113-0ubuntu0.0.0.3
# - nvidia-304-updates:304.125-0ubuntu0.0.0.1
# - nvidia-304:304.125-0ubuntu0.0.0.1
#
# Last versions recommended by security team:
# - nvidia-331-updates:331.113-0ubuntu0.0.0.3
# - nvidia-331:331.113-0ubuntu0.0.0.3
# - nvidia-304-updates:304.125-0ubuntu0.0.0.1
# - nvidia-304:304.125-0ubuntu0.0.0.1
#
# CVE List:
# - CVE-2014-8091
# - CVE-2014-8098
# - CVE-2014-8298
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo apt-get install --only-upgrade nvidia-331-updates=331.113-0ubuntu0.0.0.3 -y
sudo apt-get install --only-upgrade nvidia-331=331.113-0ubuntu0.0.0.3 -y
sudo apt-get install --only-upgrade nvidia-304-updates=304.125-0ubuntu0.0.0.1 -y
sudo apt-get install --only-upgrade nvidia-304=304.125-0ubuntu0.0.0.1 -y
|
Cyberwatch/cbw-security-fixes
|
Ubuntu_12.04_LTS/x86_64/2014/USN-2438-1.sh
|
Shell
|
mit
| 1,193 |
#! /usr/bin/bash
#
# Author: Bert Van Vreckem <[email protected]>
#
# "Unvagrantify" a VM and export to OVA.
set -u # abort on unbound variable
#{{{ Variables
#}}}
#{{{ Functions
usage() {
cat << _EOF_
Usage: ${0} VM_NAME
with VM_NAME the name of a VirtualBox VM
_EOF_
}
#}}}
# {{{ Command line parsing
if [[ "$#" -ne "1" ]]; then
echo "Expected 1 argument, got $#" >&2
usage
exit 2
fi
vm="$1"
#}}}
# Script proper
if [[ -z "$(vboxmanage list vms | grep \"${vm}\")" ]]; then
echo "This is not a VM: ${vm}. Choose one of the following:" >&2
vboxmanage list vms >&2
exit 1
fi
vboxmanage sharedfolder remove "${vm}" --name "etc_ansible"
vboxmanage sharedfolder remove "${vm}" --name "vagrant"
vboxmanage modifyvm "${vm}" --cableconnected2 off
vboxmanage export "${vm}" --output "${vm}.ova" --options manifest
|
bertvv/fedora-testbox
|
unvagrantify.sh
|
Shell
|
mit
| 849 |
#!/bin/bash
#SBATCH --partition=mono
#SBATCH --ntasks=1
#SBATCH --time=4-0:00
#SBATCH --mem-per-cpu=8000
#SBATCH -J Deep-RBM_DBM_4_inc_bin_CD1_base
#SBATCH -e Deep-RBM_DBM_4_inc_bin_CD1_base.err.txt
#SBATCH -o Deep-RBM_DBM_4_inc_bin_CD1_base.out.txt
source /etc/profile.modules
module load gcc
module load matlab
cd ~/deepLearn && srun ./deepFunction 4 'RBM' 'DBM' '128 1000 1500 10' '0 1 1 1' '4_inc_bin' 'CD1_base' "'iteration.n_epochs', 'learning.lrate', 'learning.cd_k', 'learning.persistent_cd', 'parallel_tempering.use'" '200 1e-3 1 0 0' "'iteration.n_epochs', 'learning.persistent_cd'" '200 1'
|
aciditeam/matlab-ts
|
jobs/deepJobs_RBM_DBM_4_inc_bin_CD1_base.sh
|
Shell
|
mit
| 611 |
#!/bin/bash
echo 'Installing bower'
npmInstall 'bower'
echo 'Done...'
|
bnewt/machine-setup
|
node-module-installers/install-bower.sh
|
Shell
|
mit
| 70 |
#!/bin/sh -e
sudo mysqltuner
|
FunTimeCoding/mysql-tools
|
bin/tuner.sh
|
Shell
|
mit
| 30 |
for req in $(cat requirements/linux-deb-pkgs.txt); do apt install -y "$req"; done
for req in $(cat requirements/python-pkgs.txt); do pip install "$req"; done
|
worldbank/cv4ag
|
requirements/install_linux.sh
|
Shell
|
mit
| 151 |
#!/bin/sh
cd ..
rm -rf build/npm
mkdir build/npm
git archive master -o build/npm/ng-annotate.tar --prefix=ng-annotate/
cd build/npm
tar xf ng-annotate.tar && rm ng-annotate.tar
cd ng-annotate/build
./build.sh
# delete build scripts
rm *.sh *.js defs-config.json ng-annotate
# delete large test artifacts
rm ../tests/angular.js ../build/es5/tests/angular.js
cd ../..
tar czf ng-annotate.tgz ng-annotate && rm -rf ng-annotate
|
Smarp/ng-annotate
|
build/prepare.sh
|
Shell
|
mit
| 424 |
#!/bin/bash
usage () {
echo "usage: $0 [-i <#>] [-e <#>] [-a <assembly>] [-o <output>] [-c <name>] [-x <#>] <pfx1> <pfx2> <pfx3> ... <pfxn> "
echo "Required parameters:"
echo "-i The number of the most represented sites to ignore"
echo "-e The number of randomly distributed pseudo-datasets to generate"
echo "-a The name of the assembly you're using (PAO1, PA14, AAD7S, SGCH1, ECK12W3110, HG003)"
echo "-o The name for the output file"
echo "-c The name for the control condition"
echo "-x The number of replicates for the control condition"
echo ""
echo "The required parameters must precede the prefixes to be joined, listed with the"
echo " control conditions followed by the test conditions. See examples below."
echo ""
echo "Examples:"
echo "$0 -i 50 -e 10 -a PAO1 -o Example -c control -x 2 C1 C2"
}
# Read in the important options
while getopts ":i:e:a:o:h:c:x:" option; do
case "$option" in
i) CUT="$OPTARG" ;;
e) PSEUDO="$OPTARG" ;;
a) ASSEMBLY="$OPTARG" ;;
o) OUT_PFX="$OPTARG" ;;
c) CONTROL_PFX="$OPTARG" ;;
x) CONTROL_REPS="$OPTARG" ;;
h) # it's always useful to provide some help
usage
exit 0
;;
:) echo "Error: -$option requires an argument"
usage
exit 1
;;
?) echo "Error: unknown option -$option"
usage
exit 1
;;
esac
done
shift $(( OPTIND - 1 ))
# Do some error checking to make sure parameters are defined
if [ -z "$CUT" ]; then
echo "Error: you must specify the number of sites to ignore using -i"
usage
exit 1
fi
if [ -z "$PSEUDO" ]; then
echo "Error: you must specify the number of pseudo-datasets to generate using -e"
usage
exit 1
fi
if [ -z "$ASSEMBLY" ]; then
echo "Error: you must specify an assembly using -a"
usage
exit 1
fi
if [ -z "$OUT_PFX" ]; then
echo "Error: you must specify an output prefix for your file using -o"
usage
exit 1
fi
if [ -z "$CONTROL_PFX" ]; then
echo "Error: you must specify the name for your control"
echo "using -c"
usage
exit 1
fi
if [ -z "$CONTROL_REPS" ]; then
echo "Error: you must specify the number of replicates for your control"
echo "condition using -x"
usage
exit 1
fi
# Give the usage if there aren't enough parameters
if [ $# -lt 1 ] ; then
echo "Please provide at least 1 file"
usage
exit 1
fi
ASSEMBLY_PFX="$REFGENOME/$ASSEMBLY/$ASSEMBLY"
# Smoothing (LOESS) and normalization (TMM)
echo "Performing LOESS smoothing, normalization and obligate essentiality analysis on count data..."
R --vanilla --args $CONTROL_PFX $CONTROL_REPS $ASSEMBLY_PFX $OUT_PFX $CUT $PSEUDO "$@" < ~/local/bin/TnSeqDESeq2Essential.R
|
spleonard1/Tn-seq
|
TnSeqEssential.sh
|
Shell
|
mit
| 2,712 |
#!/bin/bash
# Copyright 2014 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
flower --basic_auth=pixy:woohoo007 --broker=amqp://guest:guest@${RABBITMQ_SERVICE_SERVICE_HOST:-localhost}:5672//
|
hyperbolic2346/coreos
|
dockerfiles/flower/run_flower.sh
|
Shell
|
mit
| 734 |
#!/bin/sh
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
set -e
DIR="$( cd "$( dirname "$0" )" && pwd )"
if [ "$(uname -s)" = "Darwin" ]; then
if [ "$(whoami)" = "root" ]; then
TARGET_DIR="/Library/Google/Chrome/NativeMessagingHosts"
else
TARGET_DIR="$HOME/Library/Application Support/Google/Chrome/NativeMessagingHosts"
fi
else
if [ "$(whoami)" = "root" ]; then
TARGET_DIR="/etc/opt/chrome/native-messaging-hosts"
else
TARGET_DIR="$HOME/.config/google-chrome/NativeMessagingHosts"
fi
fi
HOST_NAME=enslite
# Create directory to store native messaging host.
mkdir -p "$TARGET_DIR"
# Copy native messaging host manifest.
cp "$DIR/$HOST_NAME.json" "$TARGET_DIR"
# Update host path in the manifest.
HOST_PATH=$GOPATH/bin/resolver
TO_REPLACE="replace_me"
sed -i -e "s+$TO_REPLACE+$HOST_PATH+" "$TARGET_DIR/$HOST_NAME.json"
# Set permissions for the manifest so that all users can read it.
chmod o+r "$TARGET_DIR/$HOST_NAME.json"
echo "Native messaging host $HOST_NAME has been installed."
|
cpacia/ens-chrome-extension
|
install.sh
|
Shell
|
mit
| 1,136 |
#!/bin/bash
function start() {
echo "Starting the TypeScript watcher"
tsc --watch >"$(filename tsc.out log)" 2>"$(filename tsc.err log)" &
echo $! > "$(filename tsc pid)"
echo "Starting the Middleman server"
bundle exec middleman serve >"$(filename mid.out log)" 2>"$(filename mid.err log)" &
echo $! > "$(filename mid pid)"
}
function stop() {
echo "Stopping the Middleman server"
kill $(cat $(filename mid pid))
rm "$(filename mid pid)"
echo "Stopping the TypeScript watcher"
kill $(cat $(filename tsc pid))
rm "$(filename tsc pid)"
}
function filename() {
echo "${1}-$(cat /etc/hostname).${2}"
}
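# e.g. "filename tsc log" yields "tsc-<hostname>.log", keeping per-host files apart.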
case $1 in
start)
start
;;
stop)
stop
;;
*)
echo "Use start to turn on the servers or stop to stop them."
;;
esac
|
myrrlyn/myrrlyn.net
|
serve.sh
|
Shell
|
mit
| 740 |
mono paket.bootstrapper.exe
mono paket.exe install
chmod +x packages/FsLexYacc/bin/fslex.exe
chmod +x packages/FsLexYacc/bin/fsyacc.exe
|
PeteProgrammer/JsonFSharp
|
bootstrap.sh
|
Shell
|
mit
| 136 |
#!/bin/bash
# This script executes "npm install" on the current, active,
# project (which is assumed to be located at /project)
echo ""
echo "[Provision-Script] Installing Project Dependencies"
echo ""
cd /project
npm install
|
vmadman/linux-scripts
|
vagrant/centos7/dev/v1/npm-install-deps.sh
|
Shell
|
mit
| 229 |
#!/bin/bash
# finding files with locate and updatedb
LC_ALL=C sudo /Users/paulirish/.homebrew/bin/gupdatedb --prunepaths="/tmp /var/tmp /.Spotlight-V100 /.fseventsd /Volumes/MobileBackups /Volumes/Volume /.MobileBackups"
which glocate > /dev/null && alias locate=glocate
locate navbar
# listing all useragent from your logs
zcat ~/path/to/access/logs* | awk -F'"' '{print $6}' | sort | uniq -c | sort -rn | head -n20000
zcat logs/paulirish.com/http/access.log* | awk -F'"' '{print $6}' | sort | uniq -c | sort -rn | head -n20000 | less
### rsync
rsync -havz --progress --partial --append-verify login@host:/full/path ./
###############################################################################################################
### pull android app apk (like chrome canary) off a phone and install elsewhere.
# get canary off phone
adb shell pm list packages | grep canary | sed 's/package://' # com.chrome.canary
adb shell pm path com.chrome.canary | sed 's/package://' # /data/app/com.chrome.canary-2/base.apk
# pull it
adb pull /data/app/com.chrome.canary-1/base.apk
# get the version of base.apk and save to $chromeversion
chromeversion=$(/Applications/Genymotion.app/Contents/MacOS/tools/aapt dump badging base.apk | grep "package:" | sed "s/.*versionName\='//" | sed "s/'.*//")
chromefilename="chrome-$chromeversion.apk"
# rename. optionally with version
mv base.apk $chromefilename
# plug in the new phone
# yes really do it
# check when done
adb devices
# force install it.
adb install -r $chromefilename
# optionally clean up the apk
rm $chromefilename
###
###############################################################################################################
##############################################################################################
## one day i tried really really hard to get millisecond-precision time deltas printed for profiling.
## but i'm pretty sure it failed.
## since it was a few hours of work i'm keeping it. :)
start="$(eval 'gdate +%s%3N')"
function timestamp(){
if hash gdate 2>/dev/null; then
echo $(gdate +%s%3N)
else
echo $(date +%s%3)
fi;
}
if hash gdate 2>/dev/null; then
dateCmd="gdate +%s%3N"
else
dateCmd="date +%s%3"
fi;
dateAwesome="$(($(eval 'gdate +%s%3N')-$start))"
printf "%s" "$dateAwesome";
###
#################################################################################################
|
victormiranda/dotfiles
|
docs/common-things.sh
|
Shell
|
mit
| 2,508 |
#!/bin/bash
#ubuntu
echo "deb http://deb.goaccess.io/ $(lsb_release -cs) main" | sudo tee -a /etc/apt/sources.list.d/goaccess.list
wget -O - http://deb.goaccess.io/gnugpg.key | sudo apt-key add -
sudo apt-get update
sudo apt-get install -y goaccess
#Mac
brew install goaccess
|
lgh8820/ansible-test
|
OperationsShell/GoAccess/install_goaccess.sh
|
Shell
|
mit
| 278 |
#!/usr/bin/env bash
#=============================================================
# Initialization
work_dir="$(pwd)"
tools_dir="$(cd "$(dirname "$0")" && pwd)"
tweet_sh="$tools_dir/tweet.sh/tweet.sh"
detect_client_key_file() {
local loaded_key=''
while read path
do
if [ -f "$path" ]
then
loaded_key="$(source "$path"; echo "$CONSUMER_KEY")"
if [ "$loaded_key" != '' ]
then
echo "$path"
return 0
fi
fi
done < <(cat << FIN
$work_dir/tweet.client.key
$HOME/.tweet.client.key
$tools_dir/tweet.client.key
FIN
)
echo ''
}
load_keys() {
if [ "$CONSUMER_KEY" = '' ]
then
local path="$(detect_client_key_file)"
echo "Using client key at $path" 1>&2
source "$path"
fi
export MY_SCREEN_NAME
export MY_USER_ID
export MY_LANGUAGE
export CONSUMER_KEY
export CONSUMER_SECRET
export ACCESS_TOKEN
export ACCESS_TOKEN_SECRET
}
load_keys
if [ "$TWEET_BASE_DIR" != '' ]
then
TWEET_BASE_DIR="$(cd "$TWEET_BASE_DIR" && pwd)"
else
TWEET_BASE_DIR="$work_dir"
fi
export TWEET_BASE_DIR
base_dir="$TWEET_BASE_DIR"
log_dir="$TWEET_BASE_DIR/logs"
mkdir -p "$log_dir"
logfile="$log_dir/general.log"
logmodule="$TWEET_LOGMODULE"
logdate_format='%Y-%m-%d %H:%M:%S'
log() {
local logmodule_part=''
[ "$logmodule" != '' ] && logmodule_part=" $logmodule:"
local message="[$(date +"$logdate_format")]$logmodule_part $*"
echo "$message" 1>&2
echo "$message" >> "$logfile"
}
debug() {
[ "$TWEETBOT_DEBUG" = '' ] && return 0
local logmodule_part=''
[ "$logmodule" != '' ] && logmodule_part=" $logmodule:"
local message="[$(date +"$logdate_format")]$logmodule_part $*"
echo "$message" 1>&2
echo "$message" >> "$logfile"
}
# Orphan processes can be left behind after Ctrl-C or the like,
# because they can become detached. We manually find and kill them all.
kill_descendants() {
local target_pid=$1
local children=$(ps --no-heading --ppid $target_pid -o pid)
for child in $children
do
kill_descendants $child
done
if [ $target_pid != $$ ]
then
kill $target_pid > /dev/null 2>&1
fi
}
responder="$TWEET_BASE_DIR/responder.sh"
monologue_selector="$TWEET_BASE_DIR/monologue_selector.sh"
status_dir="$TWEET_BASE_DIR/.status"
mkdir -p "$status_dir"
already_replied_dir="$status_dir/already_replied"
mkdir -p "$already_replied_dir"
already_processed_dir="$status_dir/already_processed"
mkdir -p "$already_processed_dir"
body_cache_dir="$status_dir/body_cache"
mkdir -p "$body_cache_dir"
command_queue_dir="$status_dir/command_queue"
mkdir -p "$command_queue_dir"
responses_dir="$TWEET_BASE_DIR/responses"
mkdir -p "$responses_dir"
monologues_dir="$TWEET_BASE_DIR/monologues"
mkdir -p "$monologues_dir"
# default personality
FOLLOW_ON_FOLLOWED=true
FOLLOW_ON_MENTIONED=true
FOLLOW_ON_QUOTED=true
FOLLOW_ON_RETWEETED=false
SPAM_USER_PATTERN='follow *(back|me)'
FAVORITE_MENTIONS=true
FAVORITE_QUOTATIONS=true
FAVORITE_SEARCH_RESULTS=true
RETWEET_MENTIONS=false
RETWEET_QUOTATIONS=true
RETWEET_SEARCH_RESULTS=true
RESPOND_TO_MENTIONS=true
RESPOND_TO_SIDE_MENTIONS=false
RESPOND_TO_MULTIPLE_TARGETS_REPLY=false
RESPOND_TO_QUOTATIONS=true
RESPOND_TO_SEARCH_RESULTS=true
TIMELY_TOPIC_PROBABILITY=20
FREQUENCY_OF_CAPRICES=66
NEW_TOPIC=66
CONVERSATION_PERSISTENCE=40
MENTION_LIMIT_PERIOD_MIN=120
MAX_MENTIONS_IN_PERIOD=10
MAX_BODY_CACHE=1000
ADMINISTRATORS=''
WATCH_KEYWORDS=''
AUTO_FOLLOW_QUERY=''
PROCESS_QUEUE_INTERVALL_MINUTES=10
ACTIVE_TIME_RANGE="11:40-15:00,17:00-24:00"
MONOLOGUE_INTERVAL_MINUTES=60
MONOLOGUE_ACTIVE_TIME_RANGE="00:00-00:30,06:00-24:00"
MONOLOGUE_TIME_RANGE_GROUPS="morning/06:00-07:00 \
noon/12:00-13:00 \
afternoon/15:00-15:30 \
evening/17:30-18:30 \
night/19:00-21:00 \
midnight/23:00-24:00,00:00-03:00"
personality_file="$TWEET_BASE_DIR/personality.txt"
if [ -f "$personality_file" ]
then
source "$personality_file"
fi
#=============================================================
# Utilities to operate primitive strings
whitespaces=' \f\n\r\t '
non_whitespaces='[^ \f\n\r\t ]'
# Custom version of sed with extended regexp, "$esed" (like "egrep")
case $(uname) in
Darwin|*BSD|CYGWIN*)
esed="sed -E"
;;
*)
esed="sed -r"
;;
esac
is_true() {
echo "$1" | egrep -i "^(1|true|yes)$" > /dev/null
}
is_false() {
if is_true "$1"
then
return 1
else
return 0
fi
}
time_to_minutes() {
local hours minutes
read hours minutes <<< "$(cat | $esed 's/^0?([0-9]+):0?([0-9]+)$/\1 \2/')"
echo $(( $hours * 60 + $minutes ))
}
is_in_time_range() {
local time_ranges="$1"
local now=$2
[ "$now" = '' ] && now="$(date +%H:%M)"
now=$(echo "$now" | time_to_minutes)
local time_range
local start
local end
for time_range in $(echo "$time_ranges" | sed 's/,/ /g')
do
start="$(echo "$time_range" | cut -d '-' -f 1 | time_to_minutes)"
end="$(echo "$time_range" | cut -d '-' -f 2 | time_to_minutes)"
[ $now -ge $start -a $now -le $end ] && return 0
done
return 1
}
is_not_in_time_range() {
if is_in_time_range "$@"
then
return 1
else
return 0
fi
}
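# For example, with the default active range:
#   is_in_time_range "11:40-15:00,17:00-24:00" "12:30" # succeeds
#   is_in_time_range "11:40-15:00,17:00-24:00" "16:00" # fails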
#=============================================================
# Utilities to operate tweet JSON strings
abs() {
echo "sqrt($1 ^ 2)" | bc
}
expired_by_seconds() {
local expire_seconds=$1
local target="$(cat | jq -c .)"
local created_at="$(echo "$target" | jq -r .created_at)"
local created_timestamp="$(echo "$target" | jq -r .created_timestamp)"
if [ "$created_timestamp" != 'null' ]
then
# event
created_at="$(expr "$created_timestamp" / 1000)"
else
# tweet
created_at="$(echo "$created_at" | date -f - +%s)"
fi
local now=$(date +%s)
[ $((now - created_at)) -gt $expire_seconds ]
}
is_protected_tweet() {
cat | jq -r .user.protected | grep 'true' > /dev/null
}
is_protected_user() {
cat | jq -r .protected | grep 'true' > /dev/null
}
is_spam_like_user() {
local user="$(cat)"
local spam_level=0
if [ "$(echo "$user" | jq -r .default_profile)" = 'true' ]
then
log " => default profile"
spam_level=$(($spam_level + 1))
fi
if [ "$(echo "$user" | jq -r .default_profile_image)" = 'true' ]
then
log " => default icon"
spam_level=$(($spam_level + 1))
fi
local created_at="$(echo "$user" | jq -r .created_at | date -f - +%s)"
local now=$(date +%s)
local one_year_in_seconds=$((365 * 24 * 60 * 60))
if [ $((now - created_at)) -lt $one_year_in_seconds ]
then
log " => recently created"
spam_level=$(($spam_level + 1))
fi
local count="$(echo "$user" | jq -r .statuses_count)"
if [ $count -lt 100 ]
then
log " => too less tweets ($count < 100)"
spam_level=$(($spam_level + 1))
fi
local screen_name="$(echo "$user" | jq -r .screen_name)"
local description="$(echo "$user" | jq -r .description)"
if [ "$description" = '' ]
then
log " => no description"
spam_level=$(($spam_level + 1))
fi
if echo "@$screen_name $description" | egrep "$SPAM_USER_PATTERN" > /dev/null
then
log " => matched to the spam pattern"
spam_level=$(($spam_level + 1))
fi
if [ $spam_level -ge 2 ]
then
log " => spam level $spam_level: this account is detected as a spam."
return 0
fi
return 1
}
is_reply() {
local replied_id="$(jq -r .in_reply_to_status_id_str)"
[ "$replied_id" != 'null' -a "$replied_id" != '' ]
}
other_replied_people() {
cat |
$esed -e "s/^((@[^$whitespaces]+[$whitespaces]+)+)?.*/\1/" \
-e "s/@${MY_SCREEN_NAME}[$whitespaces]+//"
}
follow_owner() {
local tweet="$(cat)"
local id="$(echo "$tweet" | jq -r .id_str)"
local owner="$(echo "$tweet" | jq -r .user.screen_name)"
log "Trying to follow to the owner of $id, $owner..."
user="$(echo "$tweet" | jq -c .user)"
if echo "$user" | is_protected_user
then
log " => protected user should not be followed to avoid privacy issues"
return 0
fi
if echo "$user" | is_spam_like_user
then
log " => spam like user should not be followed"
return 0
fi
if echo "$tweet" | jq -r .user.following | grep "false" > /dev/null
then
log " => follow $owner"
result="$("$tweet_sh" follow $owner)"
if [ $? = 0 ]
then
log ' => successfully followed'
else
log " => failed to follow $owner"
log " result: $result"
fi
else
log " => already followed"
fi
}
favorite() {
local tweet="$(cat)"
local id="$(echo "$tweet" | jq -r .id_str)"
log "Trying to favorite $id..."
if echo "$tweet" | jq -r .favorited | grep "false" > /dev/null
then
log " => favorite $id"
result="$("$tweet_sh" favorite $id)"
if [ $? = 0 ]
then
log ' => successfully favorited'
else
log ' => failed to favorite'
log " result: $result"
fi
else
log " => already favorited"
fi
}
retweet() {
local tweet="$(cat)"
local id="$(echo "$tweet" | jq -r .id_str)"
log "Trying to retweet $id..."
if echo "$tweet" | jq -r .retweeted | grep "false" > /dev/null
then
log " => retweet $id"
if is_in_time_range "$ACTIVE_TIME_RANGE"
then
result="$("$tweet_sh" retweet $id)"
if [ $? != 0 ]
then
log ' => failed to retweet'
log " result: $result"
fi
else
local queue="retweet $id"
echo "$queue" > "$command_queue_dir/$id.retweet"
log " => queued: \"$queue\""
fi
else
log " => already retweeted"
fi
}
is_already_replied() {
local id="$1"
[ "$FORCE_PROCESS" != 'yes' ] &&
[ "$(cd "$already_replied_dir"; find . -name "$id.*")" != '' ]
}
is_too_frequent_mention() {
local users="$1"
local user
local mentions
local all_users="$(cat | unified_users_from_body_and_args "$users")"
for user in $all_users
do
user="$(echo "$user" | $esed -e 's/^@//')"
mentions="$(cd "$already_replied_dir"; find . -name "*.$user.*" -cmin -$MENTION_LIMIT_PERIOD_MIN | wc -l)"
if [ $mentions -gt $MAX_MENTIONS_IN_PERIOD ]
then
return 0
fi
done
return 1
}
on_replied() {
local id="$1"
local users="$2"
local all_users="$(cat | unified_users_from_body_and_args "$users")"
touch "$already_replied_dir/$id.$(echo "$all_users" | $esed -e 's/ +/./g')."
# remove too old files
find "$already_replied_dir" -ctime +1 | while read path
do
rm -rf "$path"
done
}
unified_users_from_body_and_args() {
local users="$1"
local body="$(cat)"
cat <(echo "$body" | users_in_body) \
<(echo "$users" | $esed -e 's/ +/\n/g' | $esed -e 's/^.*@//') | \
sort | uniq | tr -d '\n' | paste -s -d '.'
}
users_in_body() {
while read -r body
do
echo "$body" | $esed -e 's/ +/\n/g' | grep -E '^\.?@.' | $esed -e 's/^.*@//'
done
}
post_replies() {
local id="$1"
local users="$2"
local body="$(cat)"
if is_already_replied "$id"
then
log ' => already replied'
return 1
fi
if echo "$body" | is_too_frequent_mention "$users"
then
log ' => too frequent mention for same user'
return 1
fi
log "Sending replies to $id..."
echo "$body" | post_sequential_tweets "$id" "$users"
return $?
}
post_sequential_tweets() {
local previous_id="$1"
local users="$2"
local result
while read -r body
do
body="$(echo "$body" | $esed -e 's/<br>/\n/g')"
if [ "$previous_id" != '' ]
then
result="$(echo -e "$body" | "$tweet_sh" reply "$previous_id")"
else
result="$(echo -e "$body" | "$tweet_sh" post)"
fi
if [ $? = 0 ]
then
echo "$body" | on_replied "$previous_id" "$users"
previous_id="$(echo "$result" | jq -r .id_str)"
echo "$body" | cache_body "$previous_id"
log ' => successfully posted'
else
log ' => failed to post'
log " result: $result"
return 1
fi
sleep 10s
done
return 0
}
post_quotation() {
local owner=$1
local id=$2
local url="https://twitter.com/$owner/status/$id"
local bodies="$(cat)"
if is_already_replied "$id"
then
log ' => already replied'
return 1
fi
if is_too_frequent_mention "$owner"
then
log ' => too frequent mention for same user'
return 1
fi
log "Quoting the tweet $id by $owner..."
if is_in_time_range "$ACTIVE_TIME_RANGE"
then
local result
echo "$bodies" | while read -r body
do
result="$(echo -e "$body $url" | "$tweet_sh" reply "$id")"
if [ $? = 0 ]
then
log ' => successfully quoted'
echo "$body" | on_replied "$id" "$owner"
# send following responses as sequential tweets
id="$(echo "$result" | jq -r .id_str)"
echo "$body $url" | cache_body "$id"
else
log ' => failed to quote'
log " result: $result"
fi
done
else
local queue_file="$command_queue_dir/$id.quote"
touch "$queue_file"
echo "$bodies" | while read -r body
do
echo "reply $id $body $url" >> "$queue_file"
done
log " => reactions are queued at $queue_file"
fi
}
cache_body() {
local id="$1"
cat > "$body_cache_dir/$id"
# remove too old caches - store only for recent N bodies
ls "$body_cache_dir/" | sort | head -n -$MAX_BODY_CACHE | while read path
do
rm -rf "$path"
done
}
# for DM
is_already_processed_dm() {
local id="$1"
[ -f "$already_processed_dir/$id" ]
}
on_dm_processed() {
local id="$1"
touch "$already_processed_dir/$id"
# remove too old files - store only for recent N messages
ls "$already_processed_dir/" | sort | head -n -200 | while read path
do
rm -rf "$path"
done
}
get_screen_name() {
local id="$1"
local name=''
local cached="$(egrep ":$id$" "$status_dir/screen_name_to_user_id" 2>/dev/null | tail -n 1 | tr -d '\n')"
if [ "$cached" != '' ]
then
name="$(echo -n "$(echo -n "$cached" | cut -d : -f 1)")"
if [ "$name" != '' ]
then
echo -n "$name"
return 0
fi
fi
name="$("$tweet_sh" get-screen-name "$id")"
if [ "$name" != '' ]
then
echo "$name:$id" >> "$status_dir/screen_name_to_user_id"
fi
echo -n "$name"
}
get_user_id() {
local id=''
local name="$1"
local cached="$(egrep "^$name:" "$status_dir/screen_name_to_user_id" 2>/dev/null | tail -n 1 | tr -d '\n')"
if [ "$cached" != '' ]
then
id="$(echo -n "$(echo -n "$cached" | cut -d : -f 2)")"
if [ "$id" != '' ]
then
echo -n "$id"
return 0
fi
fi
id="$("$tweet_sh" get-user-id "$name")"
if [ "$id" != '' ]
then
echo "$name:$id" >> "$status_dir/screen_name_to_user_id"
fi
echo -n "$id"
}
#=============================================================
# Utilities for randomization
# Succeeds with the probability N% (0-100)
run_with_probability() {
[ $(($RANDOM % 100)) -lt $1 ]
}
echo_with_probability() {
if run_with_probability $1
then
cat
fi
}
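# e.g. "run_with_probability 20" succeeds roughly one call in five, and
# "echo hi | echo_with_probability 50" forwards its input about half the time.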
# - The calculation is anchored at 00:00.
# - The "posting window" is the shorter of 1/3 of the given interval or 10 minutes.
#   For a 30 minute interval the window is 10 minutes: post somewhere between 00:25 and 00:35.
# - Post with 90% probability exactly on the interval, falling to 10% at the far edge of the window.
# - If something was already posted somewhere in the window, don't post again within it.
# - However, if nothing has been posted by the last moment of the window, always post.
run_periodically() {
local interval_minutes="$1"
local last_processed="$2"
local active_time_range="$3"
local period_range=$(( $interval_minutes / 3 ))
[ $period_range -gt 10 ] && period_range=10
local max_lag=$(( $period_range / 2 ))
local half_interval=$(( $interval_minutes / 2 ))
calculate_probability() {
local target_minutes=$1
# Compute how many minutes we are off from the target time
local lag=$(($target_minutes % $interval_minutes))
# If the lag exceeds half_interval, regard it as a lag before the target time instead
[ $lag -gt $half_interval ] && lag=$(($interval_minutes - $lag))
local probability=$(( (($max_lag - $lag) * 100 / $max_lag) * 80 / 100 + 10 ))
if [ $probability -lt 10 ]
then
echo 0
else
echo $probability
fi
}
local process_interval=1m
local one_day_in_minutes=$(( 24 * 60 ))
debug 'Initiating new periodical task...'
debug " interval = $interval_minutes minutes"
debug " last = $last_processed"
debug " active time = $active_time_range"
while true
do
if [ "$active_time_range" != '' ]
then
if is_not_in_time_range "$active_time_range"
then
sleep $process_interval
continue
fi
fi
debug 'Processing periodical task...'
local current_minutes=$(date +%H:%M | time_to_minutes)
debug " $current_minutes minutes past from 00:00"
# If we already posted within the same window, do nothing
if [ "$last_processed" != '' ]
then
local delta=$(($current_minutes - $last_processed))
debug " delta from $last_processed: $delta"
if [ $delta -lt 0 ]
then
delta=$(( $one_day_in_minutes - $last_processed + $current_minutes ))
debug " delta => $delta"
fi
if [ $delta -le $period_range ]
then
debug 'Already processed in this period.'
sleep $process_interval
continue
fi
fi
# Check whether this is the last moment of the window
lag=$(($current_minutes % $interval_minutes))
if [ $lag -eq $max_lag ]
then
debug "Nothing was processed in this period."
probability=100
else
probability=$(calculate_probability $current_minutes)
fi
debug "Probability to process: $probability %"
if run_with_probability $probability
then
debug "Let's process!"
last_processed=$current_minutes
echo $current_minutes
fi
sleep $process_interval
done
unset calculate_probability
}
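# Hypothetical usage sketch (the real consumers live in the bot's other
# scripts): emit a timestamp roughly every MONOLOGUE_INTERVAL_MINUTES within
# the active range, then act on each one.
#   run_periodically "$MONOLOGUE_INTERVAL_MINUTES" '' "$MONOLOGUE_ACTIVE_TIME_RANGE" | \
#     while read minutes; do post_monologue_somehow; done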
#=============================================================
# Misc.
try_lock() {
local name="$1"
mkdir "$status_dir/lock.$name" 2> /dev/null
}
try_lock_until_success() {
local name="$1"
while true
do
try_lock "$name" && break
sleep 1s
done
}
unlock() {
local name="$1"
[ "$name" = '' ] && return 0
rm -rf "$status_dir/lock.$name"
}
clear_all_lock() {
(cd $status_dir &&
rm -rf lock.*)
}
#=============================================================
# Initialize list of search queries
query=''
keywords=''
keywords_matcher=''
if [ "$WATCH_KEYWORDS" != '' ]
then
query="$(echo "$WATCH_KEYWORDS" |
$esed -e "s/^[$whitespaces]*,[$whitespaces]*|[$whitespaces]*,[$whitespaces]*$//g" \
-e "s/[$whitespaces]*,[$whitespaces]*/ OR /g" \
-e "s/^[$whitespaces]*OR[$whitespaces]+|[$whitespaces]+OR[$whitespaces]*$//g") -from:$MY_SCREEN_NAME"
keywords="$(echo ",$WATCH_KEYWORDS," |
$esed -e "s/^[$whitespaces]*,[$whitespaces]*|[$whitespaces]*,[$whitespaces]*$//g" \
-e "s/[$whitespaces]*,+[$whitespaces]*/,/g" \
-e 's/^,|,$//g')"
keywords_matcher="$(echo "$WATCH_KEYWORDS" |
$esed -e "s/^[$whitespaces]*,[$whitespaces]*|[$whitespaces]*,[$whitespaces]*$//g" \
-e "s/[$whitespaces]*,+[$whitespaces]*/|/g" \
-e 's/^\||\|$//g')"
fi
|
piroor/tweetbot.sh
|
common.sh
|
Shell
|
mit
| 19,333 |
make clean && make && ./out
|
j-rock/jr
|
examples/simple_game/run.sh
|
Shell
|
mit
| 28 |
#!/bin/sh
for i in *.mp4; do o=`basename "$i" .mp4`.mkv; mkvmerge -o "$o" --no-chapters "$i" || echo "Error in $i" >>error.log; done
|
pkunk/animutils
|
mp4mkv.sh
|
Shell
|
mit
| 134 |
#! /bin/bash
LLVM_ROOT=${1:-/src/llvm-build}
export PATH=$LLVM_ROOT/bin:$PATH
export LD_LIBRARY_PATH=$LLVM_ROOT/lib:$LD_LIBRARY_PATH
export INCLUDE=$LLVM_ROOT/include:$INCLUDE
cmake -G Xcode ..
|
jdemeule/tidy-tools
|
configure.sh
|
Shell
|
mit
| 194 |
#!/bin/bash
#SBATCH --job-name=UF.1k.np20
#SBATCH -p RM
#SBATCH --time=12:00:00
#SBATCH --nodes 1
#SBATCH --ntasks-per-node 20
export OMP_NUM_THREADS=1
OUTDIR=$2
MATDIR=~/pylon2/UF_Collection_Matrix-Market
EXEDIR=~/pylon2/trilinos-prediction/tpetra_solvers
INPUT=$1
mkdir -p $OUTDIR
set -x
IFS=,
[ ! -f "$INPUT" ] && { echo "$INPUT file not found"; exit 99; }
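# Read the CSV on fd 3 so that mpirun inside the loop cannot consume the
# remaining input lines from stdin.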
while read matrix solved <&3
do
mpirun ${EXEDIR}/tpetra_solvers ${MATDIR}/${matrix} -d ${OUTDIR}
done 3< $INPUT
|
patemotter/trilinos-prediction
|
tpetra_solvers/batch_scripts/bridges/bridges_np20.sh
|
Shell
|
mit
| 480 |
#!/bin/bash
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
cd $DIR/client
for process in {"dojo","dijit","dojox","util"}; do
if [ ! -d $process ]; then
git clone https://github.com/dojo/$process.git $process
else
echo $process already installed
fi
done
# Local variables:
# coding: utf-8
# End:
|
tbaud0n/dojoBuilder
|
example/initExample.sh
|
Shell
|
mit
| 336 |
#!/bin/bash
# this script will setup custom database configuration
# will be skipped by default
if [ "${DATABASE_DRIVER}" = "postgres" ]; then
apt-get install -y php5-pgsql
sed -i -e "s/'DB_DRIVER', 'sqlite'/'DB_DRIVER', '${DATABASE_DRIVER}'/g" config.default.php
sed -i -e "s/'DB_USERNAME', 'root'/'DB_USERNAME', '${DATABASE_USER}'/g" config.default.php
sed -i -e "s/'DB_PASSWORD', ''/'DB_DRIVER', '${DATABASE_PW}'/g" config.default.php
sed -i -e "s/'DB_HOSTNAME', 'localhost'/'DB_DRIVER', '${DATABASE_HOST}'/g" config.default.php
sed -i -e "s/'DB_NAME', 'kanboard'/'DB_NAME', '${DATABASE_NAME}'/g" config.default.php
fi
|
mko-x/docker-kanban
|
bootstrap/db_handler.sh
|
Shell
|
mit
| 642 |
#!/bin/bash
DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
# PIMMIT DR + FAIRFAX TOWERS
"$DIR"/nextBus.sh -s 5001507 -t 1 -d Mclean -r 3T "$@"
|
happylance/NextBus
|
3TP1.sh
|
Shell
|
mit
| 152 |
#!/usr/bin/env bash
source /vagrant/vagrant/bashurator/init.sh
# Setup the environment.
configure_apt_get() {
apt-get update
# Required for add-apt-repository
apt-get install -y software-properties-common build-essential zip unzip
# Required for nodejs
curl -sL https://deb.nodesource.com/setup_6.x | sudo -E bash -
# Required for mongodb
apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 7F0CEB10
echo "deb http://repo.mongodb.org/apt/ubuntu trusty/mongodb-org/3.0 multiverse" | tee /etc/apt/sources.list.d/mongodb-org-3.0.list
# Required for the latest Git
add-apt-repository ppa:git-core/ppa
# Required for latest Ruby.
apt-add-repository ppa:brightbox/ruby-ng
# update apt-get
apt-get update
}
# Execute the function above, in an idempotent function.
bashurator.configure "apt-get" configure_apt_get
|
linzjs/linz-development
|
vagrant/apt-get.sh
|
Shell
|
mit
| 882 |
#!/bin/bash
#
# This is an example of how to use _ftw to enumerate a set of plugin files.
# The technique avoids subshells (forking) and pipelines, using shell builtins
# instead of stuff like the following:
#
# for file in *.plug ; do
# echo "about to process $file"
# source $file
# done
source "../../ftw/ftw.shf"
source "../../spinner/spinner.shf"
__ftw_cb_ProcessFile()
{
case "$1" in
*.plug)
#echo "about to process $1"
source "$1"
_spinner --next
;;
*)
;;
esac
return 0
}
main()
{
_spinner --start "Processing ..."
_ftw \
--breadthfirst \
--callback __ftw_cb_ProcessFile \
$PWD
_spinner --end "done."
plugin
}
main
plugin
plugin
plugin
#
#
#
|
bluemarble/bau
|
investigate/main.sh
|
Shell
|
mit
| 789 |
#!/bin/bash
gem install sass
|
TeaMeow/Avane
|
test/sass.sh
|
Shell
|
mit
| 29 |
#!/bin/bash
DIR=$(cd $(dirname $0); pwd)
cd ${DIR}
# Run this file on the host machine
mkdir -p /Volumes/work/php
mkdir -p /Volumes/work/logs
mkdir -p ${DIR}/mysql/data
mkdir -p ${DIR}/elasticsearch/data
mkdir -p ${DIR}/elasticsearch/logs
mkdir -p ${DIR}/logstash/data
mkdir -p ${DIR}/logstash/logs
mkdir -p ${DIR}/logstash/pipeline/tmp
chmod -R 777 ${DIR}/mysql/data
chmod -R 777 ${DIR}/elasticsearch/data
chmod -R 777 ${DIR}/elasticsearch/logs
chmod -R 777 ${DIR}/logstash/data
chmod -R 777 ${DIR}/logstash/logs
chmod -R 777 ${DIR}/logstash/pipeline
docker-compose up
|
foxiswho/docker-compose-nginx-php-mysql
|
mac-php73-swoole/start.sh
|
Shell
|
mit
| 545 |
#! /bin/bash
# ./record_projector my_recording_name 1024x768
if [[ "x$2" == "x" ]] ; then
res="1440x900"
else
res=$2
fi
ffmpeg -f alsa -i pulse -f x11grab -r 25 -s $res -i :0.0+1440,0 -vcodec libx264 -pre:0 ultrafast -threads 4 $1.mkv
|
codedsk/qs
|
recording/record_projector.sh
|
Shell
|
mit
| 246 |
#!/bin/bash
# Slow method to find whether a given number is prime or not
echo -n "Enter a number: "
read num
i=2
while [ $i -lt $num ]
do
if [ `expr $num % $i` -eq 0 ]
then
echo "$num is not a prime number"
echo "Since it is divisible by $i"
exit
fi
i=`expr $i + 1`
done
echo "$num is a prime number "
|
pinam45/PFC_LaTeX
|
listings/exemple_code_files/prime_test.sh
|
Shell
|
mit
| 332 |
#!/usr/bin/env sh
#
# Delete the given feed.
#
# Author: Alastair Hughes
# Contact: hobbitalastair at yandex dot com
set -e
[ -z "${FEED_DIR}" ] && FEED_DIR="${XDG_CONFIG_DIR:-${HOME}/.config}/feeds/"
export PATH="${PATH}:$(dirname "$0")"
if [ ! -d "${FEED_DIR}" ]; then
printf "%s: feed dir '%s' does not exist\n" "$0" "${FEED_DIR}" 1>&2
exit 1
fi
if [ "$#" -ne 1 ]; then
printf 'usage: %s <name>\n' "$0" 1>&2
exit 1
fi
name="$1"
cd "${FEED_DIR}"
if [ ! -d "${name}" ]; then
printf "%s: no such feed: '%s'\n" "$0" "${name}" 1>&2
exit 1
fi
rm -rf "${name}"
|
hobbitalastair/feedutils
|
feed-delete.sh
|
Shell
|
mit
| 590 |
#!/bin/bash
#PBS -l nodes=1:ppn=24
#PBS -N abc_narrSFS_noabias
#PBS -m bea
#PBS -M [email protected]
cd $PBS_O_WORKDIR
export NPROCS=`wc -l $PBS_NODEFILE |gawk '//{print $1}'`
export PATH="/home/users/hahn/anaconda2/bin:$PATH"
export CENTRALMS_DIR="/mount/sirocco1/hahn/centralms/"
export CENTRALMS_CODEDIR="/home/users/hahn/projects/centralMS/"
tduty="0.5"
sfs="flex"
mpirun -np $NPROCS python /home/users/hahn/projects/centralMS/run/abc.py narrow_noabias $tduty $sfs 15 1000 > "/home/users/hahn/projects/centralMS/run/rSFH_0.2sfs_"$tduty"gyr.sfs"$sfs".log"
|
changhoonhahn/centralMS
|
run/siro_narrow_noabias.sh
|
Shell
|
mit
| 567 |
#!/usr/bin/env bash
##
# common functions
##
title () {
echo -e "📝 $1"
}
info () {
echo -e "📍 $1"
}
warn () {
echo -e "🚨 $1"
}
user () {
echo -e "❓ $1"
}
success () {
echo -e "✅ $1"
}
fail () {
echo -e "🚫 \"$1\""
echo ""
exit 1
}
link_files () {
ln -s "$1" "$2"
success "Linked $1 to $2"
}
sudo_link_files () {
sudo ln -s "$1" "$2"
success "Linked $1 to $2"
}
install_dotfiles () {
src="$1"
dest="$2"
root="$3"
if [ -L $dest ] || [ -f $dest ]; then
overwrite=false
backup=false
skip=false
while true
do
user "File already exists: $src, what do you want to do? [s]kip, [o]verwrite, [b]ackup?"
read -n 1 action
case "$action" in
o )
overwrite=true
break
;;
b )
backup=true
break
;;
s )
skip=true
break
;;
* )
warn "Invalid input: \"$action\""
;;
esac
done
if [ "$overwrite" == "true" ]; then
if [ ! -z "$root" ]; then
sudo rm -rf $dest
else
rm -rf $dest
fi
success "removed $dest"
fi
if [ "$backup" == "true" ]; then
mv $dest $dest\.backup
success "moved $dest to $dest.backup"
fi
if [ "$skip" == "false" ]; then
if [ ! -z "$root" ]; then
sudo_link_files $src $dest
else
link_files $src $dest
fi
else
success "skipped $src"
fi
else
if [ -z "$root" ]; then
sudo_link_files $src $dest
else
link_files $src $dest
fi
fi
}
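# Example calls (hypothetical paths):
#   install_dotfiles "$PWD/zshrc" "$HOME/.zshrc"        # user-owned target
#   install_dotfiles "$PWD/hosts" "/etc/hosts" "root"   # any non-empty third arg links with sudo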
|
atosatto/dotfiles
|
scripts/functions.sh
|
Shell
|
mit
| 1,615 |
ffmpeg -i /vagrant/HoneyBees.mp4 \
-c:v libx265 -preset medium -crf 28 \
-c:a libfdk_aac -b:a 128k \
/vagrant/HoneyBees_hvec24.mp4
ffmpeg -i /vagrant/HoneyBees.mp4 \
-c:v libx265 -preset medium -crf 20 \
-c:a libfdk_aac -b:a 128k \
/vagrant/HoneyBees_hvec20.mp4
|
abejenaru/vagrant-boxes
|
experiments/hevc/encode.sh
|
Shell
|
mit
| 275 |
#!/bin/bash
# Put this in a folder ABOVE the app/ folder, which contains the extension files (this repo)
# run to make a bundle.
rm -rf bundle
cp -R app bundle
rm bundle/lib/lodash.js
rm -rf bundle/scratch
rm bundle/*.scss
rm bundle/*.map
rm bundle/.gitignore
rm -rf bundle/.git
rm -rf bundle/.idea
zip -r bundle_$(date +%Y-%m-%d_%H-%M-%S).zip bundle/*
|
MightyPork/pokevision-filter
|
scratch/bundle.sh
|
Shell
|
mit
| 358 |
#!/bin/bash
set -e
pip3 install clint pyserial setuptools adafruit-nrfutil
sudo apt-get update
sudo apt-get install -y libllvm8 -V
sudo apt install -fy cppcheck clang-format-8
if [ ! -f /usr/bin/clang-format ]; then
sudo ln -s /usr/bin/clang-format-8 /usr/bin/clang-format
fi
# make all the directories we need for files and libraries
mkdir -p ${HOME}/.arduino15
mkdir -p ${HOME}/.arduino15/packages
mkdir -p ${HOME}/Arduino
mkdir -p ${HOME}/Arduino/libraries
# install arduino IDE
export PATH=$PATH:$GITHUB_WORKSPACE/bin
curl -fsSL https://raw.githubusercontent.com/arduino/arduino-cli/master/install.sh | sh -s 0.11.0 2>&1
arduino-cli config init > /dev/null
arduino-cli core update-index > /dev/null
|
adafruit/travis-ci-arduino
|
actions_install.sh
|
Shell
|
mit
| 699 |
#!/bin/bash
sudo docker run -ti --device /dev/nvidia0:/dev/nvidia0 --device \
/dev/nvidiactl:/dev/nvidiactl --device /dev/nvidia-uvm:/dev/nvidia-uvm \
--net=host djpetti/tensorflow /run_tests.sh
|
djpetti/rpinets
|
tensorflow/run_tensorflow_tests.sh
|
Shell
|
mit
| 200 |
#!/bin/sh -e
# Copyright (c) 2008-2013 LG Electronics, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This has only been tested on Ubuntu-12.04 amd64.
check_sanity=true
usage="$0 [--help|-h] [--version|-V]"
version="2.0.0"
for i ; do
case "$i" in
--help|-h) echo ${usage}; exit 0 ;;
--version|-V) echo ${version}; exit 0 ;;
*)
echo Unrecognized option: $i 1>&2
echo ${usage}
exit 1
;;
esac
done
sane=true
distributor_id_sane="^((Ubuntu))$"
release_sane="^((12.04)|(12.10))$"
codename_sane="^((precise)|(quantal))$"
arch_sane="^((i386)|(amd64))$"
case "${check_sanity}" in
true)
if [ ! -x /usr/bin/lsb_release ] ; then
echo 'WARNING: /usr/bin/lsb_release not available, cannot test sanity of this system.' 1>&2
sane=false
else
distributor_id=`/usr/bin/lsb_release -s -i`
release=`/usr/bin/lsb_release -s -r`
codename=`/usr/bin/lsb_release -s -c`
if ! echo "${distributor_id}" | egrep -q "${distributor_id_sane}"; then
echo "WARNING: Distributor ID reported by lsb_release '${distributor_id}' not in '${distributor_id_sane}'" 1>&2
sane=false
fi
if ! echo "${release}" | egrep -q "${release_sane}"; then
echo "WARNING: Release reported by lsb_release '${release}' not in '${release_sane}'" 1>&2
sane=false
fi
if ! echo "${codename}" | egrep -q "${codename_sane}"; then
echo "WARNING: Codename reported by lsb_release '${codename}' not in '${codename_sane}'" 1>&2
sane=false
fi
fi
if [ ! -x /usr/bin/dpkg ] ; then
echo 'WARNING: /usr/bin/dpkg not available, cannot test architecture of this system.' 1>&2
sane=false
else
arch=`/usr/bin/dpkg --print-architecture`
if ! echo "${arch}" | egrep -q "${arch_sane}"; then
echo "WARNING: Architecture reported by dpkg --print-architecture '${arch}' not in '${arch_sane}'" 1>&2
sane=false
fi
fi
case "${sane}" in
true) ;;
false)
echo 'WARNING: This system configuration is untested. Let us know if it works.' 1>&2
;;
esac
;;
false) ;;
esac
apt-get update
# These are essential on ubuntu
essential="\
bzip2 \
gzip \
tar \
wget \
"
# And we need these when on 64-bit Ubuntu ...
amd64_specific="\
g++-multilib \
gcc-multilib \
libc6-dev-i386 \
zlib1g:i386 \
"
[ "${arch}" = amd64 ] && essential="${essential} ${amd64_specific}"
apt-get install --yes \
${essential} \
bison \
build-essential \
chrpath \
diffstat \
gawk \
git \
language-pack-en \
python3 \
python3-jinja2 \
texi2html \
texinfo \
|
openwebos/deprecated-build-webos
|
scripts/prerequisites.sh
|
Shell
|
mit
| 3,466 |
export PATH="./bin:/usr/local/bin:/usr/local/sbin:$PATH"
export MANPATH="/usr/local/man:$MANPATH"
|
scottgc/dotfiles
|
osx/path.zsh
|
Shell
|
mit
| 98 |
# -*- sh -*-
list_command() {
local plugin_name=$1
local query=$2
if [ -z "$plugin_name" ]; then
local plugins_path
plugins_path=$(get_plugin_path)
if find "$plugins_path" -mindepth 1 -type d &>/dev/null; then
for plugin_path in "$plugins_path"/*; do
plugin_name=$(basename "$plugin_path")
printf "%s\\n" "$plugin_name"
display_installed_versions "$plugin_name" "$query"
done
else
printf "%s\\n" 'No plugins installed'
fi
else
check_if_plugin_exists "$plugin_name"
display_installed_versions "$plugin_name" "$query"
fi
}
display_installed_versions() {
local plugin_name=$1
local query=$2
local versions
versions=$(list_installed_versions "$plugin_name")
if [[ $query ]]; then
versions=$(printf "%s\n" "$versions" | grep -E "^\s*$query")
if [ -z "${versions}" ]; then
display_error "No compatible versions installed ($plugin_name $query)"
exit 1
fi
fi
if [ -n "${versions}" ]; then
for version in $versions; do
printf " %s\\n" "$version"
done
else
display_error ' No versions installed'
fi
}
list_command "$@"
|
asdf-vm/asdf
|
lib/commands/command-list.bash
|
Shell
|
mit
| 1,159 |
#! /bin/bash -e
# $Id$
# -----------------------------------------------------------------------------
# CppAD: C++ Algorithmic Differentiation: Copyright (C) 2003-14 Bradley M. Bell
#
# CppAD is distributed under multiple licenses. This distribution is under
# the terms of the
# Eclipse Public License Version 1.0.
#
# A copy of this license is included in the COPYING file of this distribution.
# Please visit http://www.coin-or.org/CppAD/ for information on other licenses.
# -----------------------------------------------------------------------------
if [ ! -e "bin/check_op_code.sh" ]
then
echo "bin/check_op_code.sh: must be executed from its parent directory"
exit 1
fi
echo "bin/check_op_code.sh: checking that op codes are in alphabetical order:"
# ---------------------------------------------------------------------------
# check enum list of codes are in alphabetical order
sed -n -e '/^enum/,/^\tNumberOp$/p' cppad/local/op_code.hpp | \
sed -e '/^enum/d' -e '/^\tNumberOp$/d' \
-e 's/^[ ]*//' -e 's/Op[, ].*//' -e '/^\/\//d' > bin/op_code.1.$$
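# Illustrative note (not part of the original script): bin/op_code.1.$$ now
# holds one bare operator name per line (e.g. "Abs", "Acos", ...) with the
# "Op" suffix and trailing commas stripped by the sed expressions above.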
#
sort --ignore-case bin/op_code.1.$$ > bin/op_code.2.$$
if ! diff bin/op_code.1.$$ bin/op_code.2.$$
then
echo "check_op_code.sh: enum list is not in alphabetical order"
rm bin/op_code.*.$$
exit 1
fi
# -----------------------------------------------------------------------------
# check NumArgTable
sed -n -e '/NumArgTable\[\]/,/^[ \t]*};/p' cppad/local/op_code.hpp | \
sed \
-e '/NumArgTable\[\]/d' \
-e '/^[ \t]*};/d' \
-e 's|^[ \t]*[0-9],* *// *||' \
-e 's|Op.*||' \
> bin/op_code.3.$$
#
if ! diff bin/op_code.1.$$ bin/op_code.3.$$
then
echo "check_op_code.sh: NumArgTable list is not in alphabetical order"
rm bin/op_code.*.$$
exit 1
fi
# -----------------------------------------------------------------------------
# check NumResTable (last line of NumResTable is not used)
sed -n -e '/NumResTable\[\]/,/^[ \t]*};/p' cppad/local/op_code.hpp | \
sed \
-e '/NumResTable\[\]/d' \
-e '/^[ \t]*};/d' \
-e '/Last entry not used/d' \
-e 's|^[ \t]*[0-9],* *// *||' \
-e 's|Op.*||' \
> bin/op_code.4.$$
#
if ! diff bin/op_code.1.$$ bin/op_code.4.$$
then
echo "check_op_code.sh: NumResTable list is not in alphabetical order"
echo "(or missing last line)"
rm bin/op_code.*.$$
exit 1
fi
# -----------------------------------------------------------------------------
# check OpNameTable
sed -n -e '/const char \*OpNameTable\[\]/,/^[ \t]*};/p' cppad/local/op_code.hpp | \
sed \
-e '/OpNameTable\[\]/d' \
-e '/^[ \t]*};/d' \
-e 's|^[ \t]*"||' \
-e 's|".*||' \
> bin/op_code.5.$$
#
if ! diff bin/op_code.1.$$ bin/op_code.5.$$
then
echo "check_op_code.sh: OpName list is not in alphabetical order"
rm bin/op_code.*.$$
exit 1
fi
# -----------------------------------------------------------------------------
# clean up
rm bin/op_code.*.$$
echo "bin/check_op_code.sh: OK"
|
utke1/cppad
|
bin/check_op_code.sh
|
Shell
|
epl-1.0
| 2,902 |
#!/bin/bash
#######################################################################
#
# Copyright (c) 2014 Eclipse Foundation and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Eclipse Public License v1.0
# which accompanies this distribution, and is available at
# http://www.eclipse.org/legal/epl-v10.html
#
# Contributors:
# Thanh Ha (Eclipse Foundation) - initial implementation
#
#######################################################################
MYSQL_HOST=${MYSQL_HOST:-127.0.0.1}
MYSQL_PORT=${MYSQL_PORT:-3306}
if [ "$MYSQL_HOST" != "127.0.0.1" ]
then
echo "Not initializing embedded MySQL due to user configuration"
sed -i 's/^autostart=.*/autostart=false/' /etc/supervisor/conf.d/mysqld.conf
fi
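# Example (illustrative, not part of the original script; the image name is
# hypothetical): pointing the container at an external database skips the
# bundled MySQL:
#   docker run -e MYSQL_HOST=db.example.com -e MYSQL_PORT=3306 example/lamp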
exec /usr/sbin/service supervisor start
|
zxiiro/lamp
|
scripts/apache/supervisord-start.sh
|
Shell
|
epl-1.0
| 838 |
#! /bin/sh -e
# tup - A file-based build system
#
# Copyright (C) 2018 Mike Shal <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# Make sure an external dependency that has a symlink directory component
# works.
. ./tup.sh
check_no_windows symlink
check_tup_suid
# Re-init in a subdir so we can control the external directory contents.
mkdir external
mkdir external/arch-x86
cd external
ln -s arch-x86 arch
cd ..
echo foo1 > external/arch-x86/foo.h
mkdir tmp
cd tmp
re_init
set_full_deps
# Use readlink to get a dependency directly on the symlink, and also read from
# a file using it as a directory.
cat > Tupfile << HERE
: |> readlink ../external/arch; cat ../external/arch/foo.h > %o |> out.txt
HERE
tup touch Tupfile
update
echo foo1 | diff - out.txt
update > .tup/.tupoutput
if ! grep 'No commands to execute' .tup/.tupoutput > /dev/null; then
cat .tup/.tupoutput
echo "Error: No files should have been recompiled when nothing was changed." 1>&2
exit 1
fi
sleep 1
echo foo2 > ../external/arch-x86/foo.h
update
echo foo2 | diff - out.txt
eotup
|
jonatanolofsson/tup
|
test/t4207-full-deps8.sh
|
Shell
|
gpl-2.0
| 1,673 |
#!/bin/bash
THIS=$(cd $(dirname ${BASH_SOURCE[0]}) && pwd)
cd $THIS
export PATH=/usr/local/bin:/bin:/sbin:/usr/bin:/usr/sbin
export PATH=$PATH:$(pwd)/casperjs/bin:$(pwd)/phantomjs/bin
./makemaps.py
|
necrolyte2/commutetime
|
cron.sh
|
Shell
|
gpl-2.0
| 201 |
convert images/OCS-520-A.png -crop 1549x4598+107+327 +repage images/OCS-520-A.png
#
#
#/OCS-520.png
convert images/OCS-520-B.png -crop 1539x4602+56+329 +repage images/OCS-520-B.png
#
#
#/OCS-520.png
|
jonnymwalker/Staroslavjanskij-Slovar
|
scripts/cropedges.OCS-520.sh
|
Shell
|
gpl-2.0
| 199 |
# Copyright (c) 2009-2011 Emanuele Tomasi
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
# Build the kernel module
function _make_module()
{
local MODULE_DIR=lib/modules/fglrx/build_mod
cd ${ROOT_DIR}
# 1)
# Build the kernel module
# 1.1) Copy the arch-dependent files
cp arch/${ARCH}/${MODULE_DIR}/* common/${MODULE_DIR}
cd common/${MODULE_DIR}
# 1.2) Apply the patches, if any
if _check_external_resource 'x' '_files' ${ROOT_DIR}/${SCRIPT_DIR}/patch_functions.sh; then
source ${ROOT_DIR}/${SCRIPT_DIR}/patch_functions.sh
_module_patch
fi
# 1.3) Make modules with ati's script
if ! sh make.sh; then
_print '' '' "`gettext "ERROR: failed to build the module"`"
return 1
fi
# 2)
# Copy the module
mkdir -p ${WORKING_DIRECTORY}/lib/modules/${KNL_RELEASE}/external
cat ../fglrx*.ko | gzip -c >> ${WORKING_DIRECTORY}/lib/modules/${KNL_RELEASE}/external/${ATI_DRIVER_NAME}.ko.gz
return 0
}
|
Scorpio92/linux_kernel_3.18.5
|
drv/packages/Slackware/make_module.sh
|
Shell
|
gpl-2.0
| 1,985 |
#!/bin/bash
featType=$1
featdim=$2
ubmsize=$3
wavlist=$4
trainset='train'
cd ubm
mkdir -p cfg/ models/ out/ feature_list/
cfgfile='ubm.'${ubmsize}'.'${featType}'.'${trainset}'.cfg'
featlist=`basename $wavlist`
featlist=${featlist%.*}.txt
echo $featlist
cp -f $wavlist feature_list/$featlist
sed -i 's/\.wav/\.'$featType'\.mfc/' feature_list/$featlist
sed -i 's/wavs\//feats\//' feature_list/$featlist
ubmdir=ubm${ubmsize}.${featType}.${trainset}
cp TEMPLATE.cfg cfg/$cfgfile
sed -i 's/featlist/'${featlist}'/g' cfg/$cfgfile
sed -i 's/ubmsize/'$ubmsize'/g' cfg/$cfgfile
sed -i 's/featdimension/'$featdim'/g' cfg/$cfgfile
sed -i 's/ubmdir/'${ubmdir}'/g' cfg/$cfgfile
mkdir -p models/$ubmdir/UBM_1
./GMM_UBM_train_linux_em64t_MultiThread16 cfg/$cfgfile > out/log.${cfgfile%.*}
# convert ubm file to new format
ubm_file_old=models/$ubmdir/UBM_$ubmsize/gmm_train12.param
ubm_file=models/$ubmdir.param
./oldUBM2newUBM $ubm_file_old $ubm_file &
wait
# convert ubm file to txt
./ReadGmmModel -i $ubm_file -o ${ubm_file}.txt > out/ReadGmmModel_${ubmdir} &
wait
echo "ubm conversion finished"
\rm -rf models/$ubmdir
echo "ubm dirs cleaned up"
|
mvansegbroeck/ivectool
|
ivec/train_ubm.bash
|
Shell
|
gpl-2.0
| 1,148 |
#!/bin/bash
set -x
## if [[ 1 == 0 ]] ; then
## compute the union and intersection of the L_DLPFC from the between-group results
## cd ../data/Group.results
# 3dcalc -a clorder.fwhm4.2.mddAndCtrl.L_BLA.weight.3mm+tlrc.HEAD -expr "equals(a, 2)" -prefix L_DLPFC.fwhm4.2.mddAndCtrl.L_BLA.weight.3mm
# 3dcalc -a clorder.fwhm4.2.mddAndCtrl.R_BLA.weight.3mm+tlrc.HEAD -expr "equals(a, 2)" -prefix L_DLPFC.fwhm4.2.mddAndCtrl.R_BLA.weight.3mm
# 3dcalc -a clorder.fwhm4.2.mddAndCtrl.L_SFA.weight.3mm+tlrc.HEAD -expr "equals(a, 6)" -prefix L_DLPFC.fwhm4.2.mddAndCtrl.L_SFA.weight.3mm
# 3dMean -mask_union -prefix mask.L_DLPFC.union \
# L_DLPFC.fwhm4.2.mddAndCtrl.L_BLA.weight.3mm+tlrc.HEAD \
# L_DLPFC.fwhm4.2.mddAndCtrl.R_BLA.weight.3mm+tlrc.HEAD \
# L_DLPFC.fwhm4.2.mddAndCtrl.L_SFA.weight.3mm+tlrc.HEAD
# 3dMean -mask_inter -prefix mask.L_DLPFC.intersection \
# L_DLPFC.fwhm4.2.mddAndCtrl.L_BLA.weight.3mm+tlrc.HEAD \
# L_DLPFC.fwhm4.2.mddAndCtrl.R_BLA.weight.3mm+tlrc.HEAD \
# L_DLPFC.fwhm4.2.mddAndCtrl.L_SFA.weight.3mm+tlrc.HEAD
## compute the union and intersection of the L_DLPFC from the CDRS-R regression results
cd ../data/Group.results.CDRS.t.score.scaled.diff.withAandC.reversed
3dcalc -a clorder.regression.fwhm4.2.restingstate.mddOnly.L_CMA.weight.3mm.and.CDRS.t.score.scaled.diff+tlrc.HEAD \
-expr "or(equals(a, 2), equals(a, 3), equals(a, 14))" \
-prefix L_DLPFC.regression.fwhm4.2.restingstate.mddOnly.L_CMA.weight.3mm.and.CDRS.t.score.scaled.diff
3dcalc -a clorder.regression.fwhm4.2.restingstate.mddOnly.R_CMA.weight.3mm.and.CDRS.t.score.scaled.diff+tlrc.HEAD \
-expr "equals(a, 8)" \
-prefix L_DLPFC.regression.fwhm4.2.restingstate.mddOnly.R_CMA.weight.3mm.and.CDRS.t.score.scaled.diff
3dcalc -a clorder.regression.fwhm4.2.restingstate.mddOnly.R_SFA.weight.3mm.and.CDRS.t.score.scaled.diff+tlrc.HEAD \
-expr "equals(a, 1)" \
-prefix L_DLPFC.regression.fwhm4.2.restingstate.mddOnly.R_SFA.weight.3mm.and.CDRS.t.score.scaled.diff
## compute the unions and intersections of the ROIs extracted above
3dMean -mask_union -prefix mask.L_DLPFC.union \
L_DLPFC.regression.fwhm4.2.restingstate.mddOnly.L_CMA.weight.3mm.and.CDRS.t.score.scaled.diff+tlrc.HEAD \
L_DLPFC.regression.fwhm4.2.restingstate.mddOnly.R_CMA.weight.3mm.and.CDRS.t.score.scaled.diff+tlrc.HEAD \
L_DLPFC.regression.fwhm4.2.restingstate.mddOnly.R_SFA.weight.3mm.and.CDRS.t.score.scaled.diff+tlrc.HEAD
3dMean -mask_inter -prefix mask.L_DLPFC.intersection \
L_DLPFC.regression.fwhm4.2.restingstate.mddOnly.L_CMA.weight.3mm.and.CDRS.t.score.scaled.diff+tlrc.HEAD \
L_DLPFC.regression.fwhm4.2.restingstate.mddOnly.R_CMA.weight.3mm.and.CDRS.t.score.scaled.diff+tlrc.HEAD \
L_DLPFC.regression.fwhm4.2.restingstate.mddOnly.R_SFA.weight.3mm.and.CDRS.t.score.scaled.diff+tlrc.HEAD
## now compute the overlap of the between-group and CDRS-R regression L_DLPFC masks
3dcalc -a ../Group.results/mask.L_DLPFC.union+tlrc.HEAD \
-b mask.L_DLPFC.union+tlrc.HEAD \
-expr "a+2*b" \
-prefix union.masks.L_DLPFC.from.between-group.and.CDRS-R.regression
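## (With the expression "a+2*b", a voxel keeps 1 if it is only in the
## between-group mask, 2 if only in the CDRS-R regression mask, 3 if in both.)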
3drefit -cmap INT_CMAP union.masks.L_DLPFC.from.between-group.and.CDRS-R.regression+tlrc.HEAD
3dcalc -a ../Group.results/mask.L_DLPFC.intersection+tlrc.HEAD \
-b mask.L_DLPFC.intersection+tlrc.HEAD \
-expr "a+2*b" \
-prefix intersection.masks.L_DLPFC.from.between-group.and.CDRS-R.regression
3drefit -cmap INT_CMAP intersection.masks.L_DLPFC.from.between-group.and.CDRS-R.regression+tlrc.HEAD
3dclust -isomerge 1.01 0 union.masks.L_DLPFC.from.between-group.and.CDRS-R.regression+tlrc.HEAD > clust.union.masks.L_DLPFC.from.between-group.and.CDRS-R.regression.txt
3dclust -isomerge 1.01 0 intersection.masks.L_DLPFC.from.between-group.and.CDRS-R.regression+tlrc.HEAD > clust.intersection.L_DLPFC.from.between-group.and.CDRS-R.regression.txt
\@DiceMetric ../Group.results/mask.L_DLPFC.union+tlrc.HEAD mask.L_DLPFC.union+tlrc.HEAD -save_match -save_diff > dice_metric.union.masks.L_DLPFC.from.between-group.and.CDRS-R.regression.txt
## fi
# cd ../data/Group.results.CDRS.t.score.scaled.diff.withAandC
# ## now compute the union of the R DLPFC ROIS, separate by positive or negative correlation
# 3dcalc -a clorder.regression.fwhm4.2.restingstate.mddOnly.L_CMA.weight.3mm.and.CDRS.t.score.scaled.diff+tlrc.HEAD \
# -expr "or(equals(a, 1), equals(a, 9))" \
# -prefix R_DLPFC.regression.fwhm4.2.restingstate.mddOnly.L_CMA.weight.3mm.and.CDRS.t.score.scaled.diff
# 3dcalc -a clorder.regression.fwhm4.2.restingstate.mddOnly.L_SFA.weight.3mm.and.CDRS.t.score.scaled.diff+tlrc.HEAD \
# -expr "equals(a, 2)" \
# -prefix R_DLPFC.regression.fwhm4.2.restingstate.mddOnly.L_SFA.weight.3mm.and.CDRS.t.score.scaled.diff
# 3dcalc -a clorder.regression.fwhm4.2.restingstate.mddOnly.R_BLA.weight.3mm.and.CDRS.t.score.scaled.diff+tlrc.HEAD \
# -expr "equals(a, 5)" \
# -prefix R_DLPFC.regression.fwhm4.2.restingstate.mddOnly.R_BLA.weight.3mm.and.CDRS.t.score.scaled.diff
# 3dcalc -a R_DLPFC.regression.fwhm4.2.restingstate.mddOnly.L_CMA.weight.3mm.and.CDRS.t.score.scaled.diff+tlrc \
# -b R_DLPFC.regression.fwhm4.2.restingstate.mddOnly.L_SFA.weight.3mm.and.CDRS.t.score.scaled.diff+tlrc \
# -c R_DLPFC.regression.fwhm4.2.restingstate.mddOnly.R_BLA.weight.3mm.and.CDRS.t.score.scaled.diff+tlrc \
# -expr "step(a + b) + 2*c" \
# -prefix R_DLPFC.CDRS.t.score.scaled.diff
# 3drefit -cmap INT_CMAP R_DLPFC.CDRS.t.score.scaled.diff+tlrc.HEAD
|
colmconn/rsfcGraphAnalysis
|
makeDlpfcMasks-reversed.sh
|
Shell
|
gpl-2.0
| 5,591 |
#!/bin/bash
# Copyright 1999-2010 Gentoo Foundation
# revdep-rebuild: Reverse dependency rebuilder.
# Original Author: Stanislav Brabec
# Rewrite Author: Michael A. Smith
# Current Maintainer: Paul Varner <[email protected]>
# TODO:
# - Use more /etc/init.d/functions.sh
# - Try to reduce the number of global vars
##
# Global Variables:
# Must-be-blank:
unset GREP_OPTIONS
# Readonly variables:
declare -r APP_NAME="revdep-rebuild" # The name of this application
declare -r VERSION="git"
declare -r OIFS="$IFS" # Save the IFS
declare -r ENV_FILE=0_env.rr # Contains environment variables
declare -r FILES_FILE=1_files.rr # Contains a list of files to search
declare -r LDPATH_FILE=2_ldpath.rr # Contains the LDPATH
declare -r BROKEN_FILE=3_broken.rr # Contains the list of broken files
declare -r ERRORS_FILE=3_errors.rr # Contains the ldd error output
declare -r RAW_FILE=4_raw.rr # Contains the raw list of packages
declare -r OWNERS_FILE=4_owners.rr # Contains the file owners
declare -r PKGS_FILE=4_pkgs.rr # Contains the unsorted bare package names
declare -r EBUILDS_FILE=4_ebuilds.rr # Contains the unsorted atoms
# (Appropriately slotted or versioned)
declare -r ORDER_FILE=5_order.rr # Contains the sorted atoms
declare -r STATUS_FILE=6_status.rr # Contains the emerge exit status
declare -ra FILES=(
"$ENV_FILE"
"$FILES_FILE"
"$LDPATH_FILE"
"$BROKEN_FILE"
"$ERRORS_FILE"
"$RAW_FILE"
"$OWNERS_FILE"
"$PKGS_FILE"
"$EBUILDS_FILE"
"$ORDER_FILE"
"$STATUS_FILE"
)
# "Boolean" variables: Considered "true" if it has any value at all
# "True" indicates we should...
declare FULL_LD_PATH # ...search across the COMPLETE_LD_LIBRARY_PATH
declare KEEP_TEMP # ...not delete tempfiles from the current run
declare ORDER_PKGS # ...sort the atoms in deep dependency order
declare PACKAGE_NAMES # ...emerge by slot, not by versioned atom
declare RM_OLD_TEMPFILES # ...remove tempfiles from prior runs
declare SEARCH_BROKEN # ...search for broken libraries and binaries
declare SEARCH_SYMBOLS # ...search for broken binaries with undefined symbols
declare VERBOSE # ...give verbose output
# Globals that impact portage directly:
declare EMERGE_DEFAULT_OPTS # String of options portage assumes to be set
declare EMERGE_OPTIONS # Array of options to pass to portage
declare PORTAGE_NICENESS # Renice to this value
declare PORTAGE_ROOT # The root path for portage
declare REVDEP_REBUILD_DEFAULT_OPTS # String of default emerge options for revdep-rebuild
# Customizable incremental variables:
# These variables can be prepended to either by setting the variable in
# your environment prior to execution, or by placing an entry in
# /etc/make.conf.
#
# An entry of "-*" means to clear the variable from that point forward.
# Example: env SEARCH_DIRS="/usr/bin -*" revdep-rebuild will set SEARCH_DIRS
# to contain only /usr/bin
declare LD_LIBRARY_MASK # Mask of specially evaluated libraries
declare SEARCH_DIRS # List of dirs to search for executables and libraries
declare SEARCH_DIRS_MASK # List of dirs not to search
# Other globals:
declare OLDPROG # Previous pass through the progress meter
declare EXACT_PKG # Versioned atom to emerge
declare HEAD_TEXT # Feedback string about the search
declare NOCOLOR # Set to "true" not to output term colors
declare OK_TEXT # Feedback about a search which found no errors
declare RC_NOCOLOR # Hack to ensure we respect NOCOLOR
declare REBUILD_LIST # Array of atoms to emerge
declare SKIP_LIST # Array of atoms that cannot be emerged (masked?)
declare SONAME # Soname/soname path pattern given on commandline
declare SONAME_SEARCH # Value of SONAME modified to match ldd's output
declare WORKING_TEXT # Feedback about the search
declare WORKING_DIR # Working directory where cache files are kept
main() {
# preliminary setup
portage_settings
get_opts "$@"
setup_portage
setup_search_paths_and_masks
get_search_env
[[ $QUIET -ne 1 ]] && echo
# Search for broken binaries
get_files
get_ldpath
main_checks
# Associate broken binaries with packages to rebuild
if [[ $PACKAGE_NAMES ]]; then
get_packages
clean_packages
assign_packages_to_ebuilds
else
get_exact_ebuilds
fi
# Rebuild packages owning broken binaries
get_build_order
rebuild
# All done
cleanup
}
##
# Refuse to delete anything before we cd to our tmpdir
# (See verify_tmpdir().)
rm() {
eerror "I was instructed to rm '$@'"
die 1 "Refusing to delete anything before changing to temporary directory."
}
: <<'EW'
##
# GNU find has -executable, but if our users' finds do not have that flag
# we emulate it with this function. Also emulates -writable and -readable.
# Usage: find PATH ARGS -- use find like normal, except use -executable instead
# of various versions of -perm /+ blah blah and hacks
find() {
hash find || { die 1 'find not found!'; }
# We can be pretty sure find itself should be executable.
local testsubject="$(type -P find)"
if [[ $(command find "$testsubject" -executable 2> /dev/null) ]]; then
unset -f find # We can just use the command find
elif [[ $(command find "$testsubject" -perm /u+x 2> /dev/null) ]]; then
find() {
a=(${@//-executable/-perm \/u+x})
a=(${a[@]//-writable/-perm \/u+w})
a=(${a[@]//-readable/-perm \/u+r})
command find "${a[@]}"
}
elif [[ $(command find "$testsubject" -perm +u+x 2> /dev/null) ]]; then
find() {
a=(${@//-executable/-perm +u+x})
a=(${a[@]//-writable/-perm +u+w})
a=(${a[@]//-readable/-perm +u+r})
command find "${a[@]}"
}
else # Last resort
find() {
a=(${@//-executable/-exec test -x '{}' \; -print})
a=(${a[@]//-writable/-exec test -w '{}' \; -print})
a=(${a[@]//-readable/-exec test -r '{}' \; -print})
command find "${a[@]}"
}
fi
find "$@"
}
EW
print_usage() {
cat << EOF
${APP_NAME}: (${VERSION})
Copyright (C) 2003-2010 Gentoo Foundation, Inc.
This is free software; see the source for copying conditions.
Usage: $APP_NAME [OPTIONS] [--] [EMERGE_OPTIONS]
Broken reverse dependency rebuilder.
-C, --nocolor Turn off colored output
-d, --debug Print way too much information (uses bash's set -xv)
-e, --exact Emerge based on exact package version
-h, --help Print this usage
-i, --ignore Ignore temporary files from previous runs
-k, --keep-temp Do not delete temporary files on exit
-L, --library NAME Unconditionally emerge existing packages that use the
--library=NAME library with NAME. NAME can be a full path to the
library or a basic regular expression (man grep)
-l, --no-ld-path Do not set LD_LIBRARY_PATH
-o, --no-order Do not check the build order
(Saves time, but may cause breakage.)
-p, --pretend Do a trial run without actually emerging anything
(also passed to emerge command)
-P, --no-progress Turn off the progress meter
-q, --quiet Be less verbose (also passed to emerge command)
-u, --search-symbols Search for undefined symbols (may have false positives)
-v, --verbose Be more verbose (also passed to emerge command)
Calls emerge, options after -- are ignored by $APP_NAME
and passed directly to emerge.
Report bugs to <http://bugs.gentoo.org>
EOF
}
##
# Usage: progress i n
# i: current item
# n: total number of items to process
progress() {
if [[ -t 1 ]]; then
progress() {
local curProg=$(( $1 * 100 / $2 ))
(( curProg == OLDPROG )) && return # no change, output nothing
OLDPROG="$curProg" # must be a global variable
(( $1 == $2 )) && local lb=$'\n'
echo -ne '\r \r'"[ $curProg% ] $lb"
}
progress $@
else # STDOUT is not a tty. Disable progress meter.
progress() { :; }
fi
}
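# Example (illustrative, not part of the original script): drive the meter
# over 200 items; on a tty this redraws "[ 1% ]" .. "[ 100% ]" in place.
#   for ((n=1; n<=200; n++)); do progress $n 200; done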
##
# Usage: countdown n
# n: number of seconds to count
countdown() {
local i
for ((i=1; i<$1; i++)); do
echo -ne '\a.'
((i<$1)) && sleep 1
done
echo -e '\a.'
}
##
# Replace whitespace with linebreaks, normalize repeated '/' chars, and sort -u
# (If any libs have whitespace in their filenames, someone needs punishment.)
clean_var() {
gawk 'BEGIN {RS="[[:space:]]"}
/-\*/ {exit}
/[^[:space:]]/ {gsub(/\/\/+/, "/"); print}' | sort -u
}
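# Example (illustrative, not part of the original script): "-*" truncates the
# list and repeated slashes collapse, so
#   echo "/usr/bin //lib -* /opt" | clean_var
# prints "/lib" and "/usr/bin" and drops everything from "-*" onward.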
##
# Exit and optionally output to stderr
die() {
local status=$1
shift
# Check if eerror has been loaded.
# It's loaded _after_ opt parsing but not before due to RC_NOCOLOR.
type eerror &> /dev/null
if [[ $? -eq 0 ]];
then
eerror "$@"
else
echo " * ${@}" >> /dev/stderr
fi
exit $status
}
##
# What to do when dynamic linking is consistent
clean_exit() {
if [[ ! $KEEP_TEMP ]]; then
rm -f "${FILES[@]}"
if [[ "$WORKING_DIR" != "/var/cache/${APP_NAME}" ]]; then
# Remove the working directory
builtin cd; rmdir "$WORKING_DIR"
fi
fi
if [[ $QUIET -ne 1 ]];
then
echo
einfo "$OK_TEXT... All done. "
fi
exit 0
}
##
# Get the name of the package that owns a file or list of files given as args.
# NOTE: depends on app-misc/realpath!
get_file_owner() {
local IFS=$'\n'
rpath=$(realpath "${*}" 2>/dev/null)
# To ensure we always have something in rpath...
[[ -z $rpath ]] && rpath=${*}
# Workaround for bug 280341
mlib=$(echo ${*}|sed 's:/lib/:/lib64/:')
[[ "${*}" == "${mlib}" ]] && mlib=$(echo ${*}|sed 's:/lib64/:/lib/:')
# Add a space to the end of each object name to prevent false
# matches, for example /usr/bin/dia matching /usr/bin/dialog (bug #196460).
# The same for "${rpath} ".
# Don't match an entry with a '-' at the start of the package name. This
# prevents us from matching invalid -MERGING entries. (bug #338031)
find -L /var/db/pkg -type f -name CONTENTS -print0 |
xargs -0 grep -m 1 -Fl -e "${*} " -e "${rpath} " -e "${mlib} " |
sed 's:/var/db/pkg/\(.*\)/\([^-].*\)/CONTENTS:\1/\2:'
}
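# Example (illustrative, not part of the original script; the package and
# library names are hypothetical):
#   get_file_owner /usr/lib/libfoo.so.1
# prints e.g. "media-libs/foo-1.2.3" when that package's CONTENTS lists the file.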
##
# Normalize some EMERGE_OPTIONS
normalize_emerge_opts() {
# Normalize some EMERGE_OPTIONS
EMERGE_OPTIONS=(${EMERGE_OPTIONS[@]/%-p/--pretend})
EMERGE_OPTIONS=(${EMERGE_OPTIONS[@]/%-f/--fetchonly})
EMERGE_OPTIONS=(${EMERGE_OPTIONS[@]/%-v/--verbose})
}
##
# Use the color preference from portage
setup_color() {
# This should still work if NOCOLOR is set by the -C flag or in the user's
# environment.
[[ $NOCOLOR = yes || $NOCOLOR = true ]] && export RC_NOCOLOR=yes # HACK! (grr)
# TODO: Change location according to Bug 373219
# Remove /etc/init.d/functions.sh once everything is migrated
if [ -e /lib/gentoo/functions.sh ]; then
. /lib/gentoo/functions.sh
elif [ -e /etc/init.d/functions.sh ]; then
. /etc/init.d/functions.sh
else
echo "Unable to find functions.sh"
exit 1
fi
}
##
# Die if an argument is missing.
die_if_missing_arg() {
[[ ! $2 || $2 = -* ]] && die 1 "Missing expected argument to $1"
}
##
# Die because an option is not recognized.
die_invalid_option() {
# Can't use eerror and einfo because this gets called before functions.sh
# is sourced
echo
echo "Encountered unrecognized option $1." >&2
echo
echo "$APP_NAME no longer automatically passes unrecognized options to portage."
echo "Separate emerge-only options from revdep-rebuild options with the -- flag."
echo
echo "For example, $APP_NAME -v -- --ask"
echo
echo "See the man page or $APP_NAME -h for more detail."
echo
exit 1
}
##
# Warn about deprecated options.
warn_deprecated_opt() {
# Can't use eerror and einfo because this gets called before functions.sh
# is sourced
echo
echo "Encountered deprecated option $1." >&2
[[ $2 ]] && echo "Please use $2 instead." >&2
}
##
# Get whole-word commandline options preceded by two dashes.
get_longopts() {
case $1 in
--nocolor) export NOCOLOR="yes";;
--no-color) warn_deprecated_opt "$1" "--nocolor"
export NOCOLOR="yes";;
--debug) set -xv;;
--exact) unset PACKAGE_NAMES;;
--help) print_usage
exit 0;;
--ignore) RM_OLD_TEMPFILES=1;;
--keep-temp) KEEP_TEMP=1;;
--library=*) # TODO: check for invalid values
SONAME="${1#*=}"
unset SEARCH_BROKEN;;
--soname=*|--soname-regexp=*) # TODO: check for invalid values
warn_deprecated_opt "${1%=*}" "--library"
SONAME="${1#*=}"
unset SEARCH_BROKEN;;
--library) # TODO: check for invalid values
die_if_missing_arg $1 $2
shift
SONAME="$1"
unset SEARCH_BROKEN;;
--soname|--soname-regexp) # TODO: check for invalid values
warn_deprecated_opt "$1" "--library"
die_if_missing_arg $1 $2
shift
SONAME="$1"
unset SEARCH_BROKEN;;
--no-ld-path) unset FULL_LD_PATH;;
--no-order) unset ORDER_PKGS;;
--no-progress) progress() { :; };;
--pretend) EMERGE_OPTIONS+=("--pretend")
PRETEND=1;;
--quiet) progress() { :; }
QUIET=1
EMERGE_OPTIONS+=($1);;
--search-symbols) SEARCH_SYMBOLS=1;;
--verbose) VERBOSE=1
EMERGE_OPTIONS+=("--verbose");;
--extra-verbose) warn_deprecated_opt "$1" "--verbose"
VERBOSE=1
EMERGE_OPTIONS+=("--verbose");;
--package-names) # No longer used, since it is the
# default. We accept it for
# backwards compatibility.
warn_deprecated_opt "$1"
PACKAGE_NAMES=1;;
*) die_invalid_option $1;;
esac
}
##
# Get single-letter commandline options preceded by a single dash.
get_shortopts() {
local OPT OPTSTRING OPTARG OPTIND
while getopts ":CdehikL:loPpquvX" OPT; do
case "$OPT" in
C) # TODO: Match syntax with the rest of gentoolkit
export NOCOLOR="yes";;
d) set -xv;;
e) unset PACKAGE_NAMES;;
h) print_usage
exit 0;;
i) RM_OLD_TEMPFILES=1;;
k) KEEP_TEMP=1;;
L) # TODO: Check for invalid values
SONAME="${OPTARG#*=}"
unset SEARCH_BROKEN;;
l) unset FULL_LD_PATH;;
o) unset ORDER_PKGS;;
P) progress() { :; };;
p) EMERGE_OPTIONS+=("--pretend")
PRETEND=1;;
q) progress() { :; }
QUIET=1
EMERGE_OPTIONS+=("--quiet");;
u) SEARCH_SYMBOLS=1;;
v) VERBOSE=1
EMERGE_OPTIONS+=("--verbose");;
X) # No longer used, since it is the default.
# We accept it for backwards compatibility.
warn_deprecated_opt "-X"
PACKAGE_NAMES=1;;
*) die_invalid_option "-$OPTARG";;
esac
done
}
##
# Get command-line options.
get_opts() {
local avoid_utils
local -a args
echo_v() { ewarn "$@"; }
unset VERBOSE KEEP_TEMP EMERGE_OPTIONS RM_OLD_TEMPFILES
ORDER_PKGS=1
PACKAGE_NAMES=1
SONAME="not found"
SEARCH_BROKEN=1
FULL_LD_PATH=1
while [[ $1 ]]; do
case $1 in
--) shift
EMERGE_OPTIONS+=("$@")
break;;
-*) while true; do
args+=("$1")
shift
[[ ${1:--} = -* ]] && break
done
if [[ ${args[0]} = --* ]]; then
get_longopts "${args[@]}"
else
get_shortopts "${args[@]}"
fi;;
*) die_invalid_option "$1";;
esac
unset args
done
setup_color
normalize_emerge_opts
# If the user is not super, add --pretend to EMERGE_OPTIONS
if [[ ${EMERGE_OPTIONS[@]} != *--pretend* && $UID -ne 0 ]]; then
ewarn "You are not superuser. Adding --pretend to emerge options."
EMERGE_OPTIONS+=(--pretend)
fi
}
##
# Is there a --pretend or --fetchonly flag in the EMERGE_OPTIONS array?
is_real_merge() {
[[ ${EMERGE_OPTIONS[@]} != *--pretend* &&
${EMERGE_OPTIONS[@]} != *--fetchonly* ]]
}
##
# Clean up temporary files and exit
cleanup_and_die() {
rm -f "$@"
die 1 " ...terminated. Removing incomplete $@."
}
##
# Clean trap
clean_trap() {
trap "cleanup_and_die $*" SIGHUP SIGINT SIGQUIT SIGABRT SIGTERM
rm -f "$@"
}
##
# Returns 0 if the first arg is found in the remaining args, 1 otherwise
# (Returns 2 if given fewer than 2 arguments)
has() {
(( $# > 1 )) || return 2
local IFS=$'\a' target="$1"
shift
[[ $'\a'"$*"$'\a' = *$'\a'$target$'\a'* ]]
}
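# Examples (illustrative, not part of the original script):
#   has b a b c   # returns 0 (found)
#   has d a b c   # returns 1 (not found)
#   has a         # returns 2 (fewer than 2 arguments)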
##
# Dies when it can't change directories
cd() {
if builtin cd -P "$@"; then
if [[ $1 != $PWD ]]; then
# Some symlink malfeasance is going on
die 1 "Working directory expected to be $1, but it is $PWD"
fi
else
die 1 "Unable to change working directory to '$@'"
fi
}
##
# Tries not to delete any files or directories it shouldn't
setup_rm() {
##
# Anything in the FILES array in tmpdir is fair game for removal
rm() {
local i IFS=$'\a'
[[ $APP_NAME ]] || die 1 '$APP_NAME is not defined! (This is a bug.)'
case $@ in
*/*|*-r*|*-R*) die 1 "Oops, I'm not allowed to delete that. ($@)";;
esac
for i; do
# Don't delete files that are not listed in the array
# Allow no slashes or recursive deletes at all.
case $i in
*/*|-*r*|-*R*) :;; # Not OK
-*) continue;; # OK
esac
has "$i" "${FILES[@]}" && continue
die 1 "Oops, I'm not allowed to delete that. ($@)"
done
command rm "$@"
}
# delete this setup function so it's harmless to re-run
setup_rm() { :; }
}
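# Example (illustrative, not part of the original script): once setup_rm has
# run inside the tmpdir,
#   rm 3_broken.rr     # permitted: the file is listed in FILES
#   rm -r sub/dir      # dies: slashes and recursive deletes are refused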
##
# Make our temporary files directory
# $1 - directory name
# $2 - user name
verify_tmpdir() {
if [[ ! $1 ]]; then
die 1 'Temporary file path is unset! (This is a bug.)'
elif [[ -d $1 ]]; then
cd "$1"
else
die 1 "Unable to find a satisfactory location for temporary files ($1)"
fi
[[ $VERBOSE ]] && einfo "Temporary cache files are located in $PWD"
setup_rm
}
get_search_env() {
local new_env
local old_env
local uid=$(python -c 'import os; import pwd; print(pwd.getpwuid(os.getuid())[0])')
# Find a place to put temporary files
if [[ "$uid" == "root" ]]; then
local tmp_target="/var/cache/${APP_NAME}"
else
local tmp_target="$(mktemp -d -t revdep-rebuild.XXXXXXXXXX)"
fi
# From here on all work is done inside the temporary directory
verify_tmpdir "$tmp_target"
WORKING_DIR="$tmp_target"
if [[ $SEARCH_BROKEN ]]; then
SONAME_SEARCH="$SONAME"
HEAD_TEXT="broken by a package update"
OK_TEXT="Dynamic linking on your system is consistent"
WORKING_TEXT="consistency"
else
# first case is needed to test against /path/to/foo.so
if [[ $SONAME = /* ]]; then
# Set to "<space>$SONAME<space>"
SONAME_SEARCH=" $SONAME "
# Escape the "/" characters
SONAME_SEARCH="${SONAME_SEARCH//\//\\/}"
else
# Set to "<tab>$SONAME<space>"
SONAME_SEARCH=$'\t'"$SONAME "
fi
HEAD_TEXT="using $SONAME"
OK_TEXT="There are no dynamic links to $SONAME"
unset WORKING_TEXT
fi
# If any of our temporary files are older than 1 day, remove them all
if [[ ! $KEEP_TEMP ]]; then
while read; do
RM_OLD_TEMPFILES=1
break
done < <(find -L . -maxdepth 1 -type f -name '*.rr' -mmin +1440 -print 2>/dev/null)
fi
# Compare old and new environments
# Don't use our previous files if environment doesn't match
new_env=$(
# We do not care if these emerge options change
EMERGE_OPTIONS=(${EMERGE_OPTIONS[@]//--pretend/})
EMERGE_OPTIONS=(${EMERGE_OPTIONS[@]//--fetchonly/})
EMERGE_OPTIONS=(${EMERGE_OPTIONS[@]//--verbose/})
cat <<- EOF
SEARCH_DIRS="$SEARCH_DIRS"
SEARCH_DIRS_MASK="$SEARCH_DIRS_MASK"
LD_LIBRARY_MASK="$LD_LIBRARY_MASK"
PORTAGE_ROOT="$PORTAGE_ROOT"
EMERGE_OPTIONS="${EMERGE_OPTIONS[@]}"
ORDER_PKGS="$ORDER_PKGS"
FULL_LD_PATH="$FULL_LD_PATH"
EOF
)
if [[ -r "$ENV_FILE" && -s "$ENV_FILE" ]]; then
old_env=$(<"$ENV_FILE")
if [[ $old_env != $new_env ]]; then
ewarn 'Environment mismatch from previous run, deleting temporary files...'
RM_OLD_TEMPFILES=1
fi
else
# No env file found, silently delete any other tempfiles that may exist
RM_OLD_TEMPFILES=1
fi
# If we should remove old tempfiles, do so
if [[ $RM_OLD_TEMPFILES ]]; then
rm -f "${FILES[@]}"
else
for file in "${FILES[@]}"; do
if [ -e "$file" ]; then
chown ${uid}:portage "$file"
chmod 600 "$file"
fi
done
fi
# Save the environment in a file for next time
echo "$new_env" > "$ENV_FILE"
[[ $VERBOSE ]] && echo $'\n'"$APP_NAME environment:"$'\n'"$new_env"
if [[ $QUIET -ne 1 ]];
then
echo
einfo "Checking reverse dependencies"
einfo "Packages containing binaries and libraries $HEAD_TEXT"
einfo "will be emerged."
fi
}
get_files() {
[[ $QUIET -ne 1 ]] && einfo "Collecting system binaries and libraries"
if [[ -r "$FILES_FILE" && -s "$FILES_FILE" ]]; then
[[ $QUIET -ne 1 ]] && einfo "Found existing $FILES_FILE"
else
# Be safe and remove any extraneous temporary files
# Don't remove 0_env.rr - The first file in the array
rm -f "${FILES[@]:1}"
clean_trap "$FILES_FILE"
if [[ $SEARCH_DIRS_MASK ]]; then
findMask=($SEARCH_DIRS_MASK)
findMask="${findMask[@]/#/-o -path }"
findMask="( ${findMask#-o } ) -prune -o"
fi
# TODO: Check this -- afaict SEARCH_DIRS isn't an array, so this should just be $SEARCH_DIRS?
find ${SEARCH_DIRS[@]} $findMask -type f \( -perm -u+x -o -perm -g+x -o -perm -o+x -o \
-name '*.so' -o -name '*.so.*' -o -name '*.la' \) -print 2> /dev/null |
sort -u > "$FILES_FILE" ||
die $? "find failed to list binary files (This is a bug.)"
[[ $QUIET -ne 1 ]] && einfo "Generated new $FILES_FILE"
fi
}
parse_ld_so_conf() {
# FIXME: not safe for paths with spaces
local include
for path in $(sed '/^#/d;s/#.*$//' < /etc/ld.so.conf); do
if [[ $include = true ]]; then
for include_path in $(sed '/^#/d;s/#.*$//' /etc/${path} 2>/dev/null); do
echo $include_path
done
include=""
continue
fi
if [[ $path != include ]]; then
echo $path
else
include="true"
continue
fi
done
}
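# Example (illustrative, not part of the original script): for an
# /etc/ld.so.conf such as
#   include ld.so.conf.d/*.conf
#   /usr/local/lib
# the function echoes /usr/local/lib plus every path listed in the matching
# /etc/ld.so.conf.d/*.conf files, with comments stripped.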
get_ldpath() {
local COMPLETE_LD_LIBRARY_PATH
[[ $SEARCH_BROKEN && $FULL_LD_PATH ]] || return
[[ $QUIET -ne 1 ]] && einfo 'Collecting complete LD_LIBRARY_PATH'
if [[ -r "$LDPATH_FILE" && -s "$LDPATH_FILE" ]]; then
[[ $QUIET -ne 1 ]] && einfo "Found existing $LDPATH_FILE."
else
clean_trap "$LDPATH_FILE"
# Ensure that the "trusted" lib directories are at the start of the path
COMPLETE_LD_LIBRARY_PATH=(
/lib*
/usr/lib*
$(parse_ld_so_conf)
$(sed 's:/[^/]*$::' < "$FILES_FILE" | sort -ru)
)
IFS=':'
COMPLETE_LD_LIBRARY_PATH="${COMPLETE_LD_LIBRARY_PATH[*]}"
IFS="$OIFS"
echo "$COMPLETE_LD_LIBRARY_PATH" > "$LDPATH_FILE"
[[ $QUIET -ne 1 ]] && einfo "Generated new $LDPATH_FILE"
fi
}
main_checks() {
local target_file
local -a files
local i=0
local ldd_output
local ldd_status
local numFiles
local COMPLETE_LD_LIBRARY_PATH
local message
local broken_lib
if [[ $SEARCH_BROKEN && $FULL_LD_PATH ]]; then
[[ -r "$LDPATH_FILE" && -s "$LDPATH_FILE" ]] ||
die 1 "Unable to find $LDPATH_FILE"
COMPLETE_LD_LIBRARY_PATH=$(<"$LDPATH_FILE")
fi
[[ $QUIET -ne 1 ]] && einfo "Checking dynamic linking $WORKING_TEXT"
if [[ -r "$BROKEN_FILE" && -s "$BROKEN_FILE" ]]; then
[[ $QUIET -ne 1 ]] && einfo "Found existing $BROKEN_FILE."
else
clean_trap "$BROKEN_FILE" "$ERRORS_FILE"
files=($(<"$FILES_FILE"))
numFiles="${#files[@]}"
for target_file in "${files[@]}"; do
if [[ $target_file != *.la ]]; then
# Note: double checking seems to be faster than single with complete path
# (special add ons are rare).
ldd_output=$(ldd -d -r "$target_file" 2>> "$ERRORS_FILE" | sort -u)
ldd_status=$? # TODO: Check this for problems with sort
# HACK: if LD_LIBRARY_MASK is null or undefined grep -vF doesn't work
if grep -vF "${LD_LIBRARY_MASK:=$'\a'}" <<< "$ldd_output" |
grep -q -E "$SONAME_SEARCH"; then
if [[ $SEARCH_BROKEN && $FULL_LD_PATH ]]; then
if LD_LIBRARY_PATH="$COMPLETE_LD_LIBRARY_PATH" ldd "$target_file" 2>/dev/null |
grep -vF "$LD_LIBRARY_MASK" | grep -q -E "$SONAME_SEARCH"; then
# FIXME: I hate duplicating code
# Only build missing direct dependencies
MISSING_LIBS=$(
expr='s/[[:space:]]*\([^[:space:]]*\) => not found/\1/p'
sed -n "$expr" <<< "$ldd_output"
)
REQUIRED_LIBS=$(
expr='s/^[[:space:]]*NEEDED[[:space:]]*\([^[:space:]]*\).*/\1/p';
objdump -x "$target_file" | grep NEEDED | sed "$expr" | sort -u
)
MISSING_LIBS=$(grep -F "$REQUIRED_LIBS" <<< "$MISSING_LIBS")
if [[ $MISSING_LIBS ]]; then
echo "obj $target_file" >> "$BROKEN_FILE"
echo_v " broken $target_file (requires $MISSING_LIBS)"
fi
fi
else
# FIXME: I hate duplicating code
# Only rebuild for direct dependencies
MISSING_LIBS=$(
expr="s/^[[:space:]]*\([^[:space:]]*\).*$/\1/p"
sort -u <<< "$ldd_output" | grep -E "$SONAME" | sed -n "$expr"
)
REQUIRED_LIBS=$(
expr='s/^[[:space:]]*NEEDED[[:space:]]*\([^[:space:]]*\).*/\1/p';
objdump -x "$target_file" | grep NEEDED | sed "$expr" | sort -u
)
MISSING_LIBS=$(grep -F "$REQUIRED_LIBS" <<< "$MISSING_LIBS")
if [[ $MISSING_LIBS ]]; then
echo "obj $target_file" >> "$BROKEN_FILE"
if [[ $SEARCH_BROKEN ]]; then
echo_v " broken $target_file (requires $MISSING_LIBS)"
else
echo_v " found $target_file"
fi
fi
fi
fi
# Search for symbols not defined
if [[ $SEARCH_BROKEN ]]; then
# Look for symbol not defined errors
if grep -vF "${LD_LIBRARY_MASK:=$'\a'}" <<< "$ldd_output" |
grep -q -E 'symbol .* not defined'; then
message=$(gawk '/symbol .* not defined/ {NF--; print $0}' <<< "$ldd_output")
broken_lib=$(gawk '/symbol .* not defined/ {print $NF}' <<< "$ldd_output" | \
sed 's/[()]//g')
echo "obj $broken_lib" >> "$BROKEN_FILE"
echo_v " broken $broken_lib ($message)"
fi
fi
# Look for undefined symbol error if not a .so file
if [[ $SEARCH_BROKEN && $SEARCH_SYMBOLS ]]; then
case $target_file in
*.so|*.so.*)
;;
*)
if grep -vF "${LD_LIBRARY_MASK:=$'\a'}" <<< "$ldd_output" |
grep -q -F 'undefined symbol:'; then
message=$(gawk '/undefined symbol:/ {print $3}' <<< "$ldd_output")
message="${message//$'\n'/ }"
echo "obj $target_file" >> "$BROKEN_FILE"
echo_v " broken $target_file (undefined symbols(s): $message)"
fi
;;
esac
fi
elif [[ $SEARCH_BROKEN ]]; then
# Look for broken .la files
la_SEARCH_DIRS="$(parse_ld_so_conf)"
la_search_dir=""
la_broken=""
la_lib=""
for depend in $(
gawk -F"[=']" '/^dependency_libs/{
print $3
}' "$target_file"
); do
if [[ $depend = /* && ! -e $depend ]]; then
echo "obj $target_file" >> "$BROKEN_FILE"
echo_v " broken $target_file (requires $depend)"
elif [[ $depend = -[LR]/* ]]; then
if ! [[ $'\n'${la_SEARCH_DIRS}$'\n' == *$'\n'${depend#-?}$'\n'* ]]; then
la_SEARCH_DIRS+=$'\n'"${depend#-?}"
fi
elif [[ $depend = "-l"* ]]; then
la_lib="lib${depend#-l}"
la_broken="yes"
IFS=$'\n'
for la_search_dir in $la_SEARCH_DIRS; do
if [[ -e ${la_search_dir}/${la_lib}.so || -e ${la_search_dir}/${la_lib}.a ]]; then
la_broken="no"
fi
done
IFS="$OIFS"
if [[ $la_broken = yes ]]; then
echo "obj $target_file" >> "$BROKEN_FILE"
echo_v " broken $target_file (requires $depend)"
fi
fi
done
unset la_SEARCH_DIRS la_search_dir la_broken la_lib
fi
[[ $VERBOSE ]] &&
progress $((++i)) $numFiles $target_file ||
progress $((++i)) $numFiles
done
if [[ $SEARCH_BROKEN && -f $ERRORS_FILE ]]; then
# Look for missing version
while read target_file; do
echo "obj $target_file" >> "$BROKEN_FILE"
echo_v " broken $target_file (no version information available)"
done < <(
# Regexify LD_LIBRARY_MASK. Exclude it from the search.
LD_LIBRARY_MASK="${LD_LIBRARY_MASK//$'\n'/|}"
gawk -v ldmask="(${LD_LIBRARY_MASK//./\\\\.})" '
/no version information available/ && $0 !~ ldmask {
gsub(/[()]/, "", $NF)
if (seen[$NF]++) next
print $NF
}' "$ERRORS_FILE"
)
fi
[[ -r "$BROKEN_FILE" && -s "$BROKEN_FILE" ]] || clean_exit
sort -u "$BROKEN_FILE" -o "$BROKEN_FILE"
[[ $QUIET -ne 1 ]] && einfo "Generated new $BROKEN_FILE"
fi
}
get_packages() {
local target_file
local EXACT_PKG
local PKG
local obj
einfo 'Assigning files to packages'
if [[ -r "$RAW_FILE" && -s "$RAW_FILE" ]]; then
einfo "Found existing $RAW_FILE"
else
clean_trap "$RAW_FILE" "$OWNERS_FILE"
while read obj target_file; do
EXACT_PKG=$(get_file_owner $target_file)
if [[ $EXACT_PKG ]]; then
# Strip version information
PKG="${EXACT_PKG%%-r[[:digit:]]*}"
PKG="${PKG%-*}"
echo "$EXACT_PKG" >> "$RAW_FILE"
echo "$target_file -> $EXACT_PKG" >> "$OWNERS_FILE"
echo_v " $target_file -> $PKG"
else
ewarn " !!! $target_file not owned by any package is broken !!!"
echo "$target_file -> (none)" >> "$OWNERS_FILE"
echo_v " $target_file -> (none)"
fi
done < "$BROKEN_FILE"
[[ $QUIET -ne 1 ]] && einfo "Generated new $RAW_FILE and $OWNERS_FILE"
fi
# if we find '(none)' on every line, exit out
if ! grep -qvF '(none)' "$OWNERS_FILE"; then
ewarn "Found some broken files, but none of them were associated with known packages"
ewarn "Unable to proceed with automatic repairs."
ewarn "The broken files are listed in $OWNERS_FILE"
if [[ $VERBOSE ]]; then
ewarn "The broken files are:"
while read filename junk; do
ewarn " $filename"
done < "$OWNERS_FILE"
fi
exit 0 # FIXME: Should we exit 1 here?
fi
}
clean_packages() {
[[ $QUIET -ne 1 ]] && einfo 'Cleaning list of packages to rebuild'
if [[ -r "$PKGS_FILE" && -s "$PKGS_FILE" ]]; then
[[ $QUIET -ne 1 ]] && einfo "Found existing $PKGS_FILE"
else
sort -u "$RAW_FILE" > "$PKGS_FILE"
[[ $QUIET -ne 1 ]] && einfo "Generated new $PKGS_FILE"
fi
}
assign_packages_to_ebuilds() {
local EXACT_PKG
local PKG
local SLOT
einfo 'Assigning packages to ebuilds'
if [[ -r "$EBUILDS_FILE" && -s "$EBUILDS_FILE" ]]; then
einfo "Found existing $EBUILDS_FILE"
elif [[ -r "$PKGS_FILE" && -s "$PKGS_FILE" ]]; then
clean_trap "$EBUILDS_FILE"
while read EXACT_PKG; do
# Get the slot
PKG="${EXACT_PKG%%-r[[:digit:]]*}"
PKG="${PKG%-*}"
SLOT=$(</var/db/pkg/$EXACT_PKG/SLOT)
echo "$PKG:$SLOT"
done < "$PKGS_FILE" > "$EBUILDS_FILE"
[[ $QUIET -ne 1 ]] && einfo "Generated new $EBUILDS_FILE"
else
einfo 'Nothing to rebuild.'
die 1 '(The program should have already quit, so this is a minor bug.)'
fi
}
get_exact_ebuilds() {
einfo 'Assigning files to ebuilds'
if [[ -r $EBUILDS_FILE && -s $EBUILDS_FILE ]]; then
einfo "Found existing $EBUILDS_FILE"
elif [[ -r $BROKEN_FILE && -s $BROKEN_FILE ]]; then
rebuildList=" $(<"$BROKEN_FILE") "
rebuildList=(${rebuildList//[[:space:]]obj[[:space:]]/ })
get_file_owner "${rebuildList[@]}" | sed 's/^/=/' > "$EBUILDS_FILE"
[[ $QUIET -ne 1 ]] && einfo "Generated new $EBUILDS_FILE"
else
einfo 'Nothing to rebuild.'
die 1 '(The program should have already quit, so this is a minor bug.)'
fi
}
list_skipped_packages() {
ewarn
ewarn 'Portage could not find any version of the following packages it could build:'
ewarn "${SKIP_LIST[@]}"
ewarn
ewarn '(Perhaps they are masked, blocked, or removed from portage.)'
ewarn 'Try to emerge them manually.'
ewarn
}
get_build_order() {
local -a OLD_EMERGE_DEFAULT_OPTS=("${EMERGE_DEFAULT_OPTS[@]}")
local RAW_REBUILD_LIST
local REBUILD_GREP
local i
if [[ ! $ORDER_PKGS ]]; then
einfo 'Skipping package ordering'
return
fi
[[ $QUIET -ne 1 ]] && einfo 'Evaluating package order'
if [[ -r "$ORDER_FILE" && -s "$ORDER_FILE" ]]; then
einfo "Found existing $ORDER_FILE"
else
clean_trap "$ORDER_FILE"
RAW_REBUILD_LIST=$(<"$EBUILDS_FILE")
if [[ $RAW_REBUILD_LIST ]]; then
EMERGE_DEFAULT_OPTS=(--nospinner --pretend --oneshot --quiet)
RAW_REBUILD_LIST=($RAW_REBUILD_LIST) # convert into array
# If PACKAGE_NAMES is defined we're using slots, not versions
if [[ $PACKAGE_NAMES ]]; then
# Eliminate atoms that can't be built
for i in "${!RAW_REBUILD_LIST[@]}"; do
if [[ "${RAW_REBUILD_LIST[i]}" = *[A-Za-z]* ]]; then
portageq best_visible "$PORTAGE_ROOT" "${RAW_REBUILD_LIST[i]}" >/dev/null && continue
SKIP_LIST+=("${RAW_REBUILD_LIST[i]}")
fi
unset RAW_REBUILD_LIST[i]
done
# If RAW_REBUILD_LIST is empty, then we have nothing to build.
if (( ${#RAW_REBUILD_LIST[@]} == 0 )); then
if (( ${#SKIP_LIST[@]} == 0 )); then
ewarn "The list of packages to skip is empty, but there are no"
ewarn "packages listed to rebuild either. (This is a bug.)"
else
list_skipped_packages
fi
die 1 'Warning: Portage cannot rebuild any of the necessary packages.'
fi
fi
RAW_REBUILD_LIST="${RAW_REBUILD_LIST[@]}"
# We no longer determine the package order ourselves. Instead we call emerge
# with --complete-graph=y in the rebuild function.
if false ; then
REBUILD_GREP=$(emerge --nodeps $RAW_REBUILD_LIST | sed 's/\[[^]]*\]//g')
if (( ${PIPESTATUS[0]} == 0 )); then
emerge --deep $RAW_REBUILD_LIST |
sed 's/\[[^]]*\]//g' |
grep -F "$REBUILD_GREP" > "$ORDER_FILE"
fi
# Here we use the PIPESTATUS from the second emerge, the --deep one.
if (( ${PIPESTATUS[0]} != 0 )); then
eerror
eerror 'Warning: Failed to resolve package order.'
eerror 'Will merge in arbitrary order'
eerror
cat <<- EOF
Possible reasons:
- An ebuild is no longer in the portage tree.
- An ebuild is masked, use /etc/portage/package.keywords
and/or /etc/portage/package.unmask to unmask it
EOF
countdown 5
rm -f "$ORDER_FILE"
fi
else
echo "$RAW_REBUILD_LIST" > "$ORDER_FILE"
fi
EMERGE_DEFAULT_OPTS=("${OLD_EMERGE_DEFAULT_OPTS[@]}")
else
einfo 'Nothing to rebuild.'
die 1 '(The program should have already quit, so this is a minor bug.)'
fi
fi
[[ -r "$ORDER_FILE" && -s "$ORDER_FILE" && $QUIET -ne 1 ]] && einfo "Generated new $ORDER_FILE"
}
show_unowned_files() {
if grep -qF '(none)' "$OWNERS_FILE"; then
ewarn "Found some broken files that weren't associated with known packages"
ewarn "The broken files are:"
while read filename junk; do
[[ $junk = *none* ]] && ewarn " $filename"
done < "$OWNERS_FILE" | gawk '!s[$0]++' # (omit dupes)
fi
}
# Get multiple portage variables at once to speedup revdep-rebuild.
portage_settings() {
local ORIG_SEARCH_DIRS="$SEARCH_DIRS"
local ORIG_SEARCH_DIRS_MASK="$SEARCH_DIRS_MASK"
local ORIG_LD_LIBRARY_MASK="$LD_LIBRARY_MASK"
unset SEARCH_DIRS
unset SEARCH_DIRS_MASK
unset LD_LIBRARY_MASK
eval $(portageq envvar -v PORTAGE_ROOT PORTAGE_NICENESS EMERGE_DEFAULT_OPTS NOCOLOR SEARCH_DIRS SEARCH_DIRS_MASK LD_LIBRARY_MASK REVDEP_REBUILD_DEFAULT_OPTS)
export NOCOLOR
# Convert quoted paths to array.
eval "EMERGE_DEFAULT_OPTS=(${EMERGE_DEFAULT_OPTS})"
eval "REVDEP_REBUILD_DEFAULT_OPTS=(${REVDEP_REBUILD_DEFAULT_OPTS})"
SEARCH_DIRS="$ORIG_SEARCH_DIRS $SEARCH_DIRS"
SEARCH_DIRS_MASK="$ORIG_SEARCH_DIRS_MASK $SEARCH_DIRS_MASK"
LD_LIBRARY_MASK="$ORIG_LD_LIBRARY_MASK $LD_LIBRARY_MASK"
# Replace EMERGE_DEFAULT_OPTS with REVDEP_REBUILD_DEFAULT_OPTS (if it exists)
if [[ -n ${REVDEP_REBUILD_DEFAULT_OPTS} ]]; then
EMERGE_DEFAULT_OPTS=("${REVDEP_REBUILD_DEFAULT_OPTS[@]}")
fi
}
##
# Setup portage and the search paths
setup_portage() {
# Obey PORTAGE_NICENESS (which is incremental to the current nice value)
if [[ $PORTAGE_NICENESS ]]; then
current_niceness=$(nice)
let PORTAGE_NICENESS=${current_niceness}+${PORTAGE_NICENESS}
renice $PORTAGE_NICENESS $$ > /dev/null
# Since we have already set our nice value for our processes,
# reset PORTAGE_NICENESS to zero to avoid having emerge renice again.
export PORTAGE_NICENESS="0"
fi
PORTAGE_ROOT="${PORTAGE_ROOT:-/}"
}
##
# Setup the paths to search (and filter the ones to avoid)
setup_search_paths_and_masks() {
local configfile sdir mdir skip_me filter_SEARCH_DIRS
[[ $QUIET -ne 1 ]] && einfo "Configuring search environment for $APP_NAME"
# Update the incremental variables using /etc/profile.env, /etc/ld.so.conf,
# portage, and the environment
# Read the incremental variables from environment and portage
# Until such time as portage supports these variables as incrementals
# The value will be what is in /etc/make.conf
# SEARCH_DIRS+=" "$(unset SEARCH_DIRS; portageq envvar SEARCH_DIRS)
# SEARCH_DIRS_MASK+=" "$(unset SEARCH_DIRS_MASK; portageq envvar SEARCH_DIRS_MASK)
# LD_LIBRARY_MASK+=" "$(unset LD_LIBRARY_MASK; portageq envvar LD_LIBRARY_MASK)
# Add the defaults
if [[ -d /etc/revdep-rebuild ]]; then
for configfile in /etc/revdep-rebuild/*; do
SEARCH_DIRS+=" "$(. $configfile; echo $SEARCH_DIRS)
SEARCH_DIRS_MASK+=" "$(. $configfile; echo $SEARCH_DIRS_MASK)
LD_LIBRARY_MASK+=" "$(. $configfile; echo $LD_LIBRARY_MASK)
done
else
SEARCH_DIRS+=" /bin /sbin /usr/bin /usr/sbin /lib* /usr/lib*"
SEARCH_DIRS_MASK+=" /opt/OpenOffice /usr/lib/openoffice"
LD_LIBRARY_MASK+=" libodbcinst.so libodbc.so libjava.so libjvm.so"
fi
# Get the ROOTPATH and PATH from /etc/profile.env
if [[ -r "/etc/profile.env" && -s "/etc/profile.env" ]]; then
SEARCH_DIRS+=" "$(. /etc/profile.env; /usr/bin/tr ':' ' ' <<< "$ROOTPATH $PATH")
fi
# Get the directories from /etc/ld.so.conf
if [[ -r /etc/ld.so.conf && -s /etc/ld.so.conf ]]; then
SEARCH_DIRS+=" "$(parse_ld_so_conf)
fi
# Set the final variables
SEARCH_DIRS=$(clean_var <<< "$SEARCH_DIRS")
SEARCH_DIRS_MASK=$(clean_var <<< "$SEARCH_DIRS_MASK")
LD_LIBRARY_MASK=$(clean_var <<< "$LD_LIBRARY_MASK")
# Filter masked paths from SEARCH_DIRS
for sdir in ${SEARCH_DIRS} ; do
skip_me=
for mdir in ${SEARCH_DIRS_MASK}; do
[[ ${sdir} == ${mdir}/* ]] && skip_me=1 && break
done
[[ -n ${skip_me} ]] || filter_SEARCH_DIRS+=" ${sdir}"
done
SEARCH_DIRS=$(clean_var <<< "${filter_SEARCH_DIRS}")
[[ $SEARCH_DIRS ]] || die 1 "No search defined -- this is a bug."
}
##
# Rebuild packages owning broken binaries
rebuild() {
if [[ -r $ORDER_FILE && -s $ORDER_FILE ]]; then
# The rebuild list contains category/package:slot atoms.
# Do not prepend with an '=' sign.
# REBUILD_LIST=( $(<"$ORDER_FILE") )
# REBUILD_LIST="${REBUILD_LIST[@]/#/=}"
REBUILD_LIST=$(<"$ORDER_FILE")
else
REBUILD_LIST=$(sort -u "$EBUILDS_FILE")
fi
trap "kill 0" SIGHUP SIGINT SIGQUIT SIGABRT SIGTERM
[[ $QUIET -ne 1 ]] && einfo 'All prepared. Starting rebuild'
echo "emerge --complete-graph=y --oneshot ${EMERGE_DEFAULT_OPTS[@]} ${EMERGE_OPTIONS[@]} $REBUILD_LIST"
is_real_merge && countdown 10
# Link file descriptor #6 with stdin so --ask will work
exec 6<&0
# Run in background to correctly handle Ctrl-C
{
emerge --complete-graph=y --oneshot "${EMERGE_DEFAULT_OPTS[@]}" ${EMERGE_OPTIONS[@]} $REBUILD_LIST <&6
echo $? > "$STATUS_FILE"
} &
wait
# Now restore stdin from fd #6, where it had been saved, and close fd #6 ( 6<&- ) to free it for other processes to use.
exec 0<&6 6<&-
}
##
# Finish up
cleanup() {
EMERGE_STATUS=$(<"$STATUS_FILE")
if is_real_merge; then
if (( EMERGE_STATUS != 0 )); then
ewarn
ewarn "$APP_NAME failed to emerge all packages."
ewarn 'You have the following choices:'
einfo "- If emerge failed during the build, fix the problems and re-run $APP_NAME."
einfo '- Use /etc/portage/package.keywords to unmask a newer version of the package.'
einfo " (and remove $ORDER_FILE to be evaluated again)"
einfo '- Modify the above emerge command and run it manually.'
einfo '- Compile or unmerge unsatisfied packages manually,'
einfo ' remove temporary files, and try again.'
einfo ' (you can edit package/ebuild list first)'
einfo
einfo 'To remove temporary files, please run:'
einfo "rm ${WORKING_DIR}/*.rr"
show_unowned_files
exit $EMERGE_STATUS
else
trap_cmd() {
eerror "terminated. Please remove the temporary files manually:"
eerror "rm ${WORKING_DIR}/*.rr"
exit 1
}
[[ "${SKIP_LIST[@]}" != "" ]] && list_skipped_packages
trap trap_cmd SIGHUP SIGINT SIGQUIT SIGABRT SIGTERM
einfo 'Build finished correctly. Removing temporary files...'
einfo
einfo 'You can re-run revdep-rebuild to verify that all libraries and binaries'
einfo 'are fixed. Possible reasons for remaining inconsistencies include:'
einfo ' orphaned files'
einfo ' deep dependencies'
einfo " packages installed outside of portage's control"
einfo ' specially-evaluated libraries'
if [[ -r "$OWNERS_FILE" && -s "$OWNERS_FILE" ]]; then
show_unowned_files
fi
[[ $KEEP_TEMP ]] || rm -f "${FILES[@]}"
fi
else
einfo 'Now you can remove -p (or --pretend) from arguments and re-run revdep-rebuild.'
fi
}
main "$@"
|
zmedico/gentoolkit
|
bin/revdep-rebuild.sh
|
Shell
|
gpl-2.0
| 42,169 |
#!/bin/bash
DEST="../lib/";
CLONEDIR="/tmp/jquery.webfonts";
HERE=`pwd`;
UPSTREAM="https://github.com/wikimedia/jquery.webfonts.git";
echo -e "Getting latest jquery.webfonts from $UPSTREAM\n";
if cd $CLONEDIR; then git pull; else git clone $UPSTREAM $CLONEDIR; fi
cd $HERE;
cp -rf $CLONEDIR/src/* $DEST
|
Niharika29/UniversalLanguageSelector
|
scripts/update-jquery-webfonts.sh
|
Shell
|
gpl-2.0
| 303 |
#
# project-local sharness code for Flux
#
#
# Extra functions for Flux testsuite
#
run_timeout() {
perl -e 'alarm shift @ARGV; exec @ARGV' "$@"
}
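# Example (illustrative, not part of the original file):
#   run_timeout 5 flux kvs get foo
# runs the command under a 5 second SIGALRM deadline.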
#
# Echo on stdout a reasonable size for a large test session,
# controllable test-wide via env vars FLUX_TEST_SIZE_MIN and
# FLUX_TEST_SIZE_MAX.
#
test_size_large() {
min=${FLUX_TEST_SIZE_MIN:-4}
max=${FLUX_TEST_SIZE_MAX:-17}
size=$(($(nproc)+1))
test ${size} -lt ${min} && size=$min
test ${size} -gt ${max} && size=$max
echo ${size}
}
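# Example (illustrative, not part of the original file): on an 8-core machine
#   test_size_large                        # echoes 9 (nproc+1, clamped to 4..17)
#   FLUX_TEST_SIZE_MAX=8 test_size_large   # echoes 8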
#
# Reinvoke a test file under a flux comms instance
#
# Usage: test_under_flux <size>
#
test_under_flux() {
size=${1:-1}
personality=${2:-full}
log_file="$TEST_NAME.broker.log"
if test -n "$TEST_UNDER_FLUX_ACTIVE" ; then
cleanup rm "${SHARNESS_TEST_DIRECTORY:-..}/$log_file"
return
fi
quiet="-o -q,-Slog-filename=${log_file},-Slog-forward-level=7"
if test "$verbose" = "t" -o -n "$FLUX_TESTS_DEBUG" ; then
flags="${flags} --verbose"
quiet=""
fi
if test "$debug" = "t" -o -n "$FLUX_TESTS_DEBUG" ; then
flags="${flags} --debug"
fi
if test "$chain_lint" = "t"; then
flags="${flags} --chain-lint"
fi
if test -n "$logfile" -o -n "$FLUX_TESTS_LOGFILE" ; then
flags="${flags} --logfile"
fi
if test -n "$SHARNESS_TEST_DIRECTORY"; then
cd $SHARNESS_TEST_DIRECTORY
fi
timeout="-o -Sinit.rc2_timeout=300"
if test -n "$FLUX_TEST_DISABLE_TIMEOUT"; then
timeout=""
fi
if test "$personality" = "minimal"; then
export FLUX_RC1_PATH=""
export FLUX_RC3_PATH=""
elif test "$personality" != "full"; then
export FLUX_RC1_PATH=$FLUX_SOURCE_DIR/t/rc/rc1-$personality
export FLUX_RC3_PATH=$FLUX_SOURCE_DIR/t/rc/rc3-$personality
test -x $FLUX_RC1_PATH || error "cannot execute $FLUX_RC1_PATH"
test -x $FLUX_RC3_PATH || error "cannot execute $FLUX_RC3_PATH"
else
unset FLUX_RC1_PATH
unset FLUX_RC3_PATH
fi
TEST_UNDER_FLUX_ACTIVE=t \
TERM=${ORIGINAL_TERM} \
exec flux start --bootstrap=selfpmi --size=${size} ${quiet} ${timeout} \
"sh $0 ${flags}"
}
mock_bootstrap_instance() {
if test -z "${TEST_UNDER_FLUX_ACTIVE}"; then
unset FLUX_URI
fi
}
#
# Execute arguments $2-N on rank or ranks specified in arg $1
# using the flux-exec utility
#
test_on_rank() {
test "$#" -ge 2 ||
error "test_on_rank expects at least two parameters"
test -n "$TEST_UNDER_FLUX_ACTIVE" ||
error "test_on_rank: test_under_flux not active ($TEST_UNDER_FLUX_ACTIVE)"
ranks=$1; shift;
flux exec --rank=${ranks} "$@"
}
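# Example (illustrative, not part of the original file): verify that rank 1
# reports its own rank attribute
#   test_on_rank 1 flux getattr rank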
# Export a shorter name for this test
TEST_NAME=$SHARNESS_TEST_NAME
export TEST_NAME
# Test requirements for testsuite
if ! lua -e 'require "posix"'; then
error "failed to find lua posix module in path"
fi
# Some tests in flux don't work with --chain-lint, add a prereq for
# --no-chain-lint:
test "$chain_lint" = "t" || test_set_prereq NO_CHAIN_LINT
# vi: ts=4 sw=4 expandtab
|
lipari/flux-core
|
t/sharness.d/flux-sharness.sh
|
Shell
|
gpl-2.0
| 3,098 |