code stringlengths 2–1.05M | repo_name stringlengths 5–110 | path stringlengths 3–922 | language stringclasses 1 value | license stringclasses 15 values | size int64 2–1.05M
---|---|---|---|---|---|
#!/bin/sh
#-------------------------------------------------------------------------
# drawElements Quality Program utilities
# --------------------------------------
#
# Copyright 2015 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#-------------------------------------------------------------------------
echo "Removing old dEQP Ondevice Package..."
adb "$@" uninstall com.drawelements.deqp
echo "Installing dEQP Ondevice Package..."
adb "$@" install -r dEQP.apk
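# Example invocation (illustrative; the device serial is hypothetical):
#   sh install.sh -s emulator-5554
# Any arguments are forwarded verbatim to adb, so device-selection flags
# work unchanged.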
| endlessm/chromium-browser | third_party/angle/third_party/VK-GL-CTS/src/targets/android/install.sh | Shell | bsd-3-clause | 999 |
. ./init.sh
# Fast position search should fail
mysql $S1 test -e "set global relay_log_purge=0"
mysql $S2 test -e "set global relay_log_purge=0"
mysql $S3 test -e "set global relay_log_purge=0"
mysql $S4 test -e "set global relay_log_purge=0"
mysql $S1 test -e "set global max_relay_log_size=65536"
mysql $S2 test -e "set global max_relay_log_size=65536"
mysql $S3 test -e "set global max_relay_log_size=65536"
mysql $S4 test -e "set global max_relay_log_size=65536"
perl insert.pl $MP $MYSQL_USER $MYSQL_PWD 2 1000 0
mysql $S1 test -e "stop slave io_thread"
perl insert.pl $MP $MYSQL_USER $MYSQL_PWD 1001 100000 1
sleep 1
./kill_m.sh
./run.sh
fail_if_nonzero $0 $?
mysql $S1 test -e "insert into t1 values(10000003, 300, 'bbbaaaaaaa');"
./check $0 100001
| ZuoGuocai/mha4mysql-manager | tests/t/t_large_data_slow.sh | Shell | gpl-2.0 | 767 |
#!/bin/sh
# Copyright (C) 2005-2019 by
# David Turner, Robert Wilhelm, and Werner Lemberg.
#
# This file is part of the FreeType project, and may only be used, modified,
# and distributed under the terms of the FreeType project license,
# LICENSE.TXT. By continuing to use, modify, or distribute this file you
# indicate that you have read the license and understand and accept it
# fully.
run ()
{
echo "running \`$*'"
eval $*
if test $? != 0 ; then
echo "error while running \`$*'"
exit 1
fi
}
get_major_version ()
{
echo $1 | sed -e 's/\([0-9][0-9]*\)\..*/\1/g'
}
get_minor_version ()
{
echo $1 | sed -e 's/[0-9][0-9]*\.\([0-9][0-9]*\).*/\1/g'
}
get_patch_version ()
{
# Tricky: some version numbers don't have a patch component separated
# with a point, but use something like 1.4-p6 instead.
patch=`echo $1 | sed -e 's/[0-9][0-9]*\.[0-9][0-9]*\.\([0-9][0-9]*\).*/\1/g'`
if test "$patch" = "$1"; then
patch=`echo $1 | sed -e 's/[0-9][0-9]*\.[0-9][0-9]*\-p\([0-9][0-9]*\).*/\1/g'`
# if there isn't any patch number, default to 0
if test "$patch" = "$1"; then
patch=0
fi
fi
echo $patch
}
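# Illustrative expectations for the helpers above (not executed during a
# normal bootstrap):
#   get_major_version 2.62    -> 2
#   get_minor_version 2.62    -> 62
#   get_patch_version 1.4-p6  -> 6   (via the "-pN" fallback)
#   get_patch_version 2.62    -> 0   (no patch component at all)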
# $1: version to check
# $2: minimum version
compare_to_minimum_version ()
{
MAJOR1=`get_major_version $1`
MAJOR2=`get_major_version $2`
if test $MAJOR1 -lt $MAJOR2; then
echo 0
return
else
if test $MAJOR1 -gt $MAJOR2; then
echo 1
return
fi
fi
MINOR1=`get_minor_version $1`
MINOR2=`get_minor_version $2`
if test $MINOR1 -lt $MINOR2; then
echo 0
return
else
if test $MINOR1 -gt $MINOR2; then
echo 1
return
fi
fi
PATCH1=`get_patch_version $1`
PATCH2=`get_patch_version $2`
if test $PATCH1 -lt $PATCH2; then
echo 0
else
echo 1
fi
}
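# Illustrative comparisons (echoes 1 when $1 >= $2, otherwise 0):
#   compare_to_minimum_version 2.69 2.62    -> 1
#   compare_to_minimum_version 2.59 2.62    -> 0
#   compare_to_minimum_version 1.4-p6 1.4.2 -> 1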
# check the version of a given tool against a minimum version number
#
# $1: tool path
# $2: tool usual name (e.g. `aclocal')
# $3: tool variable (e.g. `ACLOCAL')
# $4: minimum version to check against
# $5: option field index used to extract the tool version from the
# output of --version
check_tool_version ()
{
field=$5
# assume the output of "[TOOL] --version" is "toolname (GNU toolname foo bar) version"
if test "$field"x = x; then
field=3 # default to 3 for all GNU autotools, after filtering enclosed string
fi
version=`$1 --version | head -1 | sed 's/([^)]*)/()/g' | cut -d ' ' -f $field`
version_check=`compare_to_minimum_version $version $4`
if test "$version_check"x = 0x; then
echo "ERROR: Your version of the \`$2' tool is too old."
echo " Minimum version $4 is required (yours is version $version)."
echo " Please upgrade or use the $3 variable to point to a more recent one."
echo ""
exit 1
fi
}
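# Worked example (assumed tool output): "autoconf --version" typically
# starts with a line such as
#   autoconf (GNU Autoconf) 2.69
# The sed above collapses the parenthesized part to "()", leaving
# "autoconf () 2.69", so the default field 3 extracts "2.69".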
if test ! -f ./builds/unix/configure.raw; then
echo "You must be in the same directory as \`autogen.sh'."
echo "Bootstrapping doesn't work if srcdir != builddir."
exit 1
fi
# On MacOS X, the GNU libtool is named `glibtool'.
HOSTOS=`uname`
if test "$LIBTOOLIZE"x != x; then
:
elif test "$HOSTOS"x = Darwinx; then
LIBTOOLIZE=glibtoolize
else
LIBTOOLIZE=libtoolize
fi
if test "$ACLOCAL"x = x; then
ACLOCAL=aclocal
fi
if test "$AUTOCONF"x = x; then
AUTOCONF=autoconf
fi
check_tool_version $ACLOCAL aclocal ACLOCAL 1.10.1
check_tool_version $LIBTOOLIZE libtoolize LIBTOOLIZE 2.2.4
check_tool_version $AUTOCONF autoconf AUTOCONF 2.62
# This sets freetype_major, freetype_minor, and freetype_patch.
eval `sed -nf version.sed include/freetype/freetype.h`
# We set freetype_patch to an empty value if it is zero.
if test "$freetype_patch" = ".0"; then
freetype_patch=
fi
cd builds/unix
echo "generating \`configure.ac'"
sed -e "s;@VERSION@;$freetype_major$freetype_minor$freetype_patch;" \
< configure.raw > configure.ac
run aclocal -I . --force
run $LIBTOOLIZE --force --copy --install
run autoconf --force
chmod +x install-sh
cd ../..
chmod +x ./configure
# EOF
| FabianHahn/shoveler | thirdparty/freetype/autogen.sh | Shell | mit | 3,935 |
#!/bin/bash
if [ "$1" = "" ]; then
echo -n 'usage: '
echo -n `/bin/basename $0`
echo ' <your language code>'
exit
fi
grep LANG en-gb/lang.php | cut -d= -f1 > /tmp/lang.en.tmp
grep LANG "$1"/lang.php | cut -d= -f1 > /tmp/lang.$1.tmp
/usr/bin/diff -C 1 /tmp/lang.en.tmp /tmp/lang.$1.tmp
rm -f /tmp/lang.en.tmp /tmp/lang.$1.tmp
| maent45/PersianFeast | simpleinvoices/lang/get-new-strings.sh | Shell | gpl-2.0 | 364 |
default="-O2 -msse"
sse3="-O2 -msse3"
sse4="-O2 -msse4"
avx='-O2 -mavx'
avx2='-O2 -mavx2'
native='-O2 -march=native'
option=$default
g++ $option -Q --help=target > default.txt
| hoangt/sirius | sirius-suite/scripts/see-options.sh | Shell | bsd-3-clause | 177 |
#!/bin/bash
action="$1"
case $action in
add)
rc-update -q add vmware-workstation-server default
rc-service vmware-workstation-server start
;;
remove)
rc-update -q del vmware-workstation-server default
rc-service vmware-workstation-server stop
;;
status)
rc-service -q vmware-workstation-server status
;;
*)
exit 1
;;
esac
| theeternalsw0rd/ports-2012 | app-emulation/vmware-workstation/files/configure-hostd.sh | Shell | gpl-2.0 | 372 |
#!/bin/sh
# don't let udev and this script step on each other's toes
for x in 64-lvm.rules 70-mdadm.rules 99-mount-rules; do
> "/etc/udev/rules.d/$x"
done
rm -f -- /etc/lvm/lvm.conf
udevadm control --reload
set -e
# save a partition at the beginning for future flagging purposes
sfdisk /dev/sda <<EOF
,1M
,
EOF
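# For reference: the two-line sfdisk input above creates a 1 MiB /dev/sda1
# (the marker partition written at the end of this script) and a /dev/sda2
# spanning the remaining space for the root filesystem.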
udevadm settle
mkfs.ext3 -L dracut /dev/sda2
mkdir -p /root
mount /dev/sda2 /root
cp -a -t /root /source/*
mkdir -p /root/run
umount /root
echo "dracut-root-block-created" >/dev/sda1
poweroff -f
| matlinuxer2/dracut | test/TEST-02-SYSTEMD/create-root.sh | Shell | gpl-2.0 | 511 |
#!/bin/bash
# Copyright 2014 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# A library of helper functions that each provider hosting Kubernetes must implement to use cluster/kube-*.sh scripts.
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "${KUBE_ROOT}/cluster/vagrant/${KUBE_CONFIG_FILE-"config-default.sh"}"
source "${KUBE_ROOT}/cluster/common.sh"
function detect-master () {
KUBE_MASTER_IP=$MASTER_IP
echo "KUBE_MASTER_IP: ${KUBE_MASTER_IP}" 1>&2
}
# Get node IP addresses and store in KUBE_NODE_IP_ADDRESSES[]
function detect-nodes {
echo "Nodes already detected" 1>&2
KUBE_NODE_IP_ADDRESSES=("${NODE_IPS[@]}")
}
# Verify prereqs on the host machine. Also exports USING_KUBE_SCRIPTS=true
# so that our Vagrantfile doesn't error out.
function verify-prereqs {
for x in vagrant; do
if ! which "$x" >/dev/null; then
echo "Can't find $x in PATH, please fix and retry."
exit 1
fi
done
local vagrant_plugins=$(vagrant plugin list | sed '-es% .*$%%' '-es% *% %g' | tr ' ' $'\n')
local providers=(
# Format is:
# provider_ctl_executable vagrant_provider_name vagrant_provider_plugin_re
# either provider_ctl_executable or vagrant_provider_plugin_re can
# be blank (i.e., '') if none is needed by Vagrant (see, e.g.,
# virtualbox entry)
'' vmware_fusion vagrant-vmware-fusion
'' vmware_workstation vagrant-vmware-workstation
prlctl parallels vagrant-parallels
VBoxManage virtualbox ''
virsh libvirt vagrant-libvirt
)
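# Worked example of one triple (for illustration): the virtualbox entry
# only requires "VBoxManage" in PATH and no plugin, while the vmware_fusion
# entry skips the binary check and only requires the vagrant-vmware-fusion
# plugin to be listed.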
local provider_found=''
local provider_bin
local provider_name
local provider_plugin_re
while [ "${#providers[@]}" -gt 0 ]; do
provider_bin=${providers[0]}
provider_name=${providers[1]}
provider_plugin_re=${providers[2]}
providers=("${providers[@]:3}")
# If the provider is explicitly set, look only for that provider
if [ -n "${VAGRANT_DEFAULT_PROVIDER:-}" ] \
&& [ "${VAGRANT_DEFAULT_PROVIDER}" != "${provider_name}" ]; then
continue
fi
if ([ -z "${provider_bin}" ] \
|| which "${provider_bin}" >/dev/null 2>&1) \
&& ([ -z "${provider_plugin_re}" ] \
|| [ -n "$(echo "${vagrant_plugins}" | grep -E "^${provider_plugin_re}$")" ]); then
provider_found="${provider_name}"
# Stop after finding the first viable provider
break
fi
done
if [ -z "${provider_found}" ]; then
if [ -n "${VAGRANT_DEFAULT_PROVIDER:-}" ]; then
echo "Can't find the necessary components for the ${VAGRANT_DEFAULT_PROVIDER} vagrant provider."
echo "Possible reasons could be: "
echo -e "\t- vmrun utility is not in your path"
echo -e "\t- Vagrant plugin was not found."
echo -e "\t- VAGRANT_DEFAULT_PROVIDER is set, but not found."
echo "Please fix and retry."
else
echo "Can't find the necessary components for any viable vagrant providers (e.g., virtualbox), please fix and retry."
fi
exit 1
fi
# Set VAGRANT_CWD to KUBE_ROOT so that we find the right Vagrantfile no
# matter what directory the tools are called from.
export VAGRANT_CWD="${KUBE_ROOT}"
export USING_KUBE_SCRIPTS=true
}
# Create a temp dir that'll be deleted at the end of this bash session.
#
# Vars set:
# KUBE_TEMP
function ensure-temp-dir {
if [[ -z ${KUBE_TEMP-} ]]; then
export KUBE_TEMP=$(mktemp -d -t kubernetes.XXXXXX)
trap 'rm -rf "${KUBE_TEMP}"' EXIT
fi
}
# Create a set of provision scripts for the master and each of the nodes
function create-provision-scripts {
ensure-temp-dir
(
echo "#! /bin/bash"
echo-kube-env
echo "NODE_IP='${MASTER_IP}'"
echo "CONTAINER_ADDR='${MASTER_CONTAINER_ADDR}'"
echo "CONTAINER_NETMASK='${MASTER_CONTAINER_NETMASK}'"
awk '!/^#/' "${KUBE_ROOT}/cluster/vagrant/provision-utils.sh"
awk '!/^#/' "${KUBE_ROOT}/cluster/vagrant/provision-network-master.sh"
awk '!/^#/' "${KUBE_ROOT}/cluster/vagrant/provision-master.sh"
) > "${KUBE_TEMP}/master-start.sh"
for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
(
echo "#! /bin/bash"
echo-kube-env
echo "NODE_NAME=(${NODE_NAMES[$i]})"
echo "NODE_IP='${NODE_IPS[$i]}'"
echo "NODE_ID='$i'"
echo "CONTAINER_ADDR='${NODE_CONTAINER_ADDRS[$i]}'"
echo "CONTAINER_NETMASK='${NODE_CONTAINER_NETMASKS[$i]}'"
awk '!/^#/' "${KUBE_ROOT}/cluster/vagrant/provision-utils.sh"
awk '!/^#/' "${KUBE_ROOT}/cluster/vagrant/provision-network-node.sh"
awk '!/^#/' "${KUBE_ROOT}/cluster/vagrant/provision-node.sh"
) > "${KUBE_TEMP}/node-start-${i}.sh"
done
}
function echo-kube-env() {
echo "KUBE_ROOT=/vagrant"
echo "INSTANCE_PREFIX='${INSTANCE_PREFIX}'"
echo "MASTER_NAME='${INSTANCE_PREFIX}-master'"
echo "MASTER_IP='${MASTER_IP}'"
echo "NODE_NAMES=(${NODE_NAMES[@]})"
echo "NODE_IPS=(${NODE_IPS[@]})"
echo "CONTAINER_SUBNET='${CONTAINER_SUBNET}'"
echo "CLUSTER_IP_RANGE='${CLUSTER_IP_RANGE}'"
echo "MASTER_CONTAINER_SUBNET='${MASTER_CONTAINER_SUBNET}'"
echo "NODE_CONTAINER_NETMASKS='${NODE_CONTAINER_NETMASKS[@]}'"
echo "NODE_CONTAINER_SUBNETS=(${NODE_CONTAINER_SUBNETS[@]})"
echo "SERVICE_CLUSTER_IP_RANGE='${SERVICE_CLUSTER_IP_RANGE}'"
echo "MASTER_USER='${MASTER_USER}'"
echo "MASTER_PASSWD='${MASTER_PASSWD}'"
echo "KUBE_USER='${KUBE_USER}'"
echo "KUBE_PASSWORD='${KUBE_PASSWORD}'"
echo "KUBE_BEARER_TOKEN='${KUBE_BEARER_TOKEN}'"
echo "ENABLE_CLUSTER_MONITORING='${ENABLE_CLUSTER_MONITORING}'"
echo "ENABLE_CLUSTER_LOGGING='${ENABLE_CLUSTER_LOGGING:-false}'"
echo "ELASTICSEARCH_LOGGING_REPLICAS='${ELASTICSEARCH_LOGGING_REPLICAS:-1}'"
echo "ENABLE_NODE_LOGGING='${ENABLE_NODE_LOGGING:-false}'"
echo "ENABLE_CLUSTER_UI='${ENABLE_CLUSTER_UI}'"
echo "LOGGING_DESTINATION='${LOGGING_DESTINATION:-}'"
echo "ENABLE_CLUSTER_DNS='${ENABLE_CLUSTER_DNS:-false}'"
echo "DNS_SERVER_IP='${DNS_SERVER_IP:-}'"
echo "DNS_DOMAIN='${DNS_DOMAIN:-}'"
echo "DNS_REPLICAS='${DNS_REPLICAS:-}'"
echo "RUNTIME_CONFIG='${RUNTIME_CONFIG:-}'"
echo "ADMISSION_CONTROL='${ADMISSION_CONTROL:-}'"
echo "DOCKER_OPTS='${EXTRA_DOCKER_OPTS:-}'"
echo "VAGRANT_DEFAULT_PROVIDER='${VAGRANT_DEFAULT_PROVIDER:-}'"
echo "KUBELET_TOKEN='${KUBELET_TOKEN:-}'"
echo "KUBE_PROXY_TOKEN='${KUBE_PROXY_TOKEN:-}'"
echo "MASTER_EXTRA_SANS='${MASTER_EXTRA_SANS:-}'"
echo "ENABLE_CPU_CFS_QUOTA='${ENABLE_CPU_CFS_QUOTA}'"
echo "NETWORK_PROVIDER='${NETWORK_PROVIDER:-}'"
echo "OPENCONTRAIL_TAG='${OPENCONTRAIL_TAG:-}'"
echo "OPENCONTRAIL_KUBERNETES_TAG='${OPENCONTRAIL_KUBERNETES_TAG:-}'"
echo "OPENCONTRAIL_PUBLIC_SUBNET='${OPENCONTRAIL_PUBLIC_SUBNET:-}'"
echo "E2E_STORAGE_TEST_ENVIRONMENT='${E2E_STORAGE_TEST_ENVIRONMENT:-}'"
}
function verify-cluster {
# TODO: How does the user know the difference between "tak[ing] some
# time" and "loop[ing] forever"? Can we give more specific feedback on
# whether "an error" has occurred?
echo "Each machine instance has been created/updated."
echo " Now waiting for the Salt provisioning process to complete on each machine."
echo " This can take some time based on your network, disk, and cpu speed."
echo " It is possible for an error to occur during Salt provision of cluster and this could loop forever."
# verify master has all required daemons
echo "Validating master"
local machine="master"
local -a required_processes=("kube-apiserver" "kube-scheduler" "kube-controller-manager" "kubelet" "docker")
local validated="1"
until [[ "$validated" == "0" ]]; do
validated="0"
for process in "${required_processes[@]}"; do
vagrant ssh "${machine}" -c "pgrep -f ${process}" >/dev/null 2>&1 || {
printf "."
validated="1"
sleep 2
}
done
done
# verify each node has all required daemons
local i
for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
echo "Validating ${VAGRANT_NODE_NAMES[$i]}"
local machine=${VAGRANT_NODE_NAMES[$i]}
local -a required_processes=("kube-proxy" "kubelet" "docker")
local validated="1"
until [[ "${validated}" == "0" ]]; do
validated="0"
for process in "${required_processes[@]}"; do
vagrant ssh "${machine}" -c "pgrep -f ${process}" >/dev/null 2>&1 || {
printf "."
validated="1"
sleep 2
}
done
done
done
echo
echo "Waiting for each node to be registered with cloud provider"
for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
local validated="0"
until [[ "$validated" == "1" ]]; do
local nodes=$("${KUBE_ROOT}/cluster/kubectl.sh" get nodes -o name --api-version=v1)
validated=$(echo $nodes | grep -c "${NODE_NAMES[i]}") || {
printf "."
sleep 2
validated="0"
}
done
done
# By this time, all kube api calls should work, so no need to loop and retry.
echo "Validating we can run kubectl commands."
vagrant ssh master --command "kubectl get pods" || {
echo "WARNING: kubectl to localhost failed. This could mean localhost is not bound to an IP"
}
(
# ensures KUBECONFIG is set
get-kubeconfig-basicauth
get-kubeconfig-bearertoken
echo
echo "Kubernetes cluster is running."
echo
echo "The master is running at:"
echo
echo " https://${MASTER_IP}"
echo
echo "Administer and visualize its resources using Cockpit:"
echo
echo " https://${MASTER_IP}:9090"
echo
echo "For more information on Cockpit, visit http://cockpit-project.org"
echo
echo "The user name and password to use is located in ${KUBECONFIG}"
echo
)
}
# Instantiate a kubernetes cluster
function kube-up {
load-or-gen-kube-basicauth
load-or-gen-kube-bearertoken
get-tokens
create-provision-scripts
vagrant up --no-parallel
export KUBE_CERT="/tmp/$RANDOM-kubecfg.crt"
export KUBE_KEY="/tmp/$RANDOM-kubecfg.key"
export CA_CERT="/tmp/$RANDOM-kubernetes.ca.crt"
export CONTEXT="vagrant"
(
umask 077
vagrant ssh master -- sudo cat /srv/kubernetes/kubecfg.crt >"${KUBE_CERT}" 2>/dev/null
vagrant ssh master -- sudo cat /srv/kubernetes/kubecfg.key >"${KUBE_KEY}" 2>/dev/null
vagrant ssh master -- sudo cat /srv/kubernetes/ca.crt >"${CA_CERT}" 2>/dev/null
create-kubeconfig
)
verify-cluster
}
# Delete a kubernetes cluster
function kube-down {
vagrant destroy -f
}
# Update a kubernetes cluster with latest source
function kube-push {
get-kubeconfig-basicauth
get-kubeconfig-bearertoken
create-provision-scripts
vagrant provision
}
# Execute prior to running tests to build a release if required for env
function test-build-release {
# Make a release
"${KUBE_ROOT}/build/release.sh"
}
# Execute prior to running tests to initialize required structure
function test-setup {
"${KUBE_ROOT}/cluster/kube-up.sh"
echo "Vagrant test setup complete" 1>&2
}
# Execute after running tests to perform any required clean-up
function test-teardown {
kube-down
}
# Find the node name based on the IP address
function find-vagrant-name-by-ip {
local ip="$1"
local ip_pattern="${NODE_IP_BASE}(.*)"
# This is subtle. We map 10.245.2.2 -> node-1. We do this by matching a
# regexp and using the capture to construct the name.
[[ $ip =~ $ip_pattern ]] || {
return 1
}
echo "node-$((${BASH_REMATCH[1]} - 1))"
}
# Find the vagrant machine name based on the host name of the node
function find-vagrant-name-by-node-name {
local ip="$1"
if [[ "$ip" == "${INSTANCE_PREFIX}-master" ]]; then
echo "master"
return $?
fi
local ip_pattern="${INSTANCE_PREFIX}-node-(.*)"
[[ $ip =~ $ip_pattern ]] || {
return 1
}
echo "node-${BASH_REMATCH[1]}"
}
# SSH to a node by name or IP ($1) and run a command ($2).
function ssh-to-node {
local node="$1"
local cmd="$2"
local machine
machine=$(find-vagrant-name-by-ip $node) || true
[[ -n ${machine-} ]] || machine=$(find-vagrant-name-by-node-name $node) || true
[[ -n ${machine-} ]] || {
echo "Cannot find machine to ssh to: $1"
return 1
}
vagrant ssh "${machine}" -c "${cmd}"
}
# Restart the kube-proxy on a node ($1)
function restart-kube-proxy {
ssh-to-node "$1" "sudo systemctl restart kube-proxy"
}
# Restart the apiserver
function restart-apiserver {
ssh-to-node "$1" "sudo systemctl restart kube-apiserver"
}
# Perform preparations required to run e2e tests
function prepare-e2e() {
echo "Vagrant doesn't need special preparations for e2e tests" 1>&2
}
function get-tokens() {
KUBELET_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
KUBE_PROXY_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
}
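# For reference (illustrative): each token is the first 32 characters of
# base64-encoded random data with "=", "+" and "/" stripped. The first dd
# reads 128 random bytes; the second truncates the encoded stream to 32
# bytes.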
| hurf/kubernetes | cluster/vagrant/util.sh | Shell | apache-2.0 | 13,233 |
# NPX Plugin
# https://www.npmjs.com/package/npx
# Maintainer: Pooya Parsa <[email protected]>
(( $+commands[npx] )) && {
source <(npx --shell-auto-fallback zsh)
}
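# For reference: "(( $+commands[npx] ))" is true only when zsh's command
# table contains an npx entry, so the auto-fallback hook above is sourced
# only on machines where npx is installed.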
| okubax/dotfiles | zsh/oh-my-zsh/plugins/npx/npx.plugin.zsh | Shell | gpl-3.0 | 161 |
#!/bin/sh
test_description='fetch/push involving alternates'
. ./test-lib.sh
count_objects () {
loose=0 inpack=0
eval "$(
git count-objects -v |
sed -n -e 's/^count: \(.*\)/loose=\1/p' \
-e 's/^in-pack: \(.*\)/inpack=\1/p'
)" &&
echo $(( $loose + $inpack ))
}
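# Illustration (assumed "git count-objects -v" output):
#   count: 3
#   in-pack: 40
# The sed above rewrites those lines to "loose=3" and "inpack=40", eval
# assigns them, and the function prints their sum, 43.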
test_expect_success setup '
(
git init original &&
cd original &&
i=0 &&
while test $i -le 100
do
echo "$i" >count &&
git add count &&
git commit -m "$i" || exit
i=$(($i + 1))
done
) &&
(
git clone --reference=original "file:///$(pwd)/original" one &&
cd one &&
echo Z >count &&
git add count &&
git commit -m Z &&
count_objects >../one.count
) &&
A=$(pwd)/original/.git/objects &&
git init receiver &&
echo "$A" >receiver/.git/objects/info/alternates &&
git init fetcher &&
echo "$A" >fetcher/.git/objects/info/alternates
'
test_expect_success 'pushing into a repository with the same alternate' '
(
cd one &&
git push ../receiver master:refs/heads/it
) &&
(
cd receiver &&
count_objects >../receiver.count
) &&
test_cmp one.count receiver.count
'
test_expect_success 'fetching from a repository with the same alternate' '
(
cd fetcher &&
git fetch ../one master:refs/heads/it &&
count_objects >../fetcher.count
) &&
test_cmp one.count fetcher.count
'
test_done
| robertpfeiffer/git | t/t5501-fetch-push-alternates.sh | Shell | gpl-2.0 | 1,311 |
#!/bin/bash
fw_installed grails && return 0
VERSION="2.4.4"
GRAILS_HOME=$IROOT/grails-$VERSION
fw_get -O http://dist.springframework.org.s3.amazonaws.com/release/GRAILS/grails-$VERSION.zip
fw_unzip grails-$VERSION.zip
echo "export GRAILS_HOME=${GRAILS_HOME}" > $IROOT/grails.installed
echo -e "export PATH=\$GRAILS_HOME/bin:\$PATH" >> $IROOT/grails.installed
echo "export GRAILS_AGENT_CACHE_DIR=${IROOT}/.grails/.slcache" >> $IROOT/grails.installed
source $IROOT/grails.installed
| saturday06/FrameworkBenchmarks | toolset/setup/linux/frameworks/grails.sh | Shell | bsd-3-clause | 485 |
#!/bin/bash
fw_depends java maven
mvn clean compile assembly:single
cd target
java -server \
-XX:+UseNUMA -XX:+UseParallelGC -XX:+AggressiveOpts -cp \
rapidoid-1.0-jar-with-dependencies.jar \
highlevel.Main profiles=production dbhost="$DBHOST" &
| mfirry/FrameworkBenchmarks | frameworks/Java/rapidoid/setup-default.sh | Shell | bsd-3-clause | 256 |
#!/bin/bash
mkdir -p "$PREFIX/bin"
export MACHTYPE=x86_64
export BINDIR=$(pwd)/bin
export L="${LDFLAGS}"
mkdir -p "$BINDIR"
(cd kent/src/lib && make)
(cd kent/src/htslib && make)
(cd kent/src/jkOwnLib && make)
(cd kent/src/hg/lib && make)
(cd kent/src/utils/subChar && make)
cp bin/subChar "$PREFIX/bin"
chmod +x "$PREFIX/bin/subChar"
| phac-nml/bioconda-recipes | recipes/ucsc-subchar/build.sh | Shell | mit | 335 |
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
ALL_TESTS="ping_ipv4 ping_ipv6 multipath_test"
NUM_NETIFS=8
source lib.sh
h1_create()
{
vrf_create "vrf-h1"
ip link set dev $h1 master vrf-h1
ip link set dev vrf-h1 up
ip link set dev $h1 up
ip address add 192.0.2.2/24 dev $h1
ip address add 2001:db8:1::2/64 dev $h1
ip route add 198.51.100.0/24 vrf vrf-h1 nexthop via 192.0.2.1
ip route add 2001:db8:2::/64 vrf vrf-h1 nexthop via 2001:db8:1::1
}
h1_destroy()
{
ip route del 2001:db8:2::/64 vrf vrf-h1
ip route del 198.51.100.0/24 vrf vrf-h1
ip address del 2001:db8:1::2/64 dev $h1
ip address del 192.0.2.2/24 dev $h1
ip link set dev $h1 down
vrf_destroy "vrf-h1"
}
h2_create()
{
vrf_create "vrf-h2"
ip link set dev $h2 master vrf-h2
ip link set dev vrf-h2 up
ip link set dev $h2 up
ip address add 198.51.100.2/24 dev $h2
ip address add 2001:db8:2::2/64 dev $h2
ip route add 192.0.2.0/24 vrf vrf-h2 nexthop via 198.51.100.1
ip route add 2001:db8:1::/64 vrf vrf-h2 nexthop via 2001:db8:2::1
}
h2_destroy()
{
ip route del 2001:db8:1::/64 vrf vrf-h2
ip route del 192.0.2.0/24 vrf vrf-h2
ip address del 2001:db8:2::2/64 dev $h2
ip address del 198.51.100.2/24 dev $h2
ip link set dev $h2 down
vrf_destroy "vrf-h2"
}
router1_create()
{
vrf_create "vrf-r1"
ip link set dev $rp11 master vrf-r1
ip link set dev $rp12 master vrf-r1
ip link set dev $rp13 master vrf-r1
ip link set dev vrf-r1 up
ip link set dev $rp11 up
ip link set dev $rp12 up
ip link set dev $rp13 up
ip address add 192.0.2.1/24 dev $rp11
ip address add 2001:db8:1::1/64 dev $rp11
ip address add 169.254.2.12/24 dev $rp12
ip address add fe80:2::12/64 dev $rp12
ip address add 169.254.3.13/24 dev $rp13
ip address add fe80:3::13/64 dev $rp13
}
router1_destroy()
{
ip route del 2001:db8:2::/64 vrf vrf-r1
ip route del 198.51.100.0/24 vrf vrf-r1
ip address del fe80:3::13/64 dev $rp13
ip address del 169.254.3.13/24 dev $rp13
ip address del fe80:2::12/64 dev $rp12
ip address del 169.254.2.12/24 dev $rp12
ip address del 2001:db8:1::1/64 dev $rp11
ip address del 192.0.2.1/24 dev $rp11
ip nexthop del id 103
ip nexthop del id 101
ip nexthop del id 102
ip nexthop del id 106
ip nexthop del id 104
ip nexthop del id 105
ip link set dev $rp13 down
ip link set dev $rp12 down
ip link set dev $rp11 down
vrf_destroy "vrf-r1"
}
router2_create()
{
vrf_create "vrf-r2"
ip link set dev $rp21 master vrf-r2
ip link set dev $rp22 master vrf-r2
ip link set dev $rp23 master vrf-r2
ip link set dev vrf-r2 up
ip link set dev $rp21 up
ip link set dev $rp22 up
ip link set dev $rp23 up
ip address add 198.51.100.1/24 dev $rp21
ip address add 2001:db8:2::1/64 dev $rp21
ip address add 169.254.2.22/24 dev $rp22
ip address add fe80:2::22/64 dev $rp22
ip address add 169.254.3.23/24 dev $rp23
ip address add fe80:3::23/64 dev $rp23
}
router2_destroy()
{
ip route del 2001:db8:1::/64 vrf vrf-r2
ip route del 192.0.2.0/24 vrf vrf-r2
ip address del fe80:3::23/64 dev $rp23
ip address del 169.254.3.23/24 dev $rp23
ip address del fe80:2::22/64 dev $rp22
ip address del 169.254.2.22/24 dev $rp22
ip address del 2001:db8:2::1/64 dev $rp21
ip address del 198.51.100.1/24 dev $rp21
ip nexthop del id 203
ip nexthop del id 201
ip nexthop del id 202
ip nexthop del id 206
ip nexthop del id 204
ip nexthop del id 205
ip link set dev $rp23 down
ip link set dev $rp22 down
ip link set dev $rp21 down
vrf_destroy "vrf-r2"
}
routing_nh_obj()
{
ip nexthop add id 101 via 169.254.2.22 dev $rp12
ip nexthop add id 102 via 169.254.3.23 dev $rp13
ip nexthop add id 103 group 101/102
ip route add 198.51.100.0/24 vrf vrf-r1 nhid 103
ip nexthop add id 104 via fe80:2::22 dev $rp12
ip nexthop add id 105 via fe80:3::23 dev $rp13
ip nexthop add id 106 group 104/105
ip route add 2001:db8:2::/64 vrf vrf-r1 nhid 106
ip nexthop add id 201 via 169.254.2.12 dev $rp22
ip nexthop add id 202 via 169.254.3.13 dev $rp23
ip nexthop add id 203 group 201/202
ip route add 192.0.2.0/24 vrf vrf-r2 nhid 203
ip nexthop add id 204 via fe80:2::12 dev $rp22
ip nexthop add id 205 via fe80:3::13 dev $rp23
ip nexthop add id 206 group 204/205
ip route add 2001:db8:1::/64 vrf vrf-r2 nhid 206
}
multipath4_test()
{
local desc="$1"
local weight_rp12=$2
local weight_rp13=$3
local t0_rp12 t0_rp13 t1_rp12 t1_rp13
local packets_rp12 packets_rp13
# Transmit multiple flows from h1 to h2 and make sure they are
# distributed between both multipath links (rp12 and rp13)
# according to the configured weights.
sysctl_set net.ipv4.fib_multipath_hash_policy 1
ip nexthop replace id 103 group 101,$weight_rp12/102,$weight_rp13
t0_rp12=$(link_stats_tx_packets_get $rp12)
t0_rp13=$(link_stats_tx_packets_get $rp13)
ip vrf exec vrf-h1 $MZ $h1 -q -p 64 -A 192.0.2.2 -B 198.51.100.2 \
-d 1msec -t udp "sp=1024,dp=0-32768"
t1_rp12=$(link_stats_tx_packets_get $rp12)
t1_rp13=$(link_stats_tx_packets_get $rp13)
let "packets_rp12 = $t1_rp12 - $t0_rp12"
let "packets_rp13 = $t1_rp13 - $t0_rp13"
multipath_eval "$desc" $weight_rp12 $weight_rp13 $packets_rp12 $packets_rp13
# Restore settings.
ip nexthop replace id 103 group 101/102
sysctl_restore net.ipv4.fib_multipath_hash_policy
}
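# Worked example (illustrative): for "Weighted MP 2:1" the replace in
# multipath4_test above becomes
#   ip nexthop replace id 103 group 101,2/102,1
# so roughly two thirds of the flows should egress via $rp12.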
multipath6_l4_test()
{
local desc="$1"
local weight_rp12=$2
local weight_rp13=$3
local t0_rp12 t0_rp13 t1_rp12 t1_rp13
local packets_rp12 packets_rp13
# Transmit multiple flows from h1 to h2 and make sure they are
# distributed between both multipath links (rp12 and rp13)
# according to the configured weights.
sysctl_set net.ipv6.fib_multipath_hash_policy 1
ip nexthop replace id 106 group 104,$weight_rp12/105,$weight_rp13
t0_rp12=$(link_stats_tx_packets_get $rp12)
t0_rp13=$(link_stats_tx_packets_get $rp13)
$MZ $h1 -6 -q -p 64 -A 2001:db8:1::2 -B 2001:db8:2::2 \
-d 1msec -t udp "sp=1024,dp=0-32768"
t1_rp12=$(link_stats_tx_packets_get $rp12)
t1_rp13=$(link_stats_tx_packets_get $rp13)
let "packets_rp12 = $t1_rp12 - $t0_rp12"
let "packets_rp13 = $t1_rp13 - $t0_rp13"
multipath_eval "$desc" $weight_rp12 $weight_rp13 $packets_rp12 $packets_rp13
ip nexthop replace id 106 group 104/105
sysctl_restore net.ipv6.fib_multipath_hash_policy
}
multipath6_test()
{
local desc="$1"
local weight_rp12=$2
local weight_rp13=$3
local t0_rp12 t0_rp13 t1_rp12 t1_rp13
local packets_rp12 packets_rp13
ip nexthop replace id 106 group 104,$weight_rp12/105,$weight_rp13
t0_rp12=$(link_stats_tx_packets_get $rp12)
t0_rp13=$(link_stats_tx_packets_get $rp13)
# Generate 16384 echo requests, each with a random flow label.
for _ in $(seq 1 16384); do
ip vrf exec vrf-h1 $PING6 2001:db8:2::2 -F 0 -c 1 -q >/dev/null 2>&1
done
t1_rp12=$(link_stats_tx_packets_get $rp12)
t1_rp13=$(link_stats_tx_packets_get $rp13)
let "packets_rp12 = $t1_rp12 - $t0_rp12"
let "packets_rp13 = $t1_rp13 - $t0_rp13"
multipath_eval "$desc" $weight_rp12 $weight_rp13 $packets_rp12 $packets_rp13
ip nexthop replace id 106 group 104/105
}
multipath_test()
{
log_info "Running IPv4 multipath tests"
multipath4_test "ECMP" 1 1
multipath4_test "Weighted MP 2:1" 2 1
multipath4_test "Weighted MP 11:45" 11 45
log_info "Running IPv6 multipath tests"
multipath6_test "ECMP" 1 1
multipath6_test "Weighted MP 2:1" 2 1
multipath6_test "Weighted MP 11:45" 11 45
log_info "Running IPv6 L4 hash multipath tests"
multipath6_l4_test "ECMP" 1 1
multipath6_l4_test "Weighted MP 2:1" 2 1
multipath6_l4_test "Weighted MP 11:45" 11 45
}
setup_prepare()
{
h1=${NETIFS[p1]}
rp11=${NETIFS[p2]}
rp12=${NETIFS[p3]}
rp22=${NETIFS[p4]}
rp13=${NETIFS[p5]}
rp23=${NETIFS[p6]}
rp21=${NETIFS[p7]}
h2=${NETIFS[p8]}
vrf_prepare
h1_create
h2_create
router1_create
router2_create
forwarding_enable
}
cleanup()
{
pre_cleanup
forwarding_restore
router2_destroy
router1_destroy
h2_destroy
h1_destroy
vrf_cleanup
}
ping_ipv4()
{
ping_test $h1 198.51.100.2
}
ping_ipv6()
{
ping6_test $h1 2001:db8:2::2
}
if ! ip nexthop ls >/dev/null 2>&1; then
echo "Nexthop objects not supported; skipping tests"
exit 0
fi
trap cleanup EXIT
setup_prepare
setup_wait
routing_nh_obj
tests_run
exit $EXIT_STATUS
| chenshuo/linux-study | tools/testing/selftests/net/forwarding/router_mpath_nh.sh | Shell | gpl-2.0 | 8,144 |
#!/bin/bash
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#----------------------------------------
function importFunctions() {
local script=$(readlink -f "$0")
local scriptdir=$(dirname "$script")
export TO_DIR=$(dirname "$scriptdir")
export TC_DIR=$(dirname "$TO_DIR")
functions_sh="$TC_DIR/build/functions.sh"
if [[ ! -r $functions_sh ]]; then
echo "error: can't find $functions_sh"
exit 1
fi
. "$functions_sh"
}
# ---------------------------------------
function initBuildArea() {
echo "Initializing the build area."
mkdir -p "$RPMBUILD"/{SPECS,SOURCES,RPMS,SRPMS,BUILD,BUILDROOT} || { echo "Could not create $RPMBUILD: $?"; exit 1; }
local to_dest=$(createSourceDir traffic_ops)
cd "$TO_DIR" || \
{ echo "Could not cd to $TO_DIR: $?"; exit 1; }
rsync -av doc etc install "$to_dest"/ || \
{ echo "Could not copy to $to_dest: $?"; exit 1; }
rsync -av app/{bin,conf,cpanfile,db,lib,public,script,templates} "$to_dest"/app/ || \
{ echo "Could not copy to $to_dest/app: $?"; exit 1; }
tar -czvf "$to_dest".tgz -C "$RPMBUILD"/SOURCES $(basename "$to_dest") || \
{ echo "Could not create tar archive $to_dest.tgz: $?"; exit 1; }
cp "$TO_DIR"/build/*.spec "$RPMBUILD"/SPECS/. || \
{ echo "Could not copy spec files: $?"; exit 1; }
# Create traffic_ops_ort source area
to_ort_dest=$(createSourceDir traffic_ops_ort)
cp -p bin/traffic_ops_ort.pl "$to_ort_dest"
cp -p bin/supermicro_udev_mapper.pl "$to_ort_dest"
tar -czvf "$to_ort_dest".tgz -C "$RPMBUILD"/SOURCES $(basename "$to_ort_dest") || \
{ echo "Could not create tar archive $to_ort_dest: $?"; exit 1; }
echo "The build area has been initialized."
}
# ---------------------------------------
importFunctions
checkEnvironment go
initBuildArea
buildRpm traffic_ops traffic_ops_ort
| orifinkelman/incubator-trafficcontrol | traffic_ops/build/build_rpm.sh | Shell | apache-2.0 | 2,286 |
#!/bin/sh
#
# Copyright (c) 2006 Junio C Hamano
#
test_description='commit and log output encodings'
. ./test-lib.sh
compare_with () {
git show -s $1 | sed -e '1,/^$/d' -e 's/^ //' >current &&
case "$3" in
'')
test_cmp "$2" current ;;
?*)
iconv -f "$3" -t UTF-8 >current.utf8 <current &&
iconv -f "$3" -t UTF-8 >expect.utf8 <"$2" &&
test_cmp expect.utf8 current.utf8
;;
esac
}
test_expect_success setup '
: >F &&
git add F &&
T=$(git write-tree) &&
C=$(git commit-tree $T <"$TEST_DIRECTORY"/t3900/1-UTF-8.txt) &&
git update-ref HEAD $C &&
git tag C0
'
test_expect_success 'no encoding header for base case' '
E=$(git cat-file commit C0 | sed -ne "s/^encoding //p") &&
test z = "z$E"
'
test_expect_success 'UTF-16 refused because of NULs' '
echo UTF-16 >F &&
test_must_fail git commit -a -F "$TEST_DIRECTORY"/t3900/UTF-16.txt
'
test_expect_success 'UTF-8 invalid characters refused' '
test_when_finished "rm -f \"\$HOME/stderr\" \"\$HOME/invalid\"" &&
echo "UTF-8 characters" >F &&
printf "Commit message\n\nInvalid surrogate:\355\240\200\n" \
>"$HOME/invalid" &&
git commit -a -F "$HOME/invalid" 2>"$HOME"/stderr &&
test_i18ngrep "did not conform" "$HOME"/stderr
'
test_expect_success 'UTF-8 overlong sequences rejected' '
test_when_finished "rm -f \"\$HOME/stderr\" \"\$HOME/invalid\"" &&
rm -f "$HOME/stderr" "$HOME/invalid" &&
echo "UTF-8 overlong" >F &&
printf "\340\202\251ommit message\n\nThis is not a space:\300\240\n" \
>"$HOME/invalid" &&
git commit -a -F "$HOME/invalid" 2>"$HOME"/stderr &&
test_i18ngrep "did not conform" "$HOME"/stderr
'
test_expect_success 'UTF-8 non-characters refused' '
test_when_finished "rm -f \"\$HOME/stderr\" \"\$HOME/invalid\"" &&
echo "UTF-8 non-character 1" >F &&
printf "Commit message\n\nNon-character:\364\217\277\276\n" \
>"$HOME/invalid" &&
git commit -a -F "$HOME/invalid" 2>"$HOME"/stderr &&
test_i18ngrep "did not conform" "$HOME"/stderr
'
test_expect_success 'UTF-8 non-characters refused' '
test_when_finished "rm -f \"\$HOME/stderr\" \"\$HOME/invalid\"" &&
echo "UTF-8 non-character 2." >F &&
printf "Commit message\n\nNon-character:\357\267\220\n" \
>"$HOME/invalid" &&
git commit -a -F "$HOME/invalid" 2>"$HOME"/stderr &&
test_i18ngrep "did not conform" "$HOME"/stderr
'
for H in ISO8859-1 eucJP ISO-2022-JP
do
test_expect_success "$H setup" '
git config i18n.commitencoding $H &&
git checkout -b $H C0 &&
echo $H >F &&
git commit -a -F "$TEST_DIRECTORY"/t3900/$H.txt
'
done
for H in ISO8859-1 eucJP ISO-2022-JP
do
test_expect_success "check encoding header for $H" '
E=$(git cat-file commit '$H' | sed -ne "s/^encoding //p") &&
test "z$E" = "z'$H'"
'
done
test_expect_success 'config to remove customization' '
git config --unset-all i18n.commitencoding &&
if Z=$(git config --get-all i18n.commitencoding)
then
echo Oops, should have failed.
false
else
test z = "z$Z"
fi &&
git config i18n.commitencoding UTF-8
'
test_expect_success 'ISO8859-1 should be shown in UTF-8 now' '
compare_with ISO8859-1 "$TEST_DIRECTORY"/t3900/1-UTF-8.txt
'
for H in eucJP ISO-2022-JP
do
test_expect_success "$H should be shown in UTF-8 now" '
compare_with '$H' "$TEST_DIRECTORY"/t3900/2-UTF-8.txt
'
done
test_expect_success 'config to add customization' '
git config --unset-all i18n.commitencoding &&
if Z=$(git config --get-all i18n.commitencoding)
then
echo Oops, should have failed.
false
else
test z = "z$Z"
fi
'
for H in ISO8859-1 eucJP ISO-2022-JP
do
test_expect_success "$H should be shown in itself now" '
git config i18n.commitencoding '$H' &&
compare_with '$H' "$TEST_DIRECTORY"/t3900/'$H'.txt
'
done
test_expect_success 'config to tweak customization' '
git config i18n.logoutputencoding UTF-8
'
test_expect_success 'ISO8859-1 should be shown in UTF-8 now' '
compare_with ISO8859-1 "$TEST_DIRECTORY"/t3900/1-UTF-8.txt
'
for H in eucJP ISO-2022-JP
do
test_expect_success "$H should be shown in UTF-8 now" '
compare_with '$H' "$TEST_DIRECTORY"/t3900/2-UTF-8.txt
'
done
for J in eucJP ISO-2022-JP
do
if test "$J" = ISO-2022-JP
then
ICONV=$J
else
ICONV=
fi
git config i18n.logoutputencoding $J
for H in eucJP ISO-2022-JP
do
test_expect_success "$H should be shown in $J now" '
compare_with '$H' "$TEST_DIRECTORY"/t3900/'$J'.txt $ICONV
'
done
done
for H in ISO8859-1 eucJP ISO-2022-JP
do
test_expect_success "No conversion with $H" '
compare_with "--encoding=none '$H'" "$TEST_DIRECTORY"/t3900/'$H'.txt
'
done
test_commit_autosquash_flags () {
H=$1
flag=$2
test_expect_success "commit --$flag with $H encoding" '
git config i18n.commitencoding $H &&
git checkout -b $H-$flag C0 &&
echo $H >>F &&
git commit -a -F "$TEST_DIRECTORY"/t3900/$H.txt &&
test_tick &&
echo intermediate stuff >>G &&
git add G &&
git commit -a -m "intermediate commit" &&
test_tick &&
echo $H $flag >>F &&
git commit -a --$flag HEAD~1 &&
E=$(git cat-file commit '$H-$flag' |
sed -ne "s/^encoding //p") &&
test "z$E" = "z$H" &&
git config --unset-all i18n.commitencoding &&
git rebase --autosquash -i HEAD^^^ &&
git log --oneline >actual &&
test_line_count = 3 actual
'
}
test_commit_autosquash_flags eucJP fixup
test_commit_autosquash_flags ISO-2022-JP squash
test_done
| devzero2000/git-core | t/t3900-i18n-commit.sh | Shell | gpl-2.0 | 5,290 |
#!/bin/sh
#
# Blackbox test for net conf/registry roundtrips.
#
# Copyright (C) 2010 Gregor Beck <[email protected]>
# Copyright (C) 2011 Michael Adam <[email protected]>
if [ $# -lt 3 ]; then
cat <<EOF
Usage: test_net_registry_roundtrip.sh SCRIPTDIR SERVERCONFFILE CONFIGURATION
EOF
exit 1;
fi
SCRIPTDIR="$1"
SERVERCONFFILE="$2"
CONFIGURATION="$3"
NET="$VALGRIND ${NET:-$BINDIR/net} $CONFIGURATION"
if test "x${RPC}" = "xrpc" ; then
NETREG="${NET} -U${USERNAME}%${PASSWORD} -I ${SERVER_IP} rpc registry"
else
NETREG="${NET} registry"
fi
test x"$TEST_FUNCTIONS_SH" != x"INCLUDED" && {
incdir=`dirname $0`/../../../testprogs/blackbox
. $incdir/subunit.sh
}
failed=0
SED_INVALID_PARAMS="{
s/lock directory/;&/g
s/lock dir/;&/g
s/modules dir/;&/g
s/logging/;&/g
s/status/;&/g
s/logdir/;&/g
s/read prediction/;&/g
s/mkprofile/;&/g
s/valid chars/;&/g
s/timesync/;&/g
s/sambaconf/;&/g
s/logtype/;&/g
s/servername/;&/g
s/postscript/;&/g
}"
REGPATH="HKLM\Software\Samba"
conf_roundtrip_step() {
echo "CMD: $*" >>$LOG
$@ 2>>$LOG
RC=$?
echo "RC: $RC" >> $LOG
test "x$RC" = "x0" || {
echo "ERROR: $@ failed (RC=$RC)" | tee -a $LOG
}
return $RC
# echo -n .
}
LOGDIR_PREFIX="conf_roundtrip"
conf_roundtrip()
{
local DIR=$(mktemp -d ${PREFIX}/${LOGDIR_PREFIX}_XXXXXX)
local LOG=$DIR/log
echo conf_roundtrip $1 > $LOG
sed -e "$SED_INVALID_PARAMS" $1 >$DIR/conf_in
conf_roundtrip_step $NET conf drop
test "x$?" = "x0" || {
return 1
}
test -z "$($NET conf list)" 2>>$LOG
if [ "$?" = "1" ]; then
echo "ERROR: conf drop failed" | tee -a $LOG
return 1
fi
conf_roundtrip_step $NET conf import $DIR/conf_in
test "x$?" = "x0" || {
return 1
}
conf_roundtrip_step $NET conf list > $DIR/conf_exp
test "x$?" = "x0" || {
return 1
}
grep "\[global\]" $DIR/conf_exp >/dev/null 2>>$LOG
if [ "$?" = "1" ]; then
echo "ERROR: conf import => conf export failed" | tee -a $LOG
return 1
fi
conf_roundtrip_step $NET -d10 registry export $REGPATH $DIR/conf_exp.reg
test "x$?" = "x0" || {
return 1
}
conf_roundtrip_step $NET conf drop
test "x$?" = "x0" || {
return 1
}
test -z "$($NET conf list)" 2>>$LOG
if [ "$?" = "1" ]; then
echo "ERROR: conf drop failed" | tee -a $LOG
return 1
fi
conf_roundtrip_step $NET registry import $DIR/conf_exp.reg
test "x$?" = "x0" || {
return 1
}
conf_roundtrip_step $NET conf list >$DIR/conf_out
test "x$?" = "x0" || {
return 1
}
diff -q $DIR/conf_out $DIR/conf_exp >> $LOG
if [ "$?" = "1" ]; then
echo "ERROR: registry import => conf export failed" | tee -a $LOG
return 1
fi
conf_roundtrip_step $NET registry export $REGPATH $DIR/conf_out.reg
test "x$?" = "x0" || {
return 1
}
diff -q $DIR/conf_out.reg $DIR/conf_exp.reg >>$LOG
if [ "$?" = "1" ]; then
echo "Error: registry import => registry export failed" | tee -a $LOG
return 1
fi
rm -r $DIR
}
CONF_FILES=${CONF_FILES:-$(find $SRCDIR/ -name '*.conf' | grep -v examples/logon | xargs grep -l "\[global\]")}
# remove old logs:
for OLDDIR in $(find ${PREFIX} -type d -name "${LOGDIR_PREFIX}_*") ; do
echo "removing old directory ${OLDDIR}"
rm -rf ${OLDDIR}
done
for conf_file in $CONF_FILES
do
testit "conf_roundtrip $conf_file" \
conf_roundtrip $conf_file \
|| failed=`expr $failed + 1`
done
testok $0 $failed
| zarboz/XBMC-PVR-mac | tools/darwin/depends/samba/samba-3.6.6/source3/script/tests/test_net_registry_roundtrip.sh | Shell | gpl-2.0 | 3,485 |
#!/bin/bash -e
. shared.sh
curl \
-H 'X-Broker-API-Version: 2.9' \
-v \
$curlargs \
$endpoint/v2/service_instances/$instanceUUID/last_operation'?operation=provisioning'
| adelton/origin | pkg/template/servicebroker/test-scripts/lastoperation-provision.sh | Shell | apache-2.0 | 179 |
#!/bin/sh
git init
gbs build -A armv7l --include-all -B ~/GBS-ROOT-NEW
| WojciechLuczkow/iotivity | resource/csdk/connectivity/samples/tizen/gbsbuild.sh | Shell | apache-2.0 | 75 |
# Copyright (C) 2009-2012 OpenWrt.org
fw__uci_state_add() {
local var="$1"
local item="$2"
local val="$(uci_get_state firewall core $var)"
local e1; for e1 in $item; do
local e2; for e2 in $val; do
[ "$e1" = "$e2" ] && e1=""
done
val="${val:+$val${e1:+ }}$e1"
done
uci_toggle_state firewall core $var "$val"
}
fw__uci_state_del() {
local var="$1"
local item="$2"
local rest=""
local val="$(uci_get_state firewall core $var)"
local e1; for e1 in $val; do
local e2; for e2 in $item; do
[ "$e1" = "$e2" ] && e1=""
done
rest="${rest:+$rest${e1:+ }}$e1"
done
uci_toggle_state firewall core $var "$rest"
}
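# Illustration (hypothetical state): with firewall.core.zone_networks set
# to "lan wan", fw__uci_state_add zone_networks "wan dmz" stores
# "lan wan dmz", and fw__uci_state_del zone_networks "wan" then stores
# "lan dmz". Duplicates are skipped on add; matching items are dropped on
# delete.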
fw_configure_interface() {
local iface=$1
local action=$2
local ifname=$3
local aliasnet=$4
[ "$action" == "add" ] && {
local status=$(uci_get_state network "$iface" up 0)
[ "$status" == 1 ] || [ -n "$aliasnet" ] || return 0
}
[ -n "$ifname" ] || {
ifname=$(uci_get_state network "$iface" ifname)
ifname="${ifname%%:*}"
[ -z "$ifname" ] && return 0
}
[ "$ifname" == "lo" ] && return 0
fw_callback pre interface
fw__do_rules() {
local action=$1
local zone=$2
local chain=zone_${zone}
local ifname=$3
local subnet=$4
local inet onet mode
fw_get_family_mode mode x $zone i
case "$mode/$subnet" in
# Zone supports v6 only or dual, need v6
G6/*:*|i/*:*)
inet="-s $subnet -d ::/0"
onet="-s ::/0 -d $subnet"
mode=6
;;
# Zone supports v4 only or dual, need v4
G4/*.*.*.*|i/*.*.*.*)
inet="-s $subnet -d 0.0.0.0/0"
onet="-s 0.0.0.0/0 -d $subnet"
mode=4
;;
# Need v6 while zone is v4
*/*:*) fw_log info "zone $zone does not support IPv6 address family, skipping"; return ;;
# Need v4 while zone is v6
*/*.*) fw_log info "zone $zone does not support IPv4 address family, skipping"; return ;;
# Strip prefix
*) mode="${mode#G}" ;;
esac
lock /var/run/firewall-interface.lock
fw $action $mode f ${chain}_ACCEPT ACCEPT $ { -o "$ifname" $onet }
fw $action $mode f ${chain}_ACCEPT ACCEPT $ { -i "$ifname" $inet }
fw $action $mode f ${chain}_DROP DROP $ { -o "$ifname" $onet }
fw $action $mode f ${chain}_DROP DROP $ { -i "$ifname" $inet }
fw $action $mode f ${chain}_REJECT reject $ { -o "$ifname" $onet }
fw $action $mode f ${chain}_REJECT reject $ { -i "$ifname" $inet }
[ "$(uci_get_state firewall core "${zone}_tcpmss")" == 1 ] && \
fw $action $mode m ${chain}_MSSFIX TCPMSS $ \
{ -o "$ifname" -p tcp --tcp-flags SYN,RST SYN --clamp-mss-to-pmtu $onet }
fw $action $mode f input ${chain} $ { -i "$ifname" $inet }
fw $action $mode f forward ${chain}_forward $ { -i "$ifname" $inet }
fw $action $mode n PREROUTING ${chain}_prerouting $ { -i "$ifname" $inet }
fw $action $mode r PREROUTING ${chain}_notrack $ { -i "$ifname" $inet }
fw $action $mode n POSTROUTING ${chain}_nat $ { -o "$ifname" $onet }
lock -u /var/run/firewall-interface.lock
}
local old_zones old_ifname old_subnets
config_get old_zones core "${iface}_zone"
[ -n "$old_zones" ] && {
config_get old_ifname core "${iface}_ifname"
config_get old_subnets core "${iface}_subnets"
local z
for z in $old_zones; do
local n
for n in ${old_subnets:-""}; do
fw_log info "removing $iface ($old_ifname${n:+ alias $n}) from zone $z"
fw__do_rules del $z $old_ifname $n
done
[ -n "$old_subnets" ] || {
fw__uci_state_del "${z}_networks" "$iface"
env -i ACTION=remove ZONE="$z" INTERFACE="$iface" DEVICE="$ifname" /sbin/hotplug-call firewall
}
done
local old_aliases
config_get old_aliases core "${iface}_aliases"
local a
for a in $old_aliases; do
fw_configure_interface "$a" del "$old_ifname"
done
uci_revert_state firewall core "${iface}_zone"
uci_revert_state firewall core "${iface}_ifname"
uci_revert_state firewall core "${iface}_subnets"
uci_revert_state firewall core "${iface}_aliases"
}
[ "$action" == del ] && return
[ -z "$aliasnet" ] && {
local aliases
config_get aliases "$iface" aliases
local a
for a in $aliases; do
local ipaddr netmask ip6addr
config_get ipaddr "$a" ipaddr
config_get netmask "$a" netmask
config_get ip6addr "$a" ip6addr
[ -n "$ipaddr" ] && fw_configure_interface "$a" add "" "$ipaddr${netmask:+/$netmask}"
[ -n "$ip6addr" ] && fw_configure_interface "$a" add "" "$ip6addr"
done
fw_sysctl_interface $ifname
fw_callback post interface
uci_toggle_state firewall core "${iface}_aliases" "$aliases"
} || {
local subnets=
config_get subnets core "${iface}_subnets"
append subnets "$aliasnet"
config_set core "${iface}_subnets" "$subnets"
uci_toggle_state firewall core "${iface}_subnets" "$subnets"
}
local new_zones=
load_zone() {
fw_config_get_zone "$1"
list_contains zone_network "$iface" || return
fw_log info "adding $iface ($ifname${aliasnet:+ alias $aliasnet}) to zone $zone_name"
fw__do_rules add ${zone_name} "$ifname" "$aliasnet"
append new_zones $zone_name
[ -n "$aliasnet" ] || {
fw__uci_state_add "${zone_name}_networks" "${zone_network}"
env -i ACTION=add ZONE="$zone_name" INTERFACE="$iface" DEVICE="$ifname" /sbin/hotplug-call firewall
}
}
config_foreach load_zone zone
uci_toggle_state firewall core "${iface}_zone" "$new_zones"
uci_toggle_state firewall core "${iface}_ifname" "$ifname"
}
fw_sysctl_interface() {
local ifname=$1
{
sysctl -w net.ipv4.conf.${ifname}.accept_redirects=$FW_ACCEPT_REDIRECTS
sysctl -w net.ipv6.conf.${ifname}.accept_redirects=$FW_ACCEPT_REDIRECTS
sysctl -w net.ipv4.conf.${ifname}.accept_source_route=$FW_ACCEPT_SRC_ROUTE
sysctl -w net.ipv6.conf.${ifname}.accept_source_route=$FW_ACCEPT_SRC_ROUTE
} >/dev/null 2>/dev/null
}
| kyak/openwrt-xburst | package/network/config/firewall/files/lib/core_interface.sh | Shell | gpl-2.0 | 5,716 |
#!/usr/bin/env bash
PHP_VERSION=$(php -r 'echo phpversion();' | cut -d '-' -f 1)
IMAGE_FAMILY=$(docker-image-info family)
| webdevops/Dockerfile | provisioning/php/general/provision/bootstrap.d/10-php-init.sh | Shell | mit | 123 |
#!/usr/bin/env bash
wait_on_pids()
{
# Wait on the last processes
for job in $1
do
wait $job
if [ "$?" -ne 0 ]
then
TestsFailed=$(($TestsFailed+1))
fi
done
}
usage()
{
echo "Runs .NET CoreFX tests on FreeBSD, Linux, NetBSD or OSX"
echo "usage: run-test [options]"
echo
echo "Input sources:"
echo " --runtime <location> Location of root of the binaries directory"
echo " containing the FreeBSD, Linux, NetBSD or OSX runtime"
echo " default: <repo_root>/bin/testhost/netcoreapp-<OS>-<ConfigurationGroup>-<Arch>"
echo " --corefx-tests <location> Location of the root binaries location containing"
echo " the tests to run"
echo " default: <repo_root>/bin"
echo
echo "Flavor/OS/Architecture options:"
echo " --configurationGroup <config> ConfigurationGroup to run (Debug/Release)"
echo " default: Debug"
echo " --os <os> OS to run (FreeBSD, Linux, NetBSD or OSX)"
echo " default: detect current OS"
echo " --arch <Architecture> Architecture to run (x64, arm, x86, arm64)"
echo " default: detect current architecture"
echo
echo "Execution options:"
echo " --sequential Run tests sequentially (default is to run in parallel)."
echo " --restrict-proj <regex> Run test projects that match regex"
echo " default: .* (all projects)"
echo " --useServerGC Enable Server GC for this test run"
echo " --test-dir <path> Run tests only in the specified directory. Path is relative to the directory"
echo " specified by --corefx-tests"
echo " --test-dir-file <path> Run tests only in the directories specified by the file at <path>. Paths are"
echo " listed one line, relative to the directory specified by --corefx-tests"
echo
echo "Runtime Code Coverage options:"
echo " --coreclr-coverage Optional argument to get coreclr code coverage reports"
echo " --coreclr-objs <location> Location of root of the object directory"
echo " containing the FreeBSD, Linux, NetBSD or OSX coreclr build"
echo " default: <repo_root>/bin/obj/<OS>.x64.<ConfigurationGroup"
echo " --coreclr-src <location> Location of root of the directory"
echo " containing the coreclr source files"
echo
exit 1
}
# Handle Ctrl-C.
function handle_ctrl_c {
local errorSource='handle_ctrl_c'
echo ""
echo "Cancelling test execution."
exit $TestsFailed
}
# Register the Ctrl-C handler
trap handle_ctrl_c INT
ProjectRoot="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# Location parameters
# OS/ConfigurationGroup defaults
ConfigurationGroup="Debug"
OSName=$(uname -s)
case $OSName in
Darwin)
OS=OSX
;;
FreeBSD)
OS=FreeBSD
;;
Linux)
OS=Linux
;;
NetBSD)
OS=NetBSD
;;
*)
echo "Unsupported OS $OSName detected, configuring as if for Linux"
OS=Linux
;;
esac
# Use uname to determine what the CPU is.
CPUName=$(uname -p)
# Some Linux platforms report unknown for platform, but the arch for machine.
if [ "$CPUName" == "unknown" ]; then
CPUName=$(uname -m)
fi
case $CPUName in
i686)
echo "Unsupported CPU $CPUName detected, test might not succeed!"
__Arch=x86
;;
x86_64)
__Arch=x64
;;
armv7l)
__Arch=arm
;;
aarch64)
__Arch=arm64
;;
*)
echo "Unknown CPU $CPUName detected, configuring as if for x64"
__Arch=x64
;;
esac
# Misc defaults
TestSelection=".*"
TestsFailed=0
ensure_binaries_are_present()
{
if [ ! -d $Runtime ]
then
echo "error: Coreclr $OS binaries not found at $Runtime"
exit 1
fi
}
# $1 is the path of the list file
read_array()
{
local theArray=()
while IFS='' read -r line || [ -n "$line" ]; do
theArray[${#theArray[@]}]=$line
done < "$1"
echo ${theArray[@]}
}
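# Usage sketch (hypothetical list file): given tests.txt containing one
# directory per line, e.g.
#   System.Collections.Tests
#   System.IO.FileSystem.Tests
# "selected=($(read_array tests.txt))" yields those entries as an array.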
run_selected_tests()
{
local selectedTests=()
if [ -n "$TestDirFile" ]; then
selectedTests=($(read_array "$TestDirFile"))
fi
if [ -n "$TestDir" ]; then
selectedTests[${#selectedTests[@]}]="$TestDir"
fi
run_all_tests ${selectedTests[@]/#/$CoreFxTests/}
}
# $@ are the paths of the test folders to run (e.g. under Unix.AnyCPU.Debug)
run_all_tests()
{
for testFolder in $@
do
run_test $testFolder &
pids="$pids $!"
numberOfProcesses=$(($numberOfProcesses+1))
if [ "$numberOfProcesses" -ge $maxProcesses ]; then
wait_on_pids "$pids"
numberOfProcesses=0
pids=""
fi
done
# Wait on the last processes
wait_on_pids "$pids"
pids=""
}
# $1 is the path to the test folder
run_test()
{
testProject=`basename $1`
# Check for project restrictions
if [[ ! $testProject =~ $TestSelection ]]; then
echo "Skipping $testProject"
exit 0
fi
dirName="$1/netcoreapp"
if [ ! -d "$dirName" ]; then
dirName="$1/netstandard"
if [ ! -d "$dirName" ]; then
echo "Nothing to test in $testProject"
return
fi
fi
if [ ! -e "$dirName/RunTests.sh" ]; then
echo "Cannot find $dirName/RunTests.sh"
return
fi
pushd $dirName > /dev/null
echo
echo "Running tests in $dirName"
echo "./RunTests.sh $Runtime"
echo
./RunTests.sh "$Runtime"
exitCode=$?
if [ $exitCode -ne 0 ]
then
echo "error: One or more tests failed while running tests from '$fileNameWithoutExtension'. Exit code $exitCode."
fi
popd > /dev/null
exit $exitCode
}
coreclr_code_coverage()
{
if [ ! "$OS" == "FreeBSD" ] && [ ! "$OS" == "Linux" ] && [ ! "$OS" == "NetBSD" ] && [ ! "$OS" == "OSX" ]
then
echo "error: Code Coverage not supported on $OS"
exit 1
fi
if [ "$CoreClrSrc" == "" ]
then
echo "error: Coreclr source files are required to generate code coverage reports"
echo "Coreclr source files root path can be passed using '--coreclr-src' argument"
exit 1
fi
local coverageDir="$ProjectRoot/bin/Coverage"
local toolsDir="$ProjectRoot/bin/Coverage/tools"
local reportsDir="$ProjectRoot/bin/Coverage/reports"
local packageName="unix-code-coverage-tools.1.0.0.nupkg"
rm -rf $coverageDir
mkdir -p $coverageDir
mkdir -p $toolsDir
mkdir -p $reportsDir
pushd $toolsDir > /dev/null
echo "Pulling down code coverage tools"
which curl > /dev/null 2> /dev/null
if [ $? -ne 0 ]; then
wget -q -O $packageName https://www.myget.org/F/dotnet-buildtools/api/v2/package/unix-code-coverage-tools/1.0.0
else
curl -sSL -o $packageName https://www.myget.org/F/dotnet-buildtools/api/v2/package/unix-code-coverage-tools/1.0.0
fi
echo "Unzipping to $toolsDir"
unzip -q -o $packageName
# Invoke gcovr
chmod a+rwx ./gcovr
chmod a+rwx ./$OS/llvm-cov
echo
echo "Generating coreclr code coverage reports at $reportsDir/coreclr.html"
echo "./gcovr $CoreClrObjs --gcov-executable=$toolsDir/$OS/llvm-cov -r $CoreClrSrc --html --html-details -o $reportsDir/coreclr.html"
echo
./gcovr $CoreClrObjs --gcov-executable=$toolsDir/$OS/llvm-cov -r $CoreClrSrc --html --html-details -o $reportsDir/coreclr.html
exitCode=$?
popd > /dev/null
exit $exitCode
}
# Parse arguments
RunTestSequential=0
((serverGC = 0))
while [[ $# -gt 0 ]]
do
opt="$1"
case $opt in
-h|--help)
usage
;;
--runtime)
Runtime=$2
;;
--corefx-tests)
CoreFxTests=$2
;;
--restrict-proj)
TestSelection=$2
;;
--configurationGroup)
ConfigurationGroup=$2
;;
--os)
OS=$2
;;
--coreclr-coverage)
CoreClrCoverage=ON
;;
--coreclr-objs)
CoreClrObjs=$2
;;
--coreclr-src)
CoreClrSrc=$2
;;
--sequential)
RunTestSequential=1
;;
--useServerGC)
((serverGC = 1))
;;
--test-dir)
TestDir=$2
;;
--test-dir-file)
TestDirFile=$2
;;
--outerloop)
OuterLoop=""
;;
--IgnoreForCI)
IgnoreForCI="-notrait category=IgnoreForCI"
;;
*)
;;
esac
shift
done
# Compute paths to the binaries if they haven't already been computed
if [ "$Runtime" == "" ]
then
Runtime="$ProjectRoot/bin/testhost/netcoreapp-$OS-$ConfigurationGroup-$__Arch"
fi
if [ "$CoreFxTests" == "" ]
then
CoreFxTests="$ProjectRoot/bin"
fi
# Check parameters up front for valid values:
if [ ! "$ConfigurationGroup" == "Debug" ] && [ ! "$ConfigurationGroup" == "Release" ]
then
echo "error: ConfigurationGroup should be Debug or Release"
exit 1
fi
if [ ! "$OS" == "FreeBSD" ] && [ ! "$OS" == "Linux" ] && [ ! "$OS" == "NetBSD" ] && [ ! "$OS" == "OSX" ]
then
echo "error: OS should be FreeBSD, Linux, NetBSD or OSX"
exit 1
fi
export CORECLR_SERVER_GC="$serverGC"
export PAL_OUTPUTDEBUGSTRING="1"
if [ "$LANG" == "" ]
then
export LANG="en_US.UTF-8"
fi
ensure_binaries_are_present
# Walk the test directories under $CoreFxTests (e.g. $OS.AnyCPU.$ConfigurationGroup)
TestsFailed=0
numberOfProcesses=0
if [ $RunTestSequential -eq 1 ]
then
maxProcesses=1;
else
if [ `uname` = "NetBSD" ]; then
maxProcesses=$(($(getconf NPROCESSORS_ONLN)+1))
else
maxProcesses=$(($(getconf _NPROCESSORS_ONLN)+1))
fi
fi
if [ -n "$TestDirFile" ] || [ -n "$TestDir" ]
then
run_selected_tests
else
run_all_tests "$CoreFxTests/AnyOS.AnyCPU.$ConfigurationGroup/"*.Tests
run_all_tests "$CoreFxTests/Unix.AnyCPU.$ConfigurationGroup/"*.Tests
run_all_tests "$CoreFxTests/$OS.AnyCPU.$ConfigurationGroup/"*.Tests
fi
if [ "$CoreClrCoverage" == "ON" ]
then
coreclr_code_coverage
fi
if [ "$TestsFailed" -gt 0 ]
then
echo "$TestsFailed test(s) failed"
else
echo "All tests passed."
fi
exit $TestsFailed
| billwert/corefx | run-test.sh | Shell | mit | 10,494 |
ttIsql -connStr "DSN=ldap_tt;Overwrite=1" -f backsql_create.sql
ttIsql -connStr "DSN=ldap_tt" -f tttestdb_create.sql
ttIsql -connStr "DSN=ldap_tt" -f tttestdb_data.sql
ttIsql -connStr "DSN=ldap_tt" -f tttestdb_metadata.sql
| mapr/impala | thirdparty/openldap-2.4.25/servers/slapd/back-sql/rdbms_depend/timesten/ttcreate_schema.sh | Shell | apache-2.0 | 223 |
#!/bin/bash
TGT_BASE=share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM
TGT="$PREFIX/$TGT_BASE"
[ -d "$TGT" ] || mkdir -p "$TGT"
[ -d "${PREFIX}/bin" ] || mkdir -p "${PREFIX}/bin"
cd "${SRC_DIR}"
# Minimum required for operation
cp RTG.jar $TGT
cp rtg $TGT
# Optional utility scripts (e.g. bash completion)
cp -rvp scripts $TGT
echo "RTG_TALKBACK=true # Attempt to send crash logs to realtime genomics, true to enable
RTG_USAGE=false # Enable simple usage logging, true to enable
RTG_JAVA_OPTS= # Additional arguments passed to the JVM
RTG_JAVA=/opt/anaconda1anaconda2anaconda3/bin/java # point to anaconda installed Java
RTG_JAR=/opt/anaconda1anaconda2anaconda3/${TGT_BASE}/RTG.jar" > $TGT/rtg.cfg
ln -s $TGT/rtg $PREFIX/bin
chmod 0755 "${PREFIX}/bin/rtg"
|
ostrokach/bioconda-recipes
|
recipes/rtg-tools/build.sh
|
Shell
|
mit
| 765 |
#!/bin/sh
# $Id: edit_cfg.sh,v 1.17 2008/08/30 19:44:25 tom Exp $
##############################################################################
# Copyright (c) 1998-2007,2008 Free Software Foundation, Inc. #
# #
# Permission is hereby granted, free of charge, to any person obtaining a #
# copy of this software and associated documentation files (the "Software"), #
# to deal in the Software without restriction, including without limitation #
# the rights to use, copy, modify, merge, publish, distribute, distribute #
# with modifications, sublicense, and/or sell copies of the Software, and to #
# permit persons to whom the Software is furnished to do so, subject to the #
# following conditions: #
# #
# The above copyright notice and this permission notice shall be included in #
# all copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL #
# THE ABOVE COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING #
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER #
# DEALINGS IN THE SOFTWARE. #
# #
# Except as contained in this notice, the name(s) of the above copyright #
# holders shall not be used in advertising or otherwise to promote the sale, #
# use or other dealings in this Software without prior written #
# authorization. #
##############################################################################
#
# Author: Thomas E. Dickey 1997-on
#
# Edit the default value of the etip.h file based on the autoconf-generated
# values:
#
# $1 = ncurses_cfg.h
# $2 = etip.h
#
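# Example invocation (file names here are illustrative):
#	sh edit_cfg.sh ncurses_cfg.h etip.h
#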
echo "substituting autoconf'd values from $1 into $2"
for name in \
CPP_HAS_PARAM_INIT \
CPP_HAS_STATIC_CAST \
ETIP_NEEDS_MATH_EXCEPTION \
ETIP_NEEDS_MATH_H \
HAVE_BUILTIN_H \
HAVE_GPP_BUILTIN_H \
HAVE_GXX_BUILTIN_H \
HAVE_IOSTREAM \
HAVE_TYPEINFO \
HAVE_VALUES_H \
IOSTREAM_NAMESPACE
do
rm -f $2.bak
mv $2 $2.bak
if ( grep "[ ]$name[ ]1" $1 >/dev/null 2>&1 )
then
value=1
sed -e 's/define '$name'.*$/define '$name' 1/' $2.bak >$2
else
value=0
sed -e 's/define '$name'.*$/define '$name' 0/' $2.bak >$2
fi
if (cmp -s $2 $2.bak)
then
echo '... '$name $value
mv $2.bak $2
else
echo '... '$name $value
rm -f $2.bak
fi
done
|
pmq20/ruby-compiler
|
vendor/ncurses/c++/edit_cfg.sh
|
Shell
|
mit
| 3,020 |
#!/bin/sh
# Exit if any errors
set -e
cd /root/cdnjs
echo Getting latest libraries
git add .
git diff --quiet --exit-code --cached || git commit -m 'Save local changes before rebase'
ls
git pull --rebase origin master
git rebase master
echo npm install for good measure
/usr/local/bin/npm install
/usr/local/bin/npm install -g vows
echo Starting auto update script
/usr/local/bin/node auto-update.js run >> node.log
echo Starting npm test
# run via "if !" so that "set -e" does not exit before we can report the failure
if ! /usr/local/bin/npm test; then
echo Something wrong, force exit.
exit 1
fi
git add .
git diff --quiet --exit-code --cached || git commit -am "Updated packages via NPM auto-update.js"
echo Pushing new versions if there is a real change
git push origin autoupdate
#if [ "`git diff -w`" != "" ]; then
# git add .
# git commit -am "Updated packages via auto-update.js"
# git pull --rebase
# git push
#fi
|
svvitale/cdnjs
|
auto-update.sh
|
Shell
|
mit
| 855 |
if [ "$(uname)" == "Darwin" ]
then
echo "› macos softwareupdate"
sudo softwareupdate -i -a
fi
|
vic3lord/dotfiles
|
macos/install.sh
|
Shell
|
isc
| 98 |
#!/bin/bash
sudo arp-scan --interface=wlp1s0 --localnet --numeric --quiet --ignoredups | grep -E '([a-f0-9]{2}:){5}[a-f0-9]{2}'|awk '{print $1}' > killthem.txt
echo "router's IP:"
echo "supply it as an argument to wifikill"
route -n|grep ^0.0.0.0|cut -d' ' -f 10
|
EdwardOwusuAdjei/Wifikill
|
listthem.sh
|
Shell
|
mit
| 251 |
#!/bin/bash
#PBS -q batch
#PBS -N rake
#PBS -l nodes=1:ppn=1
#PBS -j oe
#PBS -m abe
cd ${PBS_O_WORKDIR}
umask 002
rake all
|
misshie/Workflows
|
RakefileGATKv1.6-PBS-Dec2012/submit-rake.sh
|
Shell
|
mit
| 124 |
#!/usr/bin/env bash
##############################
# [init]
# Initializes project
sudo sh -c 'echo "source /usr/local/rvm/scripts/rvm" >> ~/.bash_login'
sudo sh -c 'echo "cd /vagrant" >> ~/.bash_login'
sudo sh -c 'echo "rvm use #{project.ruby_version}@#{project.gemset}" >> ~/.bash_login'
source /usr/local/rvm/scripts/rvm
rvm use #{project.ruby_version}@#{project.gemset} --create
gem install --no-rdoc --no-ri bundler
gem install --no-rdoc --no-ri rake
##############################
# [update]
# Updates project
cd "#{project.home}"
source /usr/local/rvm/scripts/rvm
rvm use #{project.ruby_version}@#{project.gemset}
bundle update
##############################
# [start]
# Starts tests
cd #{project.home}
source /usr/local/rvm/scripts/rvm
rvm use #{project.ruby_version}@#{project.gemset}
HEADLESS=1 VIDEO=1 rake
##############################
# [exec]
# Executes arbitrary command
cd #{project.home}
source /usr/local/rvm/scripts/rvm
rvm use #{project.ruby_version}@#{project.gemset}
#{ARGV}
|
shvets/acceptance_test2
|
provision/project_provision.sh
|
Shell
|
mit
| 1,021 |
#!/bin/bash
base_dir="`pwd`"
lib_dir="${base_dir}/lib/"
local_lib_dir="${base_dir}/local_lib/"
CLASSPATH="${base_dir}/build/classes/:${base_dir}/build/tool/classes/:${base_dir}/build/test/:${base_dir}/build/tool/"
for f in ${lib_dir}*.jar; do
CLASSPATH=${CLASSPATH}:$f;
done
for f in ${local_lib_dir}*.jar; do
CLASSPATH=${CLASSPATH}:$f;
done
if [ "$1" = "" ]; then
JAVACMD="echo \"USAGE: runjava.sh <-profile> [JAVA_CMD_PARAMS]\""
else
## -Duser.dir=${lib_dir}
JAVACMD="java -Xloggc:jvm.log -server -Xms500m -Xmn400m -Xmx500m -XX:MaxPermSize=212m -Dfile.encoding=UTF8 -classpath $CLASSPATH $*"
fi
## echo "------------------------------------------------------"
## echo $JAVACMD
## echo "------------------------------------------------------"
$JAVACMD
## >> filelog.log 2>&1
|
wfxiang08/rpc_proxy_java
|
scripts/runjava.sh
|
Shell
|
mit
| 802 |
_ssh()
{
local cur prev opts
COMPREPLY=()
cur="${COMP_WORDS[COMP_CWORD]}"
prev="${COMP_WORDS[COMP_CWORD-1]}"
opts=$(grep '^Host' ~/.ssh/config | grep -v '[?*]' | cut -d ' ' -f 2-)
COMPREPLY=( $(compgen -W "$opts" -- ${cur}) )
return 0
}
complete -F _ssh ssh
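# Example: with a (hypothetical) ~/.ssh/config containing
#   Host webserver
#   Host db-*
# typing `ssh web<TAB>` completes to `ssh webserver`; the `db-*` entry is
# skipped because hosts containing wildcards are filtered out above.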
|
martinrist/dotfiles
|
.completion.d/ssh-completion.bash
|
Shell
|
mit
| 287 |
#!/bin/sh
mkdir -p server/public
cd server/public
ln -s ../../client/vendor
ln -s ../../client/src
exit 0
|
zappan/mean-boilerplate
|
scripts/devsymlink.sh
|
Shell
|
mit
| 109 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for CESA-2016:1137
#
# Security announcement date: 2016-05-31 12:09:14 UTC
# Script generation date: 2017-01-01 21:11:48 UTC
#
# Operating System: CentOS 5
# Architecture: x86_64
#
# Vulnerable packages fix on version:
# - openssl.i686:0.9.8e-40.el5_11
# - openssl-devel.i386:0.9.8e-40.el5_11
# - openssl.x86_64:0.9.8e-40.el5_11
# - openssl-devel.x86_64:0.9.8e-40.el5_11
# - openssl-perl.x86_64:0.9.8e-40.el5_11
#
# Last versions recommended by security team:
# - openssl.i686:0.9.8e-40.el5_11
# - openssl-devel.i386:0.9.8e-40.el5_11
# - openssl.x86_64:0.9.8e-40.el5_11
# - openssl-devel.x86_64:0.9.8e-40.el5_11
# - openssl-perl.x86_64:0.9.8e-40.el5_11
#
# CVE List:
# - CVE-2016-2108
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo yum install openssl.i686-0.9.8e -y
sudo yum install openssl-devel.i386-0.9.8e -y
sudo yum install openssl.x86_64-0.9.8e -y
sudo yum install openssl-devel.x86_64-0.9.8e -y
sudo yum install openssl-perl.x86_64-0.9.8e -y
|
Cyberwatch/cbw-security-fixes
|
CentOS_5/x86_64/2016/CESA-2016:1137.sh
|
Shell
|
mit
| 1,123 |
#!/bin/zsh
# If fzf and fd-find do not exist => exit
! [ -f "$HOME/.fzf.zsh" ] && return 0
! [ -f "$HOME/.cargo/bin/fd" ] && return 0
# Use fd-find as FZF engine
export FZF_DEFAULT_COMMAND='fd --type file --color=always'
export FZF_CTRL_T_COMMAND="$FZF_DEFAULT_COMMAND"
export FZF_ALT_C_COMMAND='fd --type directory --color=always -d 1 -L'
export FZF_DEFAULT_OPTS="--ansi"
|
hust921/dotfiles
|
custom/settings.zsh
|
Shell
|
mit
| 374 |
echo " Time LPG CO Smoke Temp Humidity PM 2.5 "
cat [2*
#[2017-03-31 22:05:31] 0.0000 0.0000 0.0000 24.0000 30.0000 20.0816
|
MrDxxK/Air-pollution-monitor
|
Web/ShowData.sh
|
Shell
|
mit
| 157 |
#!/bin/bash
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
assert_ok "$FLOW" autofix exports --in-place a.js
assert_ok "$FLOW" force-recheck a.js
echo "> cat a.js"
cat a.js
echo "> flow status"
assert_ok "$FLOW" status
|
samwgoldman/flow
|
tests/autofix-numeric-literals/test.sh
|
Shell
|
mit
| 350 |
set -e
bash get_gm12878.sh 1000000 22
bash get_gm12878.sh 100000 22
BEDPATH=hic_data/GM12878_combined_22_100kb.bed
#Chromosome3D
#create input
INPUT_PATH=Chromosome3D/input/GM12878_combined_22_100kb.txt
if [ ! -e $INPUT_PATH ]
then
python chromosome3d_input.py $BEDPATH $INPUT_PATH
fi
#run
perl Chromosome3D/chromosome3D.pl -i $INPUT_PATH -o Chromosome3D/output_models/chr22_100kb -m 1
#process output
cat Chromosome3D/output_models/chr22_100kb/GM12878_combined_22_100kb_1.pdb | awk '$1 == "ATOM" {print $6"\t"$7"\t"$8}' > Chromosome3D/output_models/chr22_100kb/chr22_100kb_coords.tsv
#mMDS
python ../minimds.py -o hic_data/GM12878_combined_22_100kb_mmds_coords.tsv $BEDPATH
#cMDS
python ../minimds.py --classical -o hic_data/GM12878_combined_22_100kb_cmds_coords.tsv $BEDPATH
#miniMDS
python ../minimds.py -l hic_data/GM12878_combined_22_1mb.bed -p 0.01 -m 0.01 -o hic_data/GM12878_combined_22_100kb_minimds_coords.tsv $BEDPATH
#MOGEN
#install
bash install_mogen.sh
#create input
INPUT_PATH=MOGEN/examples/hiC/input/GM12878_combined_22_100kb.tsv
if [ ! -e $INPUT_PATH ]
then
python mogen_input.py $BEDPATH $INPUT_PATH
fi
#run
java -jar MOGEN/examples/hiC/3DGenerator.jar parameters_chr22_100kb.txt
#process output
REP_NUM=1
for f in MOGEN/examples/hiC/output/GM12878_combined_22_100kb_*.pdb
do
cat $f | awk '$1 == "ATOM" {print $6"\t"$7"\t"$8}' > "MOGEN/examples/hiC/output/GM12878_combined_22_100kb_rep"$REP_NUM"_coords.tsv"
REP_NUM=$(($REP_NUM+1))
done
#HSA
#install
bash install_hsa.sh
#create input
INPUT_PATH=hsa/GM12878_combined_22_100kb.tsv
if [ ! -e $INPUT_PATH ]
then
python hsa_input.py $BEDPATH $INPUT_PATH
fi
cd hsa
#run
Rscript myR.R GM12878_combined_22_100kb.tsv 0 GM12878_combined_22_100kb_coords 1
cd ..
#ChromSDE
#install
#bash install_chromsde.sh
#create input
#CONTACTS_PATH=ChromSDE/chr22_100kb_contacts.dat
#IDS_PATH=ChromSDE/chr22_100kb_ids.dat
#python chromsde_input.py $BEDPATH $CONTACTS_PATH $IDS_PATH
#cd ChromSDE
#run
#matlab -nodisplay -nosplash -nodesktop -r "run('run_chromsde_100kb(22)')"
#process output
#cat contacts_100kb.pos.pdb | awk '$1 == "ATOM" {print $6"\t"$7"\t"$8}' > GM12878_combined_22_100kb_coords.tsv
#cd ..
python sup3.py
|
seqcode/miniMDS
|
scripts/sup3.sh
|
Shell
|
mit
| 2,216 |
#!/bin/bash
source ./one.sh
echo "your site: ${url}"
|
xudong7930/centos6
|
shell/w3cshell/13.文件包含.sh
|
Shell
|
mit
| 54 |
#!/usr/bin/env bash
#
# This flushes the site-wide cache
#
DIR="$( builtin cd "$( dirname "$( readlink -f "${BASH_SOURCE[0]}" )" )" && pwd )"
source $DIR/load_config.sh
$VENV_DIR/bin/python $SRC_DIR/manage.py clear_cache
|
seanbell/django-scripts
|
clear_cache.sh
|
Shell
|
mit
| 223 |
#!/bin/bash
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# exit on any error
set -e
source $(dirname $0)/provision-config.sh
MINION_IP=$4
MINION_INDEX=$5
# we will run provision to update code each time we test, so we do not want to do salt install each time
if [ ! -f "/var/salt-vagrant-setup" ]; then
if ! grep -q "$MASTER_NAME" /etc/hosts; then
echo "Adding host entry for $MASTER_NAME"
echo "$MASTER_IP $MASTER_NAME" >> /etc/hosts
fi
# Prepopulate the name of the Master
mkdir -p /etc/salt/minion.d
echo "master: $MASTER_NAME" > /etc/salt/minion.d/master.conf
# Our minions will have a pool role to distinguish them from the master.
cat <<EOF >/etc/salt/minion.d/grains.conf
grains:
minion_ip: $MINION_IP
roles:
- salt-pool
kafka:
broker_id: $MINION_INDEX
EOF
curl -sS -L http://bootstrap.saltstack.com | sh -s -- -X
# a file we touch to state that base-setup is done
echo "Salt configured" > /var/salt-vagrant-setup
fi
|
whisklabs/salt-cassandra-formula
|
vagrant/provision-minion.sh
|
Shell
|
mit
| 1,536 |
#!/bin/sh
# Compress JS
java -jar compiler.jar --js ../src/* --js ../desk.js --js ../libs/bootstrap-dropdown.js --js ../libs/jquery.ui.touch-punch.min.js --js_output_file ../script.js
# Compress CSS
#if [ -e combined.css ]; then rm combined.css; fi
#cat ../*.css > style.css
yui-compressor ../common.css -o ../common.min.css
yui-compressor ../desk.css -o ../desk.min.css
|
holalabs/holadesk
|
tools/compress.sh
|
Shell
|
mit
| 372 |
# Initialization for raspberry pi 3
# Assuming debian installation
sudo apt-get update -y
sudo apt-get dist-upgrade -y
sudo apt-get upgrade -y
sudo apt-get autoremove -y
sudo apt-get install -y build-essential
sudo apt-get install -y htop
# Emacs
# sudo nano /etc/apt/sources.list
# uncomment the 3rd line so that archive is on
sudo apt-get update
sudo apt-get build-dep emacs24
cd ~/lib
wget http://ftp.gnu.org/gnu/emacs/emacs-25.2.tar.gz
tar -zxf emacs-25.2.tar.gz
cd emacs-25.2
./configure
sudo make install
sudo lib-src/blessmail /usr/local/libexec/emacs/25.2/armv7l-unknown-linux-gnueabihf/movemail
git clone https://github.com/syl20bnr/spacemacs ~/.emacs.d
# Python
sudo apt-get install python-dev
curl -O https://bootstrap.pypa.io/get-pip.py
sudo python get-pip.py
# sudo pip install virtualenv
sudo apt-get install python-virtualenv
|
zsunjian/sys_bookstrap
|
pi_init.sh
|
Shell
|
mit
| 839 |
#!/usr/bin/env bash
docker build -t vposvistelik/apt-cacher-ng .
|
v2p/pub
|
dockers/apt-cacher-ng/build.sh
|
Shell
|
mit
| 65 |
#!/bin/sh
#
# Script that converts TGnotes documents to Markdown
#
# Copyright © 2014 Tiancheng "Timothy" Gu
# Licensed under the MIT License.
# This script is only tested on GNU Sed.
# Usage: <this file> <input.tgnotes> [<output.md>]
# If the output file is not specified it is assumed to be standard output
if [ "$#" -lt "1" ] || [ "$#" -gt "2" ]; then
echo "Incorrect command syntax" >&2
exit 1
else
in=$1
if [ "$#" -eq "2" ]; then
out="$2"
else
out='/dev/stdout'
fi
fi
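# Example transformation (hypothetical input lines):
#   "Chapter 1"            -> "# Chapter 1"
#   "II.<TAB>Heading"      -> "## II. Heading"
#   "<TAB>A.<TAB>Sub"      -> blank line + "### A. Sub"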
sed -r -e 's/^(Chapter)/\# \1/g' \
-e 's/^([IVXLCDM]+\.)\t/\#\# \1 /g' \
-e 's/^\t([A-Z]+\.)\t/\n\#\#\# \1 /g' \
-e 's/\t//g' \
"$in" > "$out"
|
TimothyGu/tgnotes
|
notes2md.sh
|
Shell
|
mit
| 728 |
#!/usr/bin/env bash
set -eo pipefail
mkdir -p ~/Documents/development/code/
mkdir -p ~/Documents/images/deviant/
|
cfalcondair/dotfiles
|
install/dirs.sh
|
Shell
|
mit
| 116 |
#!/bin/bash
#
# Use this file to quickly change the app version.
# It will also tag, commit the change and push it.
#
# Usage: ./version.sh 1.2.0
# Check $1
if [ -z "$1" ]
then
echo "Version is required."
exit 1
fi
# Replace version in package.json files
sed -i.bak "s/\"version\": \".*\"/\"version\": \"$1\"/g" ./package.json
sed -i.bak "s/\"version\": \".*\"/\"version\": \"$1\"/g" ./src/package.json
sed -i.bak "s/download\/v.*\/iDumai/download\/v$1\/iDumai/g" ./src/package.json
# Clean up
rm ./package.json.bak
rm ./src/package.json.bak
# Edit CHANGELOG
vim ./CHANGELOG
# Git commit
git add .
git commit -m "New version v$1"
git tag -a "v$1" -m "v$1"
# TODO Paste all commits since the last tag into CHANGELOG
|
eyeyunianto/aksara
|
iDumai/version.sh
|
Shell
|
mit
| 720 |
#!/bin/sh
# ---
# image:
# tag: "redis:$VG_REDIS_VERSION"
# network: $VG_DOCKER_NETWORK
# interactive: true
# tty: true
# rm: true
# environment:
# - VG_REDIS_VERSION=latest
# - VG_DOCKER_NETWORK=vg_redis
# ---
if [ -n "$1" ] ; then
redis-cli -u "$REDIS_URL" "$@"
else
redis-cli -u "$REDIS_URL"
fi
|
vantage-org/redis
|
plugins/redis/run.sh
|
Shell
|
mit
| 318 |
#!/usr/bin/env bash
mkdir -p build
cd build
cmake ..
make
ctest
|
ParadoxZero/stmp
|
run_test.sh
|
Shell
|
mit
| 63 |
#!/bin/bash
echo "Start testing."
./test/bats/bin/bats ./test/tests
|
NicoVIII/CloudBackupEncryption
|
test/test.sh
|
Shell
|
mit
| 69 |
#!/usr/bin/env bash
# set params
#input_dataset_train=data/conll16st-en-01-12-16-trial
#input_dataset_test=data/conll16st-en-01-12-16-trial
input_dataset_train=data/conll16-st-train-en-2016-03-29
input_dataset_test=data/conll16st-en-03-29-16-test
run_type=svm_base
run_name=${run_type}_sup_v2_hier_depemb_tr16test16
if [ -n "$1" ]
then
run_name=$1
fi
#output dir for parsing results - used for test operations
output_dir=output/${run_name}
mkdir -p ${output_dir}
#model dir where output models are saved after train
model_dir=models/${run_name}
rm -rf -- ${model_dir}
mkdir -p ${model_dir}
scale_features=True
# resources
word2vec_model=resources/external/w2v_embeddings/qatarliving_qc_size20_win10_mincnt5_rpl_skip1_phrFalse_2016_02_23.word2vec.bin
# word2vec_model=resources/closed_track/word2vec_google/GoogleNews-vectors-negative300.bin
word2vec_load_bin=False
# word2vec_load_bin=True # for google pretrained embeddings
deps_model=resources/external/dep_embeddings/deps_words_300
brownclusters_file=/resources/closed_track/brown_clusters/brown-rcv1.clean.tokenized-CoNLL03.txt-c320-freq1.txt
log_file=${run_name}_$(date +%y-%m-%d-%H-%M).log
. sup_parser_v2_hierarchy_optimized_run_partial.sh # > ${log_file}
|
jimmycallin/master-thesis
|
architectures/conll16st-hd-sdp/sup_parser_v2_hierarchy_optimized_run_train2016_test2016.sh
|
Shell
|
mit
| 1,250 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for DSA-3394-1
#
# Security announcement date: 2015-11-05 00:00:00 UTC
# Script generation date: 2017-01-01 21:07:36 UTC
#
# Operating System: Debian 7 (Wheezy)
# Architecture: x86_64
#
# Vulnerable packages fix on version:
# - libreoffice:1:3.5.4+dfsg2-0+deb7u5
#
# Last versions recommended by security team:
# - libreoffice:1:3.5.4+dfsg2-0+deb7u8
#
# CVE List:
# - CVE-2015-4551
# - CVE-2015-5212
# - CVE-2015-5213
# - CVE-2015-5214
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo apt-get install --only-upgrade libreoffice=1:3.5.4+dfsg2-0+deb7u8 -y
|
Cyberwatch/cbw-security-fixes
|
Debian_7_(Wheezy)/x86_64/2015/DSA-3394-1.sh
|
Shell
|
mit
| 710 |
if [[ $(pamixer --get-mute) == "true" ]]; then
level="mute"
icon=''
else
level=$(pamixer --get-volume)
if [ "$level" -eq 0 ]; then
icon=''
elif [ "$level" -lt 33 ]; then
icon=''
elif [ "$level" -lt 66 ]; then
icon=''
else
icon=''
fi
level="$level%"
fi
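# Clickable-area wrappers in tint2/lemonbar %{A:...} syntax; defined below but
# not included in the echoed output, so presumably wired up by the caller.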
open_cmd="%{A:pavucontrol:}"
set_cmd="%{A3:pamixer --set-volume 20:}"
inc_cmd="%{A4:pamixer -i 5:}"
dec_cmd="%{A5:pamixer -d 5:}"
echo "$icon"
|
Iambecomeroot/dotfiles
|
tint2/.config/tint2/volume-icon.sh
|
Shell
|
mit
| 440 |
#!/bin/bash
# dnase-pool-bioreps.sh - Pools two biological replicates for the ENCODE DNase-seq pipeline.
main() {
# Executables in resources/usr/bin
set +x
# If available, will print tool versions to stderr and json string to stdout
if [ -f /usr/bin/tool_versions.py ]; then
versions=`tool_versions.py --dxjson dnanexus-executable.json`
fi
echo "* Value of bam_A: '$bam_A'"
echo "* Value of bam_B: '$bam_B'"
echo "* Value of peaks_A: '$peaks_A'"
echo "* Value of peaks_B: '$peaks_B'"
echo "* Value of signal_A: '$signal_A'"
echo "* Value of signal_B: '$signal_B'"
echo "* Value of chrom_sizes: '$chrom_sizes'"
echo "* Download files..."
bam_A_root=`dx describe "$bam_A" --name`
bam_A_root=${bam_A_root%_filtered.bam}
dx download "$bam_A" -o bam_A.bam
echo "* bam_A file: '${bam_A_root}_filtered.bam'"
bam_B_root=`dx describe "$bam_B" --name`
bam_B_root=${bam_B_root%_filtered.bam}
dx download "$bam_B" -o bam_B.bam
echo "* bam_B file: '${bam_B_root}_filtered.bam'"
out_root="${bam_A_root}_${bam_B_root}"
### bam_pooled_root="${bam_A_root}_${bam_B_root}_pooled"
### echo "* bam_pooled will be: '${bam_pooled_root}.bam'"
peaks_A_root=`dx describe "$peaks_A" --name`
peaks_A_root=${peaks_A_root%_narrowPeak_hotspot.bb}
dx download "$peaks_A" -o peaks_A.bb
echo "* peaks_A file: '${peaks_A_root}_narrowPeak_hotspot.bb'"
peaks_B_root=`dx describe "$peaks_B" --name`
peaks_B_root=${peaks_B_root%_narrowPeak_hotspot.bb}
dx download "$peaks_B" -o peaks_B.bb
echo "* peaks_B file: '${peaks_B_root}_narrowPeak_hotspot.bb'"
### peaks_root="${peaks_A_root}_${peaks_B_root}"
### peaks_merged_root="${peaks_root}_merged_narrowPeak"
### echo "* peaks_merged will be: '${peaks_merged_root}.bed/.bb'"
signal_A_root=`dx describe "$signal_A" --name`
signal_A_root=${signal_A_root%_signal_hotspot.bw}
dx download "$signal_A" -o signal_A.bw
echo "* signal_A file: '${signal_A_root}_signal_hotspot.bw'"
signal_B_root=`dx describe "$signal_B" --name`
signal_B_root=${signal_B_root%_signal_hotspot.bw}
dx download "$signal_B" -o signal_B.bw
echo "* signal_B file: '${signal_B_root}_signal_hotspot.bw'"
### signal_root="${signal_A_root}_${signal_B_root}_signal"
dx download "$chrom_sizes" -o chrom.sizes
read_len_A=`parse_property.py -f "$bam_A" -p "read_length" --quiet`
read_len_B=`parse_property.py -f "$bam_B" -p "read_length" --quiet`
if [ "$read_len_A" == "" ]; then
echo "* Running edwBamStats on 'bam_A.bam'"
set -x
edwBamStats bam_A.bam bam_A_edwBamStats.txt
set +x
read_len_A=`qc_metrics.py -n edwBamStats -f bam_A_edwBamStats.txt -k readSizeMean`
fi
if [ "$read_len_B" == "" ]; then
echo "* Running edwBamStats on 'bam_B.bam'"
set -x
edwBamStats bam_B.bam bam_B_edwBamStats.txt
set +x
read_len_B=`qc_metrics.py -n edwBamStats -f bam_B_edwBamStats.txt -k readSizeMean`
fi
if [ "$read_len_A" != "" ] && [ "$read_len_B" != "" ] && [ "$read_len_A" != "$read_len_B" ]; then
echo "* WARNING: Read lengths of two bam files do not match."
fi
echo "* ===== Calling DNAnexus and ENCODE independent script... ====="
set -x
dnase_pooled_reps.sh bam_A.bam bam_B.bam peaks_A.bb peaks_B.bb signal_A.bw signal_B.bw chrom.sizes $out_root
set +x
echo "* ===== Returned from dnanexus and encodeD independent script ====="
bam_pooled_root="${out_root}_pooled"
peaks_merged_root="${out_root}_merged_narrowPeak"
signal_root="${out_root}_signal"
echo "* Compressing bed files..."
set -x
gzip ${peaks_merged_root}.bed
set +x
echo "* Prepare metadata..."
qc_pooled=''
qc_peaks=''
qc_signal=''
reads=0
read_len=0
if [ -f /usr/bin/qc_metrics.py ]; then
qc_pooled=`qc_metrics.py -n edwBamStats -f ${bam_pooled_root}_edwBamStats.txt`
reads=`qc_metrics.py -n edwBamStats -f ${bam_pooled_root}_edwBamStats.txt -k readCount`
read_len=`qc_metrics.py -n edwBamStats -f ${bam_pooled_root}_edwBamStats.txt -k readSizeMean`
#qc_signal=`qc_metrics.py -n bigWigCorrelate -f ${signal_root}_corr_qc.txt`
qc_signal=`qc_metrics.py -n singleton -f ${signal_root}_corr_qc.txt -k "bigWigCorrelate" --keypair "bigWigCorrelate"`
qc_peaks=`qc_metrics.py -n edwComparePeaks -f ${out_root}_peaks_overlap_qc.txt`
qc_pooled=`echo $qc_pooled, $qc_signal, $qc_peaks`
fi
# All qc to one file per target file:
echo "===== edwBamStats =====" > ${bam_pooled_root}_qc.txt
cat ${bam_pooled_root}_edwBamStats.txt >> ${bam_pooled_root}_qc.txt
echo " " >> ${bam_pooled_root}_qc.txt
echo "===== bigWigCorrelate =====" >> ${bam_pooled_root}_qc.txt
cat ${signal_root}_corr_qc.txt >> ${bam_pooled_root}_qc.txt
echo " " >> ${bam_pooled_root}_qc.txt
echo "===== edwComparePeaks =====" >> ${bam_pooled_root}_qc.txt
cat ${out_root}_peaks_overlap_qc.txt >> ${bam_pooled_root}_qc.txt
echo "* Upload results..."
bam_pooled=$(dx upload ${bam_pooled_root}.bam --details "{ $qc_pooled }" --property SW="$versions" \
--property reads="$reads" --property read_length="$read_len" --brief)
bed_merged=$(dx upload ${peaks_merged_root}.bed.gz --details "{ $qc_peaks }" --property SW="$versions" --brief)
bb_merged=$(dx upload ${peaks_merged_root}.bb --details "{ $qc_peaks }" --property SW="$versions" --brief)
pooled_qc=$(dx upload ${bam_pooled_root}_qc.txt --details "{ $qc_pooled }" --property SW="$versions" --brief)
dx-jobutil-add-output bam_pooled "$bam_pooled" --class=file
dx-jobutil-add-output bed_merged "$bed_merged" --class=file
dx-jobutil-add-output bb_merged "$bb_merged" --class=file
dx-jobutil-add-output pooled_qc "$pooled_qc" --class=file
dx-jobutil-add-output reads "$reads" --class=string
dx-jobutil-add-output metadata "{ $qc_signal }" --class=string
echo "* Finished."
}
|
ENCODE-DCC/dnase_pipeline
|
dnanexus/old/dnase-pool-bioreps/src/dnase-pool-bioreps.sh
|
Shell
|
mit
| 6,221 |
#!/bin/bash
echo -e "Executing docs"
if [ "$TRAVIS_PULL_REQUEST" == "false" ] && [ "$TRAVIS_BRANCH" == "master" ]; then
echo -e "Generating Jazzy output \n"
jazzy --clean --module-version 1.0.6 --author "Oleh Kulykov" --author_url http://www.resident.name --github_url https://github.com/OlehKulykov/OKAlertController --xcodebuild-arguments "-scheme,OKAlertController" --module OKAlertController --root-url http://olehkulykov.github.io/OKAlertController --theme apple --swift-version 2.2 --min-acl public --readme README.md
pushd docs
echo -e "Creating gh-pages\n"
git init
git config user.email ${GIT_EMAIL}
git config user.name ${GIT_NAME}
git add -A
git commit -m "Documentation from Travis build of $TRAVIS_COMMIT"
git push --quiet --force "https://${GH_TOKEN}@github.com/OlehKulykov/OKAlertController.git" master:gh-pages > /dev/null 2>&1
echo -e "Published documentation to gh-pages.\n"
popd
fi
|
OlehKulykov/OKAlertController
|
generate_docs.sh
|
Shell
|
mit
| 968 |
#!/bin/bash
# This file is part of the RemoteVideoWatcher package.
# (c) Alexander Lukashevich <[email protected]>
# For the full copyright and license information, please view the LICENSE file that was distributed with this source code.
MACHINE_ARCH=$(uname -m | cut -c1-3 | tr '[:lower:]' '[:upper:]')
WORK_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
BUILD_DIR=$WORK_DIR/build
DOCKER_ARM_IMAGE=sdhibit/rpi-raspbian
DOCKER_X86_IMAGE=ubuntu
DOCKERFILE_CORDOVA_PATH=$WORK_DIR/Dockerfile-cordova
DOCKERFILE_SERVER_PATH=$WORK_DIR/Dockerfile-server
BUILD_DOCKERFILE_SERVER_PATH=$BUILD_DIR/Dockerfile-server
CERTIFICATE_KEY_PATH=$BUILD_DIR/server.key
CERTIFICATE_CRT_PATH=$BUILD_DIR/server.crt
MJPG_STREAMER_BIN=$BUILD_DIR/mjpg_streamer
CONFIG_FILE=$WORK_DIR/config.yml
SERVER_JS_FILE_NAME=server.js
SERVER_SH_FILE_NAME=cameras.sh
CLIENT_JS_FILE_NAME=index.js
SERVER_JS_FILE_PATH=$WORK_DIR/server/$SERVER_JS_FILE_NAME
SERVER_SH_FILE_PATH=$WORK_DIR/server/$SERVER_SH_FILE_NAME
CLIENT_JS_FILE_PATH=$WORK_DIR/client/www/js/$CLIENT_JS_FILE_NAME
BUILD_SERVER_JS_FILE_PATH=$BUILD_DIR/$SERVER_JS_FILE_NAME
BUILD_SERVER_SH_FILE_PATH=$BUILD_DIR/$SERVER_SH_FILE_NAME
BUILD_CLIENT_JS_FILE_PATH=$BUILD_DIR/$CLIENT_JS_FILE_NAME
NEW_LINE=$'\n'
###################################
######### Program Entry Point #######
#################################
function main {
if [ "$MACHINE_ARCH" != "ARM" ] && [ "$MACHINE_ARCH" != "X86" ]; then
echo "This program can work only at X86 or ARM machine. Abort!"
exit 1
fi
case "$1" in
"build-server")
buildCameraServer "$@"
;;
"build-client")
buildCameraClient
;;
"start-server-daemon")
startCameraServer
;;
"docker-camera-cordova-fix-files")
# you mustn't use this command yourself
fixCordovaSslPlugin
;;
"docker-camera-cordova-build")
# you mustn't use this command yourself
buildApkInDockerContainer "$@"
;;
*)
echo "Wrong command. Available commands are:$NEW_LINE$NEW_LINE \
1) build-server [--regenerate-token-crypt-phrase] [--regenerate-server-cert] [--recompile-mjpgstreamer]$NEW_LINE \
Create a docker image which consists of everything what you need to start using this program.$NEW_LINE \
The image includes the NodeJS server to process clients (from web browsers and from mobile applications)$NEW_LINE \
and Mjpg-streamer utility to capture video from web-cameras and send it to NodeJS server.$NEW_LINE$NEW_LINE \
2) build-client$NEW_LINE \
Create .apk file which you can install to your android devices and \
start using this program in the same way as a web browser.$NEW_LINE$NEW_LINE \
3) start-server-daemon$NEW_LINE \
Run server daemon in background.$NEW_LINE Usually you add this command in \
system-autorun on the server machine after running 'build-server' command at least one time.$NEW_LINE"
exit 1
;;
esac
}
###########################################
#### Create docker image to use on the server ###
##########################################
function buildCameraServer {
validateConfigFile
prepareBuildDirectory
# regenerate parts if required
for arg in "$@"
do
case "$arg" in
"--regenerate-token-crypt-phrase")
generateTokenCryptPhrase
;;
"--regenerate-server-cert")
generateServerCertificate
;;
"--recompile-mjpgstreamer")
compileMjpgStreamer
;;
esac
done
# generate necessary parts if not found
if [ "$(egrep -c 'certificate_fingerprint:.*none' $CONFIG_FILE)" -gt 0 ] || [ ! -f $CERTIFICATE_KEY_PATH ] || [ ! -f $CERTIFICATE_CRT_PATH ]; then
echo 'Server certificate files do not exist or certificate_fingerprint parameter is empty!'
generateServerCertificate
fi
if [ "$(egrep -c 'token_crypt_phrase:.*none' $CONFIG_FILE)" -gt 0 ]; then
echo 'Token_crypt_phrase parameter is empty!'
generateTokenCryptPhrase
fi
if [ ! -f $MJPG_STREAMER_BIN ]; then
echo 'Mjpg-streamer binary do not exist!'
compileMjpgStreamer
fi
# create copy of main files
cp $SERVER_JS_FILE_PATH $BUILD_SERVER_JS_FILE_PATH
cp $SERVER_SH_FILE_PATH $BUILD_SERVER_SH_FILE_PATH
cp $CLIENT_JS_FILE_PATH $BUILD_CLIENT_JS_FILE_PATH
# make changes in main files according to config.yml
replaceConfigParamsInBuildFiles \
$BUILD_SERVER_JS_FILE_PATH \
$BUILD_SERVER_SH_FILE_PATH \
$BUILD_CLIENT_JS_FILE_PATH
# create docker image for server
docker build -t alex_dwt/remote-video-watcher-server -f $BUILD_DOCKERFILE_SERVER_PATH $WORK_DIR
echo 'Docker image with camera-server successfully created!'
}
#########################################################
## Create android application (.apk file) to install on SmartPhone ##
#######################################################
function buildCameraClient {
if [ "$MACHINE_ARCH" != "X86" ]; then
echo "You can build client only on X86 machine. Abort!"
exit 1
fi
validateConfigFile
prepareBuildDirectory
# check necessary params in config.yml for client_js
if [ "$(egrep -c 'certificate_fingerprint:.*none' $CONFIG_FILE)" -gt 0 ]; then
echo "Certificate_fingerprint parameter is empty! Use 'build-server' command at first please. Abort!"
exit 1
fi
if [ "$(egrep -c 'token_crypt_phrase:.*none' $CONFIG_FILE)" -gt 0 ]; then
echo "Token_crypt_phrase parameter is empty! Use 'build-server' command at first please. Abort!"
exit 1
fi
# create copy of js file
cp $CLIENT_JS_FILE_PATH $BUILD_CLIENT_JS_FILE_PATH
# make changes in js file according to config.yml
replaceConfigParamsInBuildFiles $BUILD_CLIENT_JS_FILE_PATH
# create docker image for cordova
docker build -t alex_dwt/remote-video-watcher-cordova -f $DOCKERFILE_CORDOVA_PATH $WORK_DIR
if [ $? -ne 0 ]
then
echo 'Can not create docker image. Abort!'
exit 1
fi
echo 'Started creating android client...'
# build .apk in docker container
docker run --rm -it \
-v $WORK_DIR:/camera-apk \
alex_dwt/remote-video-watcher-cordova /bin/bash -c "./camera-utility.sh docker-camera-cordova-build"
}
##################################################
## Launch server daemon to start take and share video ###
################################################
function startCameraServer {
validateConfigFile
exportConfigParams
if [ "$(docker images | egrep -c 'alex_dwt/remote-video-watcher-server')" -eq 0 ]; then
echo "Can not find docker server image. You should run 'build-server' command at first. Abort!"
exit 0
fi
docker rm -f alex-dwt-remote-video-watcher-server >/dev/null 2>&1
docker run -d \
-v /opt:/opt:ro \
-p 443:443 \
-p $conf_wss_server_port:${conf_wss_server_port} \
-v /tmp/alex-dwt-remote-video-watcher-server:/camera/streamer/run \
$(find /dev/ 2>/dev/null | egrep "/dev/video*|/dev/vchiq" | xargs -I {} printf "--device={}:{} ") \
--name alex-dwt-remote-video-watcher-server alex_dwt/remote-video-watcher-server >/dev/null 2>&1
if [ $? -ne 0 ]
then
echo 'Can not start camera server daemon.'
else
echo 'Camera server daemon successfully started!'
fi
}
function replaceConfigParamsInBuildFiles {
exportConfigParams
local CONFIG_TEXT=$(parseYaml $CONFIG_FILE "conf_")
local CAMERAS_COUNT=$(echo "$CONFIG_TEXT" | egrep -o conf_cameras_[0-9]+ | sort -u | wc -l)
for arg in "$@"
do
if [ ! -f $arg ]; then
echo "Build file '$arg' does not exist. Abort!"
exit 1
fi
case "$arg" in
"$BUILD_SERVER_JS_FILE_PATH")
sed -i "s/%_enable_static_server_%/$conf_enable_static_server/" $BUILD_SERVER_JS_FILE_PATH
sed -i "s/%_token_crypt_phrase_%/$conf_token_crypt_phrase/" $BUILD_SERVER_JS_FILE_PATH
sed -i "s/%_wss_server_port_%/$conf_wss_server_port/" $BUILD_SERVER_JS_FILE_PATH
;;
"$BUILD_SERVER_SH_FILE_PATH")
local cameras_list=''
local i=0
while [ $i -lt $CAMERAS_COUNT ]; do
local name="conf_cameras_${i}_name"
local command="conf_cameras_${i}_command"
cameras_list="$cameras_list$i) # ${!name}$NEW_LINE PID=\$(${!command} > /dev/null 2>__amp__1 __amp__ echo \$!)$NEW_LINE;;"
if [ $i -ne $(($CAMERAS_COUNT-1)) ]; then
cameras_list="$cameras_list$NEW_LINE"
fi
i=$(($i+1))
done
awk -i inplace -v TEXT="$cameras_list" '{sub(/#_list_of_cameras/, TEXT);print;}' $BUILD_SERVER_SH_FILE_PATH
sed -i "s/__amp__/\&/g" $BUILD_SERVER_SH_FILE_PATH
sed -i "s/%_camera_max_id_%/$(($CAMERAS_COUNT-1))/" $BUILD_SERVER_SH_FILE_PATH
;;
"$BUILD_CLIENT_JS_FILE_PATH")
sed -i "s/%_default_server_ip_%/$conf_default_server_ip/" $BUILD_CLIENT_JS_FILE_PATH
sed -i "s/%_external_server_ip_%/$conf_external_server_ip/" $BUILD_CLIENT_JS_FILE_PATH
sed -i "s/%_certificate_fingerprint_%/$conf_certificate_fingerprint/" $BUILD_CLIENT_JS_FILE_PATH
sed -i "s/%_token_crypt_phrase_%/$conf_token_crypt_phrase/" $BUILD_CLIENT_JS_FILE_PATH
sed -i "s/%_wss_server_port_%/$conf_wss_server_port/" $BUILD_CLIENT_JS_FILE_PATH
local cameras_list=''
local i=0
while [ $i -lt $CAMERAS_COUNT ]; do
local name="conf_cameras_${i}_name"
local revert_option_val="conf_cameras_${i}_has_revert_option"
local revert_option_text=''
if [ ${!revert_option_val} -eq 1 ]; then
revert_option_text="revert: false"
fi
cameras_list="$cameras_list{label: '${!name}', el: null, options: { $revert_option_text }}"
if [ $i -ne $(($CAMERAS_COUNT-1)) ]; then
cameras_list="$cameras_list,$NEW_LINE"
fi
i=$(($i+1))
done
awk -i inplace -v TEXT="$cameras_list" '{sub(/\/\/_list_of_cameras/, TEXT);print;}' $BUILD_CLIENT_JS_FILE_PATH
;;
esac
done
}
function validateConfigFile {
if [ ! -f $CONFIG_FILE ]; then
echo 'Config file config.yml does not exist. Abort!'
exit 1
fi
local CONFIG_TEXT=$(parseYaml $CONFIG_FILE "conf_")
local CAMERAS_COUNT=$(echo "$CONFIG_TEXT" | egrep -o conf_cameras_[0-9]+ | sort -u | wc -l)
local CAMERAS_LINES_COUNT=$(echo "$CONFIG_TEXT" | egrep -c conf_cameras_[0-9]+)
if [ $CAMERAS_COUNT -eq 0 ]; then
echo 'There are no any cameras defined in config.yml. Abort!'
exit 1
fi
if [[ $(( $CAMERAS_LINES_COUNT / $CAMERAS_COUNT )) -ne 3 ]] || [[ $(( $CAMERAS_LINES_COUNT % 3 )) -ne 0 ]]; then
echo 'Cameras parameters are wrong. Every camera must have three parameters. Abort!'
exit 1
fi
if [ "$(echo "$CONFIG_TEXT" | egrep -c 'conf_wss_server_port|conf_default_server_ip|conf_external_server_ip|conf_enable_static_server|conf_certificate_fingerprint|conf_token_crypt_phrase')" -ne 6 ]; then
echo 'There is wrong count of parameters in config.yml. Abort!'
exit 1
fi
}
function exportConfigParams {
eval $(parseYaml $CONFIG_FILE "conf_")
}
function prepareBuildDirectory {
if [ ! -d "$BUILD_DIR" ]; then
mkdir $BUILD_DIR
if [ $? -ne 0 ]
then
echo 'Can not create build directory. Abort!'
exit 1
fi
fi
# clear old files
if [ -f $BUILD_SERVER_JS_FILE_PATH ]; then
rm -f $BUILD_SERVER_JS_FILE_PATH
if [ $? -ne 0 ]
then
exit 1
fi
fi
if [ -f $BUILD_SERVER_SH_FILE_PATH ]; then
rm -f $BUILD_SERVER_SH_FILE_PATH
if [ $? -ne 0 ]
then
exit 1
fi
fi
if [ -f $BUILD_CLIENT_JS_FILE_PATH ]; then
rm -f $BUILD_CLIENT_JS_FILE_PATH
if [ $? -ne 0 ]
then
exit 1
fi
fi
if [ -f $BUILD_DOCKERFILE_SERVER_PATH ]; then
rm -f $BUILD_DOCKERFILE_SERVER_PATH
if [ $? -ne 0 ]
then
exit 1
fi
fi
# determine what docker base image use to build server
cp $DOCKERFILE_SERVER_PATH $BUILD_DOCKERFILE_SERVER_PATH
local image=$DOCKER_ARM_IMAGE
if [ "$MACHINE_ARCH" == "X86" ]; then
image=$DOCKER_X86_IMAGE
fi
awk -i inplace -v TEXT="$image" '{sub(/%_base_image_name_%/, TEXT);print;}' $BUILD_DOCKERFILE_SERVER_PATH
if [ $? -ne 0 ]
then
echo 'Can not change docker image base name. Abort!'
exit 1
fi
}
function fixCordovaSslPlugin {
echo "Start fixing cordova-plugin-sslcertificatechecker (to accept self-signed certificates)..."
local IMPORT_BLOCK="\
import javax.net.ssl.TrustManager; \
import javax.net.ssl.X509TrustManager; \
import java.security.cert.X509Certificate; \
import javax.net.ssl.SSLContext; \
import java.security.GeneralSecurityException; \
import javax.net.ssl.HostnameVerifier; \
import javax.net.ssl.SSLSession;"
local VERIFY_FUNCTION="\
private static HostnameVerifier getMyHostnameVerifier() throws GeneralSecurityException { \
TrustManager[] trustAllCerts = new TrustManager[] { \
new X509TrustManager() { \
public java.security.cert.X509Certificate[] getAcceptedIssuers() { \
return new X509Certificate[0]; \
} \
public void checkClientTrusted( \
java.security.cert.X509Certificate[] certs, String authType) { \
} \
public void checkServerTrusted( \
java.security.cert.X509Certificate[] certs, String authType) { \
} \
} \
}; \
SSLContext sc = SSLContext.getInstance(\"SSL\"); \
sc.init(null, trustAllCerts, new java.security.SecureRandom()); \
HttpsURLConnection.setDefaultSSLSocketFactory(sc.getSocketFactory()); \
HostnameVerifier hostnameVerifier = new HostnameVerifier() { \
@Override \
public boolean verify(String hostname, SSLSession session) { \
return true; \
} \
}; \
return hostnameVerifier; \
}"
local FIX_FUNCTION_DECLARATION=", GeneralSecurityException \{"
local SKIP_ACTION="con.setHostnameVerifier(getMyHostnameVerifier());"
local PLUGIN_FILE=$(find /camera/platforms/android/ -iname "*sslcertificatechecker.java" 2>/dev/null)
if [ -z "$PLUGIN_FILE" ]
then
echo "File not found. Abort!"
exit 1
fi
egrep -qi "package .*" $PLUGIN_FILE
if [ $? -ne 0 ]
then
echo 'Can not find pattern text to replace. Abort!'
exit 1
fi
sed -r -i "s/(package .*)/\1 $IMPORT_BLOCK/I" $PLUGIN_FILE
egrep -qi "public.*class.*" $PLUGIN_FILE
if [ $? -ne 0 ]
then
echo 'Can not find pattern text to replace. Abort!'
exit 1
fi
sed -r -i "s/(public.*class.*)/\1 $VERIFY_FUNCTION/I" $PLUGIN_FILE
egrep -qi "private.*getfingerpr.*throws.*\{" $PLUGIN_FILE
if [ $? -ne 0 ]
then
echo 'Can not find pattern text to replace. Abort!'
exit 1
fi
sed -r -i "s/(private.*getfingerpr.*throws.*)\{/\1 $FIX_FUNCTION_DECLARATION/I" $PLUGIN_FILE
egrep -qi "setconnecttimeout.*" $PLUGIN_FILE
if [ $? -ne 0 ]
then
echo 'Can not find pattern text to replace. Abort!'
exit 1
fi
sed -r -i "s/(setconnecttimeout.*)/\1 $SKIP_ACTION/I" $PLUGIN_FILE
echo 'Plugin successfully fixed!'
}
function compileMjpgStreamer {
echo 'Started compiling mjpg-streamer...'
local image=$DOCKER_ARM_IMAGE
if [ "$MACHINE_ARCH" == "X86" ]; then
image=$DOCKER_X86_IMAGE
fi
docker run --rm -it \
-v /opt:/opt:ro \
-v $BUILD_DIR:/mjpg-streamer-compiled \
$image /bin/bash -c "apt-get update && \
apt-get install -y cmake git libjpeg8-dev build-essential && \
git clone https://github.com/jacksonliam/mjpg-streamer.git && \
cd /mjpg-streamer/mjpg-streamer-experimental && \
make && \
chmod 666 *.so mjpg_streamer && \
cp *.so mjpg_streamer /mjpg-streamer-compiled/"
if [ $? -ne 0 ]
then
echo 'Can not compile mjpg-streamer. Abort!'
exit 1
fi
echo 'Mjpg-streamer successfully compiled!'
}
function generateTokenCryptPhrase {
echo 'Started generating token_crypt_phrase...'
sed -r -i "s/(token_crypt_phrase:).*/\1 $(< /dev/urandom tr -dc _A-Z-a-z-0-9 | head -c${1:-32};echo;)/" $CONFIG_FILE
echo 'Token_crypt_phrase successfully generated!'
}
function generateServerCertificate {
echo 'Started generating server certificate...'
openssl req -x509 -sha256 -nodes -days 1000 -newkey rsa:2048 \
-keyout $CERTIFICATE_KEY_PATH -out $CERTIFICATE_CRT_PATH \
-subj "/C=GB/ST=London/L=London/O=example/OU=IT Department/CN=example.example" > /dev/null 2>&1
if [ $? -ne 0 ]
then
echo 'Can not create certificate. Abort!'
exit 1
fi
sed -r -i "s/(certificate_fingerprint:).*/\1 $(openssl x509 -fingerprint -in $CERTIFICATE_CRT_PATH | grep -i "fingerprint" | sed -r 's/^.*=//' | tr ':' ' ')/" $CONFIG_FILE
echo 'Server certificate successfully generated!'
}
function parseYaml {
local prefix=$2
local s='[[:space:]]*' w='[a-zA-Z0-9_]*' fs=$(echo @|tr @ '\034')
sed -ne "s|^\($s\):|\1|" \
-e "s|^\($s\)\($w\)$s:$s[\"']\(.*\)[\"']$s\$|\1$fs\2$fs\3|p" \
-e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $1 |
awk -F$fs '{
indent = length($1)/2;
vname[indent] = $2;
for (i in vname) {if (i > indent) {delete vname[i]}}
if (length($3) > 0) {
vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
printf("%s%s%s=\"%s\"\n", "'$prefix'",vn, $2, $3);
}
}'
}
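# Usage sketch (hypothetical config.yml):
#   wss_server_port: 8443
#   cameras:
#     0:
#       name: "front door"
# `parseYaml config.yml "conf_"` would then emit shell assignments such as:
#   conf_wss_server_port="8443"
#   conf_cameras_0_name="front door"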
function buildApkInDockerContainer {
local isInitialBuild=0
for arg in "$@"
do
if [ "$arg" == "--initial-build" ]
then
isInitialBuild=1
fi
done
if [ "$isInitialBuild" -eq 0 ]
then
cordova --no-telemetry clean
fi
APK_FILE=$(cordova --no-telemetry build android | egrep -i "/camera/platforms/android")
if [ -z "$APK_FILE" ]
then
echo "Can not build .apk file. Abort!"
exit 1
fi
if [ "$isInitialBuild" -eq 0 ]
then
#copy file to current directory
cp $APK_FILE /camera-apk/camera-client-android.apk
chmod 666 /camera-apk/camera-client-android.apk
echo 'Android client "camera-client-android.apk" successfully created!'
fi
}
# execute
main "$@"
|
alex-dwt/RemoteVideoWatcher
|
camera-utility.sh
|
Shell
|
mit
| 19,529 |
#!/bin/bash
#
cd ../bin
./deploymap.sh MapTest-Creative
cd -
|
sdhunt/McQuad
|
tools/src/dev/runmap.sh
|
Shell
|
mit
| 62 |
#!/usr/bin/env bash
# All necessary OpenCV dependencies:
sudo apt-get -y install libavcodec-dev libavformat-dev libswscale-dev libx264-dev libv4l-dev
sudo apt-get -y install libgtk2.0-dev
sudo apt-get -y install libatlas-base-dev gfortran
|
kkshmz/dotfiles
|
raspi/install-opencv.sh
|
Shell
|
mit
| 240 |
########################################################################################
## ##
## Automatically download and extract all the SRA data. This self contained ##
## bash script, should (!!) download a new version of the data and figure out ##
## which are metagenomes. ##
## ##
## Note that this is designed to run on my cluster that uses SGE for job submission ##
## if you run it elsewhere you will need to change the qsub part most likely. ##
## ##
## (c) 2018 Rob Edwards ##
## ##
########################################################################################
HOST=`hostname`
DATE=`date +%b_%Y`
WD=$PWD
if [ -e $DATE ]; then
echo "$DATE already exists. Do we need to do anything?"
exit;
fi
mkdir $DATE
cd $DATE
# get the modification time
curl --head https://s3.amazonaws.com/starbuck1/sradb/SRAmetadb.sqlite.gz > timestamp.txt
grep Last-Modified timestamp.txt | sed -e 's/Last-Modified: //' > ~/GitHubs/partie/SRA_Update_Time
curl -Lo SRA_Accessions.tab ftp://ftp.ncbi.nlm.nih.gov/sra/reports/Metadata/SRA_Accessions.tab
# Download one of the SRA SQLite databases:
echo "Downloading and extracting the new SQL Lite data base"
curl -Lo SRAmetadb.sqlite.gz "https://s3.amazonaws.com/starbuck1/sradb/SRAmetadb.sqlite.gz"
if [ ! -e SRAmetadb.sqlite.gz ]; then
echo "ERROR: No database was downloaded"
exit 1
fi
echo "Uncompressing the gzip file";
gunzip SRAmetadb.sqlite.gz
# Get all the possible SRA metagenome samples from the SQL lite table. This is described at https://edwards.sdsu.edu/research/sra-metagenomes/
echo "Running the SQLite commands";
sqlite3 SRAmetadb.sqlite 'select run_accession from run where experiment_accession in (select experiment_accession from experiment where (experiment.library_strategy = "AMPLICON" or experiment.library_selection = "PCR"))' > amplicons.ids
sqlite3 SRAmetadb.sqlite 'select run_accession from run where experiment_accession in (select experiment_accession from experiment where experiment.library_source = "METAGENOMIC")' > source_metagenomic.ids
sqlite3 SRAmetadb.sqlite 'select run_accession from run where experiment_accession in (select experiment_accession from experiment where experiment.study_accession in (select study_accession from study where study_type = "Metagenomics"));' > study_metagenomics.ids
sqlite3 SRAmetadb.sqlite 'select run_accession from run where experiment_accession in (select experiment_accession from experiment where experiment.sample_accession in (select sample.sample_accession from sample where (sample.scientific_name like "%microbiom%" OR sample.scientific_name like "%metagenom%")))' > sci_name_metagenome.ids
grep -F -x -v -f amplicons.ids source_metagenomic.ids > source_metagenomic.notamplicons.ids
grep -F -x -v -f amplicons.ids study_metagenomics.ids > study_metagenomics.notamplicons.ids
grep -F -x -v -f amplicons.ids sci_name_metagenome.ids > sci_name_metagenome.notamplicons.ids
sort -u sci_name_metagenome.notamplicons.ids source_metagenomic.notamplicons.ids study_metagenomics.notamplicons.ids > SRA-metagenomes.txt
# look at the previously downloaded metagenomes
echo "Figuring out the new metagenomes to download"
cut -f 1 ~/GitHubs/partie/SRA_Metagenome_Types.tsv | grep -Fxvf - SRA-metagenomes.txt > SRA-metagenomes-ToDownload.txt
# now set up a cluster job to parse out some data
mkdir partie
cp SRA-metagenomes-ToDownload.txt partie/
cd partie
# how many jobs do we have?
COUNT=$(wc -l SRA-metagenomes-ToDownload.txt | awk '{print $1}')
# Note that I added zotkill.pl here to reduce a problem where sge was overwriting itself.
# See http://moo.nac.uci.edu/~hjm/zotkill.pl for more information about zotkill.pl
echo -e "SRA=\$(head -n \$SGE_TASK_ID SRA-metagenomes-ToDownload.txt | tail -n 1);\nperl \$HOME/partie/partie.pl -noheader \${SRA}.sra | $HOME/bin/zotkill.pl partie.out;" > partie.sh
# and submit a few jobs to the queue to test
# NOTE:
# Do not make the outputs a directory!!
# If you use a file for STDERR/STDOUT then you don't need to concatenate the outputs in the next step
# this deals with an error in the BASH_FUNC_module
unset module
if [ $HOST == "anthill" ]; then
# we can submit directly
BEGIN=1; END=$((BEGIN+74999))
while [[ $END -lt $COUNT ]]; do
echo $BEGIN $END;
qsub -V -cwd -t $BEGIN-$END:1 -o sge_out -e sge_err ./partie.sh
echo "Submitting a partie job for sequences $BEGIN to $END"
BEGIN=$((END+1));
END=$((BEGIN+74999));
done;
echo "Submitting a partie job for sequences $BEGIN to $END"
qsub -V -cwd -t $BEGIN-$END:1 -o sge_out -e sge_err ./partie.sh
else
# submit via ssh
WD=$PWD
echo "Running the partie command on anthill"
ssh anthill "unset func; cd $WD; qsub -V -cwd -t 1-$COUNT:1 -o sge_out -e sge_err ./partie.sh"
fi
# get the sizes of the metagenomes
IDX=0;
while [ $IDX -lt $COUNT ]; do
IDX=$((IDX+250));
echo "Getting the sizes of the metagenomes upto number $IDX of $COUNT";
head -n $IDX SRA-metagenomes-ToDownload.txt | tail -n 250 > temp;
epost -db sra -input temp -format acc | esummary -format runinfo -mode xml | xtract -pattern Row -element Run,bases,spots,spots_with_mates,avgLength,size_MB,ReleaseDate >> SRA_Metagenome_Sizes.tsv;
done
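# Columns written to SRA_Metagenome_Sizes.tsv:
# Run, bases, spots, spots_with_mates, avgLength, size_MB, ReleaseDate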
rm -f temp
sort -u SRA_Metagenome_Sizes.tsv >> ~/GitHubs/partie/SRA_Metagenome_Sizes.tsv
echo "We have submitted the PARTIE jobs to the cluster, and you need to let them run."
echo "Once they are run (which doesn't take that long), you should be able to use the script:"
echo "partie_parse_output.sh"
echo "to finalize the data and add everything to GitHub"
|
linsalrob/partie
|
scripts/partie_download.sh
|
Shell
|
mit
| 6,015 |
#!/bin/bash
# --- First, run the program
python3 {{cookiecutter.project}}.py
# --- Check to see if there was an error
# make sure we redirect the error message
logFile=$(ls logs/*.log 2> /dev/null | sort | tail -n1)
SHOW_ALL_LOG="no"
SHOW_TIME="no"
while getopts "at" option; do
case "${option}" in
a) SHOW_ALL_LOG="yes" ;;
t) SHOW_TIME="yes" ;;
esac
done
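# Usage sketch: ./run.sh -a  -> dump the entire log
#               ./run.sh -t  -> show only the timing lines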
# Turn this on if necessary
if [ "$SHOW_ALL_LOG" = 'yes' ]
then
echo "The entire log file:"
cat $logFile
fi
# Find the timing information
if [ "$SHOW_TIME" = 'yes' ]
then
echo "Timing information:"
cat $logFile | grep 'seconds'
fi
# Print the errors in the log files ...
if [ -f "$logFile" ]; then
# Print the errors
echo "Errors:"
cat $logFile | grep 'ERROR'
fi
exit 0 # Prevent an error call in the Makefile
|
sankhaMukherjee/myCutter
|
{{cookiecutter.project}}/bin/run.sh
|
Shell
|
mit
| 838 |
sudo docker run -d --name logtastic logtastic
|
Burmudar/docker-logtastic
|
run.sh
|
Shell
|
mit
| 46 |
#!/usr/bin/bash
merge () {
local ext=$1
shift
for found in `locate $ext`;do
orig=${found%.${ext}}
if [ $# -gt 0 ];then
$1 $orig $found
rm -i $found
else
echo $orig '<->' $found
fi
done
}
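# Usage sketch: `./merge_pacfiles.sh vimdiff` runs vimdiff on each
# foo.conf / foo.conf.pacnew pair and then offers to delete the .pacnew;
# with no argument it only lists the pairs.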
merge pacnew "$@"
|
jokester/dotted
|
arch/merge_pacfiles.sh
|
Shell
|
mit
| 248 |
#!/usr/bin/env bash
if [[ $SL_BUILD_NATIVE == "false" ]]; then
echo "Skipping the native image build because SL_BUILD_NATIVE is set to false."
exit 0
fi
"$JAVA_HOME"/bin/native-image \
--macro:truffle --no-fallback --initialize-at-build-time \
-cp ../language/target/simplelanguage.jar:../launcher/target/launcher-19.2.0-SNAPSHOT.jar \
com.oracle.truffle.sl.launcher.SLMain \
slnative
|
m50d/amalie
|
native/make_native.sh
|
Shell
|
mit
| 409 |
#!/usr/bin/env bash
#script name : csvrename.sh
#title : Batch Renaming Using CSV File Look Up
#description : This bash script can be used to look up file names
# in a CSV file of root directory and rename them to
# the new name they are assigned in the CSV table.
#author : Srishti Belwariar
#date : 07/13/2017
#usage : ./csvrename.sh [CSVFile]
#notes :
# This script is useful for batch renaming files in a root directory at
# a max depth of 0. This script takes the table stored in the CSV file
# (which is located in the root directory as well) and looks up each
# root directory file in the CSV file and changes their names to the
# new names provided. This script requires the user to have a CSV
# file inside the root directory containing the files that will be renamed.
# The CSV file should be formated so that the first column (without a header)
# contains the current file names while the second column (without header)
# contains the new names. This script will echo the number of file
# renames done.
#
# valid command, if the csv file was real:
# $ ./csvrename.sh fileNameChanges.csv
#
#bash_version : 3.2.57(1)-release
#============================================================================
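# Example (hypothetical) fileNameChanges.csv contents:
#   IMG_0001.jpg,vacation-day1.jpg
#   IMG_0002.jpg,vacation-day2.jpg
#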
CSV=$1
original=()
tN=()
new=()
counter=0
#Check if CSV file is valid
if [ ! -f $1 ];
then
echo "CSV file does not exist"
exit
fi
#set column 1 to original array
while read line;
do
original+=($(echo $line | cut -d, -f1))
done < "$1"
#set column 2 to another array
while read line;
do
new+=($(echo $line | cut -d, -f2))
done < "$1"
#check if oL and nL are equal
oL=${#original[@]}
nL=${#new[@]}
if [ $oL -ne $nL ]
then
echo "CSV file does not contain equal entries in both columns"
exit
fi
#check if all oL files are valid files.
fakes=0
for((i=0; i<$oL; i++))
do
if [ ! -f "${original[i]}" ]
then
fakes=$((fakes+1))
fi
done
if [ $fakes -ne 0 ]
then
echo "some file names do not exist in root directory"
exit
fi
#set inode numbers as transition name tN array
for((i=0; i<$oL; i++))
do
SerialNumber=($(ls -i ${original[i]} | awk '{print $1}'))
extension=($(echo ${original[i]} | cut -d'.' -f2))
transN=$SerialNumber.$extension
tN+=($transN)
done
#conduct name swaps using 2 pass system
for((i=0; i<$oL; i++))
do
mv ${original[i]} ${tN[i]}
original[i]=${tN[i]}
done
for((i=0; i<$oL; i++))
do
mv ${original[i]} ${new[i]}
counter=$((counter+1))
done
#echo some results
echo "Renamed $counter files."
|
srishtibelwariar/BashScripts
|
csvrename.sh
|
Shell
|
mit
| 2,498 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for USN-2449-1
#
# Security announcement date: 2014-12-22 00:00:00 UTC
# Script generation date: 2017-01-01 21:04:09 UTC
#
# Operating System: Ubuntu 14.10
# Architecture: i686
#
# Vulnerable packages fix on version:
# - ntp:1:4.2.6.p5+dfsg-3ubuntu2.14.10.1
#
# Last versions recommended by security team:
# - ntp:1:4.2.6.p5+dfsg-3ubuntu2.14.10.3
#
# CVE List:
# - CVE-2014-9293
# - CVE-2014-9294
# - CVE-2014-9295
# - CVE-2014-9296
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo apt-get install --only-upgrade ntp=1:4.2.6.p5+dfsg-3ubuntu2.14.10.3 -y
|
Cyberwatch/cbw-security-fixes
|
Ubuntu_14.10/i686/2014/USN-2449-1.sh
|
Shell
|
mit
| 709 |
#!/usr/bin/env bash
apt-get update
# install packages
apt-get install git -y
apt-get install nginx -y
apt-get install software-properties-common -y
# setup hhvm
apt-key adv --recv-keys --keyserver hkp://keyserver.ubuntu.com:80 0x5a16e7281be7a449 -y
add-apt-repository "deb http://dl.hhvm.com/ubuntu $(lsb_release -sc) main" -y
apt-get update -y
apt-get install hhvm -y
# setup mysql
apt-get install debconf-utils -y
debconf-set-selections <<< "mysql-server mysql-server/root_password password Password"
debconf-set-selections <<< "mysql-server mysql-server/root_password_again password Password"
apt-get install mysql-server -y
# setup nginx
cp /vagrant/provision/config/nginx_vhost /etc/nginx/sites-available/nginx_vhost > /dev/null
ln -s /etc/nginx/sites-available/nginx_vhost /etc/nginx/sites-enabled/
rm -rf /etc/nginx/sites-available/default
service nginx restart
# setup mysql
echo "CREATE DATABASE nucleus" | mysql -uroot -pPassword
mysql -uroot -pPassword nucleus < /vagrant/provision/config/schema.sql
# clone nucleus
git clone https://github.com/hacktx/nucleus.git /vagrant/nucleus
# setup composer
curl -sS https://getcomposer.org/installer | php
mv composer.phar /usr/local/bin/composer
# Setup nodejs and gulp
curl -sL https://deb.nodesource.com/setup | sudo bash -
sudo apt-get install nodejs -y
npm install -g gulp
# setup nucleus
cd /vagrant/nucleus
composer install
npm install
./vendor/bin/robo build
cp /vagrant/provision/config/config.ini .
# Reboot nginx for changes to take effect
service nginx restart
|
hacktx/nucleus-vagrant
|
provision/setup.sh
|
Shell
|
mit
| 1,536 |
#! /bin/sh
#
# Copyright (c) 2010, 2013 Oracle and/or its affiliates. All rights reserved.
#
# Display environment's deadlocks based on "db_stat -Co" output.
t1=__a
t2=__b
trap 'rm -f $t1 $t2; exit 0' 0 1 2 3 13 15
if [ $# -ne 1 ]; then
echo "Usage: dd.sh [db_stat -Co output]"
exit 1
fi
if `egrep '\<WAIT\>.*\<page\>' $1 > /dev/null`; then
n=`egrep '\<WAIT\>.*\<page\>' $1 | wc -l | awk '{print $1}'`
echo "dd.sh: $1: $n page locks in a WAIT state."
else
echo "dd.sh: $1: No page locks in a WAIT state found."
exit 1
fi
# Print out list of node wait states, and output cycles in the graph.
egrep '\<WAIT\>.*\<page\>' $1 | awk '{print $1 " " $5 " " $7}' |
while read l f p; do
for i in `egrep "\<HELD\>.*\<$f\>.*\<page\>.*\<$p\>" $1 |
awk '{print $1}'`; do
echo "$l $i"
done
done | tsort > /dev/null 2>$t1
# Display the locks in a single cycle.
c=1
display_one() {
if [ -s $1 ]; then
echo "Deadlock #$c ============"
c=`expr $c + 1`
cat $1 | sort -n -k 7
:> $1
fi
}
# Display the locks in all of the cycles.
#
# Requires tsort output some text before each list of nodes in the cycle,
# and the actual node displayed on the line be the second (white-space)
# separated item on the line. For example:
#
# tsort: cycle in data
# tsort: 8000177f
# tsort: 80001792
# tsort: 80001774
# tsort: cycle in data
# tsort: 80001776
# tsort: 80001793
# tsort: cycle in data
# tsort: 8000176a
# tsort: 8000178a
#
# XXX
# Currently, db_stat doesn't display the implicit wait relationship between
# parent and child transactions, where the parent won't release a lock until
# the child commits/aborts. This means the deadlock where parent holds a
# lock, thread A waits on parent, child waits on thread A won't be shown.
if [ -s $t1 ]; then
:>$t2
while read a b; do
case $b in
[0-9]*)
egrep $b $1 >> $t2;;
*)
display_one $t2;;
esac
done < $t1
display_one $t2
else
echo 'No deadlocks found.'
fi
exit 0
|
iadix/iadixcoin
|
db-5.3.28.NC/util/db_stat/dd.sh
|
Shell
|
mit
| 1,937 |
#!/usr/bin/env bash
###############################################################################
# General #
###############################################################################
# Set appearance
# Blue : 1
# Graphite : 6
defaults write NSGlobalDomain AppleAquaColorVariant -int 1
# Highlight color
# Graphite : `0.780400 0.815700 0.858800`
# Silver : `0.776500 0.776500 0.776500`
# Blue : `0.709800 0.835300 1.000000`
defaults write NSGlobalDomain AppleHighlightColor -string '0.709800 0.835300 1.000000'
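# Read the value back to verify it was written:
# defaults read NSGlobalDomain AppleHighlightColor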
# Use Dark menu bar and Dock
# defaults write NSGlobalDomain AppleInterfaceStyle -string "Dark"
# Automatically hide and show the menu bar
# defaults write NSGlobalDomain "_HIHideMenuBar" -bool true
# Sidebar icon size
# Small : 1
# Medium : 2
# Large : 3
defaults write NSGlobalDomain NSTableViewDefaultSizeMode -int 2
# Scroll bar visibility
# Possible values: `WhenScrolling`, `Automatic` and `Always`
defaults write NSGlobalDomain AppleShowScrollBars -string "Automatic"
# Smooth scrolling
# Disable on older Macs
#defaults write NSGlobalDomain NSScrollAnimationEnabled -bool false
# Set number of recent items (Applications, Document, Servers)
# PlistBuddy approach appears broken:
# RecentApplications, RecentDocuments, RecentServers
#/usr/libexec/PlistBuddy -x -c "Set :RecentApplications:MaxAmount 0" ~/Library/Preferences/com.apple.recentitems.plist
for category in 'applications' 'documents' 'servers'; do
/usr/bin/osascript -e "tell application \"System Events\" to tell appearance preferences to set recent $category limit to 10"
done
# Allow Handoff between this Mac and your iCloud devices
defaults write ~/Library/Preferences/ByHost/com.apple.coreservices.useractivityd ActivityAdvertisingAllowed -bool true
defaults write ~/Library/Preferences/ByHost/com.apple.coreservices.useractivityd ActivityReceivingAllowed -bool true
# Disable Auto Save, Versions and Resume
# defaults write -g ApplePersistence -bool false
|
semenovDL/dotfiles
|
system/general.sh
|
Shell
|
mit
| 2,043 |
#!/bin/bash
export NODE_ENV="production"
export MONGO_DB_URL="{{pnews_poller_mongo_url}}"
cd "{{pnews_poller_dir}}"
/usr/bin/node app.js --source hackernews
|
jorisroovers/pnews
|
deploy/roles/pnews-poller/templates/pnews-poller-hackernews.sh
|
Shell
|
mit
| 159 |
#!/usr/bin/env bash
CURRENT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
UNAME="$(uname)"
source "$CURRENT_DIR/helpers.sh"
source "$CURRENT_DIR/variables.sh"
is_tmux_automatic_start_enabled() {
local auto_start_value="$(get_tmux_option "$auto_start_option" "$auto_start_default")"
[ "$auto_start_value" == "on" ]
}
is_osx() {
[ $UNAME == "Darwin" ]
}
is_linux() {
[ $UNAME == "Linux" ]
}
main() {
if is_tmux_automatic_start_enabled; then
if is_osx; then
"$CURRENT_DIR/handle_tmux_automatic_start/osx_enable.sh"
elif is_linux; then
"$CURRENT_DIR/handle_tmux_automatic_start/linux_enable.sh"
fi
else
if is_osx; then
"$CURRENT_DIR/handle_tmux_automatic_start/osx_disable.sh"
elif is_linux; then
"$CURRENT_DIR/handle_tmux_automatic_start/linux_disable.sh"
fi
fi
}
main
|
insanepaul/tmux-continuum
|
scripts/handle_tmux_automatic_start.sh
|
Shell
|
mit
| 813 |
#!/usr/bin/env bash
set -e
for file in test/*.json; do
if [[ $file == "test/context.json" || $file == "test/event.json" ]] ; then
continue;
fi
node-lambda run -x test/context.json -j $file
done
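# To exercise a single fixture instead of the whole directory (hypothetical
# file name shown):
# node-lambda run -x test/context.json -j test/my-event.json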
|
KangarooBox/lambda-cloudwatch-event-msteams
|
test/all.sh
|
Shell
|
mit
| 205 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for RHSA-2011:1455
#
# Security announcement date: 2011-11-16 23:12:24 UTC
# Script generation date: 2017-01-01 21:13:31 UTC
#
# Operating System: Red Hat 5
# Architecture: x86_64
#
# Vulnerable packages fix on version:
# - freetype.i386:2.2.1-28.el5_7.2
# - freetype-debuginfo.i386:2.2.1-28.el5_7.2
# - freetype.x86_64:2.2.1-28.el5_7.2
# - freetype-debuginfo.x86_64:2.2.1-28.el5_7.2
# - freetype-devel.i386:2.2.1-28.el5_7.2
# - freetype-demos.x86_64:2.2.1-28.el5_7.2
# - freetype-devel.x86_64:2.2.1-28.el5_7.2
#
# Last versions recommended by the security team:
# - freetype.i386:2.2.1-32.el5_9.1
# - freetype-debuginfo.i386:2.2.1-32.el5_9.1
# - freetype.x86_64:2.2.1-32.el5_9.1
# - freetype-debuginfo.x86_64:2.2.1-32.el5_9.1
# - freetype-devel.i386:2.2.1-32.el5_9.1
# - freetype-demos.x86_64:2.2.1-32.el5_9.1
# - freetype-devel.x86_64:2.2.1-32.el5_9.1
#
# CVE List:
# - CVE-2011-3439
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo yum install freetype.i386-2.2.1 -y
sudo yum install freetype-debuginfo.i386-2.2.1 -y
sudo yum install freetype.x86_64-2.2.1 -y
sudo yum install freetype-debuginfo.x86_64-2.2.1 -y
sudo yum install freetype-devel.i386-2.2.1 -y
sudo yum install freetype-demos.x86_64-2.2.1 -y
sudo yum install freetype-devel.x86_64-2.2.1 -y
|
Cyberwatch/cbw-security-fixes
|
Red_Hat_5/x86_64/2011/RHSA-2011:1455.sh
|
Shell
|
mit
| 1,433 |
#!/bin/bash
# pip install virtualenv
virtualenv --python=python hhsmartmirror
source ./hhsmartmirror/bin/activate
pip install -r requirements.txt
pip install --global-option='build_ext' --global-option='-I /usr/local/include' --global-option='-L /usr/local/lib' pyaudio
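# The --global-option flags point the pyaudio C extension at the portaudio
# headers/libraries; /usr/local matches a default Homebrew prefix. On other
# setups (assumption), adjust the -I/-L paths or install portaudio first:
# brew install portaudio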
|
ymnoor21/AI-Smart-Mirror
|
setup.sh
|
Shell
|
mit
| 271 |
#!/bin/sh
set -e
echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
SWIFT_STDLIB_PATH="${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}"
install_framework()
{
if [ -r "${BUILT_PRODUCTS_DIR}/$1" ]; then
local source="${BUILT_PRODUCTS_DIR}/$1"
elif [ -r "${BUILT_PRODUCTS_DIR}/$(basename "$1")" ]; then
local source="${BUILT_PRODUCTS_DIR}/$(basename "$1")"
elif [ -r "$1" ]; then
local source="$1"
fi
local destination="${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
if [ -L "${source}" ]; then
echo "Symlinked..."
source="$(readlink "${source}")"
fi
  # use filter instead of exclude so missing patterns don't throw errors
echo "rsync -av --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${destination}\""
rsync -av --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${destination}"
local basename
basename="$(basename -s .framework "$1")"
binary="${destination}/${basename}.framework/${basename}"
if ! [ -r "$binary" ]; then
binary="${destination}/${basename}"
fi
# Strip invalid architectures so "fat" simulator / device frameworks work on device
if [[ "$(file "$binary")" == *"dynamically linked shared library"* ]]; then
strip_invalid_archs "$binary"
fi
# Resign the code if required by the build settings to avoid unstable apps
code_sign_if_enabled "${destination}/$(basename "$1")"
# Embed linked Swift runtime libraries. No longer necessary as of Xcode 7.
if [ "${XCODE_VERSION_MAJOR}" -lt 7 ]; then
local swift_runtime_libs
swift_runtime_libs=$(xcrun otool -LX "$binary" | grep --color=never @rpath/libswift | sed -E s/@rpath\\/\(.+dylib\).*/\\1/g | uniq -u && exit ${PIPESTATUS[0]})
for lib in $swift_runtime_libs; do
echo "rsync -auv \"${SWIFT_STDLIB_PATH}/${lib}\" \"${destination}\""
rsync -auv "${SWIFT_STDLIB_PATH}/${lib}" "${destination}"
code_sign_if_enabled "${destination}/${lib}"
done
fi
}
# Signs a framework with the provided identity
code_sign_if_enabled() {
if [ -n "${EXPANDED_CODE_SIGN_IDENTITY}" -a "${CODE_SIGNING_REQUIRED}" != "NO" -a "${CODE_SIGNING_ALLOWED}" != "NO" ]; then
    # Use the current code_sign_identity
echo "Code Signing $1 with Identity ${EXPANDED_CODE_SIGN_IDENTITY_NAME}"
echo "/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} ${OTHER_CODE_SIGN_FLAGS} --preserve-metadata=identifier,entitlements \"$1\""
/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} ${OTHER_CODE_SIGN_FLAGS} --preserve-metadata=identifier,entitlements "$1"
fi
}
# Strip invalid architectures
strip_invalid_archs() {
binary="$1"
# Get architectures for current file
archs="$(lipo -info "$binary" | rev | cut -d ':' -f1 | rev)"
stripped=""
for arch in $archs; do
if ! [[ "${VALID_ARCHS}" == *"$arch"* ]]; then
# Strip non-valid architectures in-place
lipo -remove "$arch" -output "$binary" "$binary" || exit 1
stripped="$stripped $arch"
fi
done
if [[ "$stripped" ]]; then
echo "Stripped $binary of architectures:$stripped"
fi
}
if [[ "$CONFIGURATION" == "Debug" ]]; then
install_framework "$BUILT_PRODUCTS_DIR/Alamofire/Alamofire.framework"
install_framework "$BUILT_PRODUCTS_DIR/DGElasticPullToRefresh/DGElasticPullToRefresh.framework"
install_framework "$BUILT_PRODUCTS_DIR/FillableLoaders/FillableLoaders.framework"
install_framework "$BUILT_PRODUCTS_DIR/Kingfisher/Kingfisher.framework"
install_framework "$BUILT_PRODUCTS_DIR/MBProgressHUD/MBProgressHUD.framework"
install_framework "$BUILT_PRODUCTS_DIR/Moya/Moya.framework"
install_framework "$BUILT_PRODUCTS_DIR/ReachabilitySwift/ReachabilitySwift.framework"
install_framework "$BUILT_PRODUCTS_DIR/Result/Result.framework"
install_framework "$BUILT_PRODUCTS_DIR/RxSwift/RxSwift.framework"
install_framework "$BUILT_PRODUCTS_DIR/SQLite.swift/SQLite.framework"
install_framework "$BUILT_PRODUCTS_DIR/SnapKit/SnapKit.framework"
install_framework "$BUILT_PRODUCTS_DIR/StarWars/StarWars.framework"
install_framework "$BUILT_PRODUCTS_DIR/SwiftyJSON/SwiftyJSON.framework"
install_framework "$BUILT_PRODUCTS_DIR/SwiftyUtils/SwiftyUtils.framework"
fi
if [[ "$CONFIGURATION" == "Release" ]]; then
install_framework "$BUILT_PRODUCTS_DIR/Alamofire/Alamofire.framework"
install_framework "$BUILT_PRODUCTS_DIR/DGElasticPullToRefresh/DGElasticPullToRefresh.framework"
install_framework "$BUILT_PRODUCTS_DIR/FillableLoaders/FillableLoaders.framework"
install_framework "$BUILT_PRODUCTS_DIR/Kingfisher/Kingfisher.framework"
install_framework "$BUILT_PRODUCTS_DIR/MBProgressHUD/MBProgressHUD.framework"
install_framework "$BUILT_PRODUCTS_DIR/Moya/Moya.framework"
install_framework "$BUILT_PRODUCTS_DIR/ReachabilitySwift/ReachabilitySwift.framework"
install_framework "$BUILT_PRODUCTS_DIR/Result/Result.framework"
install_framework "$BUILT_PRODUCTS_DIR/RxSwift/RxSwift.framework"
install_framework "$BUILT_PRODUCTS_DIR/SQLite.swift/SQLite.framework"
install_framework "$BUILT_PRODUCTS_DIR/SnapKit/SnapKit.framework"
install_framework "$BUILT_PRODUCTS_DIR/StarWars/StarWars.framework"
install_framework "$BUILT_PRODUCTS_DIR/SwiftyJSON/SwiftyJSON.framework"
install_framework "$BUILT_PRODUCTS_DIR/SwiftyUtils/SwiftyUtils.framework"
fi
|
HarrisLee/SwiftBook
|
SmartHomeI/Pods/Target Support Files/Pods-SmartHomeI/Pods-SmartHomeI-frameworks.sh
|
Shell
|
mit
| 5,573 |
# -*- shell-script -*-
# hist.sh - Bourne Again Shell Debugger history routines
#
# Copyright (C) 2002-2003, 2006-2008, 2011, 2015 Rocky Bernstein
# <[email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with This program; see the file COPYING. If not, write to
# the Free Software Foundation, 59 Temple Place, Suite 330, Boston,
# MA 02111 USA.
typeset -i _Dbg_hi_last_stop=0
typeset -i _Dbg_hi=0 # Current next history entry to store into.
typeset -a _Dbg_history; _Dbg_history=()
typeset -i _Dbg_set_history=1
typeset -i _Dbg_history_length=${HISTSIZE:-256} # gdb's default value
typeset _Dbg_histfile=${HOME:-.}/.bashdb_hist
# Set to rerun history item, or print history if command is of the form
# !n:p. If command is "history" then $1 is number of history item.
# the history command index to run is returned or $_Dbg_hi if
# there's nothing to run.
# Return value in $history_num
_Dbg_history_parse() {
history_num=$1
((history_num < 0)) && ((history_num=${#_Dbg_history[@]}-1+$1))
_Dbg_hi=${#_Dbg_history[@]}
[[ -z $history_num ]] && let history_num=$_Dbg_hi-1
if [[ $_Dbg_cmd == h* ]] ; then
if [[ $history_num != $int_pat ]] ; then
if [[ $history_num == -$int_pat ]] ; then
history_num=$_Dbg_hi+$history_num
else
_Dbg_errmsg "Invalid history number skipped: $history_num"
history_num=-1
fi
fi
else
# Handle ! form. May need to parse number out number and modifier
# case $_Dbg_cmd in
# \!\-${int_pat}:p )
# typeset -a word1
# word1=($(_Dbg_split '!' $_Dbg_cmd))
# local -a word2
# word2=($(_Dbg_split ':' ${word1[0]}))
# typeset -i num=_Dbg_hi+${word2[0]}
# _Dbg_do_history_list $num $num
# history_num=-1
# ;;
# [!]${int_pat}:p )
# local -a word1
# word1=($(_Dbg_split '!' $_Dbg_cmd))
# local -a word2
# word2=($(_Dbg_split ':' ${word1[0]}))
# _Dbg_do_history_list ${word2[0]} ${word2[0]}
# history_num=-1
# ;;
# \!\-$int_pat )
# local -a word
# word=($(_Dbg_split '!' $_Dbg_cmd))
# history_num=$_Dbg_hi+${word[0]}
# ;;
# \!$int_pat )
# local -a word
# word=($(_Dbg_split '!' $_Dbg_cmd))
# history_num=${word[0]}
# ;;
# '!' )
# if [[ $history_num != $int_pat ]] ; then
# if [[ $history_num == -$int_pat ]] ; then
# history_num=$_Dbg_hi+$history_num
# else
# _Dbg_msg "Invalid history number skipped: $history_num"
# history_num=-1
# fi
# fi
# ;;
# * )
# _Dbg_errmsg "Invalid history number skipped: $_Dbg_cmd"
# history_num=-1
# esac
:
fi
}
_Dbg_history_read() {
if [[ -r $_Dbg_histfile ]] ; then
history -r $_Dbg_histfile
typeset -a last_history; last_history=($(history 1))
typeset -i max_history=${last_history[0]}
if (( max_history > _Dbg_history_length )) ; then
max_history=$_Dbg_history_length
fi
local OLD_HISTTIMEFORMAT=${HISTTIMEFORMAT}
local hist
HISTTIMEFORMAT=''
local -i i
for (( i=1; (( i <= max_history )) ; i++ )) ; do
last_history=($(history $i))
      hist=${last_history[1]}   # index goes inside the braces, not appended outside
# _Dbg_history[$i]=$hist
done
HISTTIMEFORMAT=${OLD_HISTTIMEFORMAT}
fi
}
# Save history file
_Dbg_history_write() {
(( _Dbg_history_length > 0 && _Dbg_set_history)) \
&& history -w $_Dbg_histfile
}
# Remove the last command from the history list.
_Dbg_history_remove_item() {
_Dbg_hi=${#_Dbg_history[@]}-1
unset _Dbg_history[$_Dbg_hi]
}
# _Dbg_history_read
|
rogalmic/vscode-bash-debug
|
bashdb_dir/lib/hist.sh
|
Shell
|
mit
| 4,098 |
# .functions
# vim:syntax=sh
#####################
# Utility functions #
#####################
# Upgrade mac-dots
function upgrade_macdots {
/usr/bin/env MACDOTSreload=$MACDOTS zsh $MACDOTS/bin/upgrade
}
# Log into a vzaar server
function vssh_login {
ssh -i ~/.ssh/vzaar.pem vzaar@$1 ;
}
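# Usage sketch (hostname is an assumed placeholder):
# vssh_login app1.example.com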
# Rebuild Finder "Open with..." menu
# http://osxdaily.com/2013/01/22/fix-open-with-menu-mac-os-x/
function rebuild_finder_menu {
/System/Library/Frameworks/CoreServices.framework/Versions/A/Frameworks/LaunchServices.framework/Versions/A/Support/lsregister -kill -r -domain local -domain user; killall Finder; echo "Open With has been rebuilt, Finder will relaunch"
}
#################
# Git functions #
#################
# Rebase
function rebase_with {
git pull --rebase origin $1 ;
}
# Deploy to staging from current branch
function deploy_to_staging {
bundle exec cap staging deploy BRANCH=`current_branch` ;
}
# Push current branch to remote
function push_current_branch_to_remote {
git push -u origin `current_branch`
}
# Delete current branch on origin
function delete_current_branch_from_remote {
echo "Delete remote '`current_branch`' branch? (Y to confirm) \c"
read line
if [ "$line" = Y ]
then
git push origin :`current_branch`
fi
}
# Re-create current branch on remote
function recreate_current_branch_on_remote {
echo "Re-create remote '`current_branch`' branch? (Y to confirm) \c"
read line
if [ "$line" = Y ]
then
git push origin :`current_branch`
push_current_branch_to_remote
fi
}
|
alol/mac-dots
|
zsh/functions.zsh
|
Shell
|
mit
| 1,540 |
#!/bin/sh
set -e
mkdir -p "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
RESOURCES_TO_COPY=${PODS_ROOT}/resources-to-copy-${TARGETNAME}.txt
> "$RESOURCES_TO_COPY"
install_resource()
{
case $1 in
*.storyboard)
echo "ibtool --reference-external-strings-file --errors --warnings --notices --output-format human-readable-text --compile ${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \"$1\" .storyboard`.storyboardc ${PODS_ROOT}/$1 --sdk ${SDKROOT}"
ibtool --reference-external-strings-file --errors --warnings --notices --output-format human-readable-text --compile "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \"$1\" .storyboard`.storyboardc" "${PODS_ROOT}/$1" --sdk "${SDKROOT}"
;;
*.xib)
echo "ibtool --reference-external-strings-file --errors --warnings --notices --output-format human-readable-text --compile ${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \"$1\" .xib`.nib ${PODS_ROOT}/$1 --sdk ${SDKROOT}"
ibtool --reference-external-strings-file --errors --warnings --notices --output-format human-readable-text --compile "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \"$1\" .xib`.nib" "${PODS_ROOT}/$1" --sdk "${SDKROOT}"
;;
*.framework)
echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
echo "rsync -av ${PODS_ROOT}/$1 ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
rsync -av "${PODS_ROOT}/$1" "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
;;
*.xcdatamodel)
echo "xcrun momc \"${PODS_ROOT}/$1\" \"${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$1"`.mom\""
xcrun momc "${PODS_ROOT}/$1" "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$1" .xcdatamodel`.mom"
;;
*.xcdatamodeld)
echo "xcrun momc \"${PODS_ROOT}/$1\" \"${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$1" .xcdatamodeld`.momd\""
xcrun momc "${PODS_ROOT}/$1" "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$1" .xcdatamodeld`.momd"
;;
*.xcmappingmodel)
echo "xcrun mapc \"${PODS_ROOT}/$1\" \"${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$1" .xcmappingmodel`.cdm\""
xcrun mapc "${PODS_ROOT}/$1" "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$1" .xcmappingmodel`.cdm"
;;
*.xcassets)
;;
/*)
echo "$1"
echo "$1" >> "$RESOURCES_TO_COPY"
;;
*)
echo "${PODS_ROOT}/$1"
echo "${PODS_ROOT}/$1" >> "$RESOURCES_TO_COPY"
;;
esac
}
install_resource "${BUILT_PRODUCTS_DIR}/TMCoreDataStack.bundle"
rsync -avr --copy-links --no-relative --exclude '*/.svn/*' --files-from="$RESOURCES_TO_COPY" / "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
if [[ "${ACTION}" == "install" ]]; then
rsync -avr --copy-links --no-relative --exclude '*/.svn/*' --files-from="$RESOURCES_TO_COPY" / "${INSTALL_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
fi
rm -f "$RESOURCES_TO_COPY"
if [[ -n "${WRAPPER_EXTENSION}" ]] && [ "`xcrun --find actool`" ] && [ `find . -name '*.xcassets' | wc -l` -ne 0 ]
then
case "${TARGETED_DEVICE_FAMILY}" in
1,2)
TARGET_DEVICE_ARGS="--target-device ipad --target-device iphone"
;;
1)
TARGET_DEVICE_ARGS="--target-device iphone"
;;
2)
TARGET_DEVICE_ARGS="--target-device ipad"
;;
*)
TARGET_DEVICE_ARGS="--target-device mac"
;;
esac
find "${PWD}" -name "*.xcassets" -print0 | xargs -0 actool --output-format human-readable-text --notices --warnings --platform "${PLATFORM_NAME}" --minimum-deployment-target "${IPHONEOS_DEPLOYMENT_TARGET}" ${TARGET_DEVICE_ARGS} --compress-pngs --compile "${BUILT_PRODUCTS_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
fi
|
tonymillion/TMCoreDataStack
|
Example/Pods/Target Support Files/Pods-TMCoreDataStack/Pods-TMCoreDataStack-resources.sh
|
Shell
|
mit
| 4,026 |
#!/bin/sh
DISTADDR="127.0.0.1"
. /etc/default/cuckoo
sudo service uwsgi start cuckoo-distributed
sudo service nginx start
sudo start cuckoo-distributed-instance INSTANCE=dist.status
sudo start cuckoo-distributed-instance INSTANCE=dist.scheduler
for worker in $(curl -s "$DISTADDR:9003/api/node?mode=workers"); do
sudo start cuckoo-distributed-instance "INSTANCE=$worker"
done
|
mburakergenc/Malware-Detection-using-Machine-Learning
|
cuckoo/utils/start-distributed.sh
|
Shell
|
mit
| 384 |
#! /usr/bin/env zsh
ls | while read s; do nkf -w $s | grep "きく" > /dev/null 2>&1 && echo $s; done
|
kmhjs/shell_gei
|
src/13/q1.zsh
|
Shell
|
mit
| 102 |
#!/usr/bin/env bash
##
# This script builds the extension for the Chrome browser.
#
##
echo 'Building Web Search Navigator for Chrome'
# copy the sources into the working directory
BIN=build/chrome
OBJ="$BIN/obj"
echo 'Copying files...'
# cleanup the previous build
rm -rf "$OBJ"
mkdir -p "$OBJ"
cp -R src/* "$OBJ"
echo 'Creating package...'
zip -FSj "$BIN/package.zip" $OBJ/*
echo 'Build complete'
|
infokiller/google-search-navigator
|
tools/make-chrome.sh
|
Shell
|
mit
| 405 |
set -o errexit
set -o nounset
export REGISTRY=quay.io/munnerz/
docker login -e="${QUAY_EMAIL}" -u "${QUAY_USERNAME}" -p "${QUAY_PASSWORD}" quay.io
if [ "${TRAVIS_TAG}" = "" ]; then
echo "Pushing images with sha tag."
make push
else
echo "Pushing images with release tag."
make push MUTABLE_TAG=latest VERSION="${TRAVIS_TAG}"
fi
|
munnerz/keepalived-cloud-provider
|
build/deploy.sh
|
Shell
|
mit
| 347 |
#!/usr/bin/env bash
NODE_ENV=test yarn build
|
WillowDeploy/willow-deploy
|
build-test.sh
|
Shell
|
mit
| 45 |
#!/bin/bash
# This is a naive implementation of the Porter's stemming algorithm in unix
if [ $# -eq 0 ]
then
echo 'No argument passed. Command format: stemming.sh foo.txt'
else
tr -sc 'A-Za-z' '\n' < $1 | tr 'A-Z' 'a-z' | grep '[aeiou].*ing$' | sort | uniq -c | sort -n -r | less
fi
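# Worked example (assumed input): given a file containing
# "running running walking sing string", the pipeline lowercases the text,
# splits it one word per line, keeps -ing words with a vowel before the
# suffix (running, walking; "sing" and "string" have none) and prints
#     2 running
#     1 walking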
|
Elixeus/Snippets
|
stemming.sh
|
Shell
|
cc0-1.0
| 291 |
#!/bin/bash
#
# OpenStack Installation script
#
# copyright 2017 kasidit chanchio, vasabilab,
# http://vasabilab.cs.tu.ac.th
# Department of Computer Science,
# Faculty of Science and Technology, Thammasat University
#
. ./install-paramrc.sh
#
export ORIINSTALL_TYPE=vasabi-1234install_type4321-ibasav
export ORINETWORK_TYPE=vasabi-1234network_type4321-ibasav
export ORILOGINNAME=vasabi-1234loginname4321-ibasav
export ORILOGINPASS=vasabi-1234loginpassword4321-ibasav
export ORITIMEZONE=vasabi-1234timezone4321-ibasav
#
export ORINTP_SERVER0=vasabi-1234-ntp-pool-server0-4321-ibasav
export ORINTP_SERVER1=vasabi-1234-ntp-pool-server1-4321-ibasav
export ORINTP_SERVER2=vasabi-1234-ntp-pool-server2-4321-ibasav
export ORINTP_SERVER3=vasabi-1234-ntp-pool-server3-4321-ibasav
export ORINTP_SERVER_LOCAL=vasabi-1234-ntp-local-org-server-4321-ibasav
#
export ORIHYPERVISOR=vasabi-1234hypervisor4321-ibasav
export ORIINIT_IMAGE_LOCATION=vasabi-1234init_image_location4321-ibasav
export ORIINIT_IMAGE_NAME=vasabi-1234init_image_name4321-ibasav
export ORIOPS_MYSQL_PASS=vasabilabMYSQL_PASS
export ORIDEMO_PASS=vasabilabDEMO_PASS
export ORIADMIN_PASS=vasabilabADMIN_PASS
export ORIDOMAINNAME=vasabi-1234domainname4321-ibasav
export ORIGATEWAY_IP=vasabi-1234gateway_ip4321-ibasav
export ORICONTROLLER_IP=vasabi-1234controller_ip4321-ibasav
export ORINETWORK_IP=vasabi-1234network_ip4321-ibasav
export ORICOMPUTE_IP=vasabi-1234compute_ip4321-ibasav
export ORICOMPUTE1_IP=vasabi-1234compute1_ip4321-ibasav
export ORIEXTERNAL_CIDR=vasabi-1234external_cidr4321-ibasav
export ORIMANAGEMENT_NETWORK=vasabi-1234management_network4321-ibasav
export ORISTART_FLOATING_IP=vasabi-1234start_floating_ip4321-ibasav
export ORIEND_FLOATING_IP=vasabi-1234end_floating_ip4321-ibasav
export ORIGATEWAY_IP=vasabi-1234gateway_ip4321-ibasav
export ORIMANAGEMENT_BROADCAST_ADDRESS=vasabi-1234broadcast_address4321-ibasav
export ORIDATA_TUNNEL_NETWORK_NODE_IP=vasabi-1234data_tunnel_network_node_ip4321-ibasav
export ORIDATA_TUNNEL_COMPUTE_NODE_IP=vasabi-1234data_tunnel_compute_node_ip4321-ibasav
export ORIDATA_TUNNEL_COMPUTE1_NODE_IP=vasabi-1234data_tunnel_compute1_node_ip4321-ibasav
export ORIDATA_TUNNEL_NETWORK_ADDRESS=vasabi-1234data_tunnel_network_address4321-ibasav
export ORILOCAL_REPO=vasabi-1234local_repo4321-ibasav
export ORILOCAL_SECURITY_REPO=vasabi-1234local_security_repo4321-ibasav
export ORIMANAGEMENT_NETWORK_NETMASK=vasabi-1234management_network_netmask4321-ibasav
export ORIDATA_TUNNEL_NETWORK_NETMASK=vasabi-1234data_network_netmask4321-ibasav
export ORIDNS_IP=vasabi-1234dns_ip4321-ibasav
export ORIKEYSTONE_PY_URL=vasabi-1234keystone-py-url4321-ibasav
#
export ORIGATEWAY_IP_NIC=vasabi-1234gateway_ip_nic4321-ibasav
export ORICONTROLLER_IP_NIC=vasabi-1234controller_ip_nic4321-ibasav
export ORINETWORK_IP_NIC=vasabi-1234network_ip_nic4321-ibasav
export ORIDATA_TUNNEL_NETWORK_NODE_IP_NIC=vasabi-1234data_tunnel_network_node_ip_nic4321-ibasav
export ORIVLAN_NETWORK_NODE_IP_NIC=vasabi-1234vlan_network_node_ip_nic4321-ibasav
export ORIEXTERNAL_CIDR_NIC=vasabi-1234external_cidr_nic4321-ibasav
export ORICOMPUTE_IP_NIC=vasabi-1234compute_ip_nic4321-ibasav
export ORICOMPUTE1_IP_NIC=vasabi-1234compute1_ip_nic4321-ibasav
export ORIDATA_TUNNEL_COMPUTE_NODE_IP_NIC=vasabi-1234data_tunnel_compute_node_ip_nic4321-ibasav
export ORIDATA_TUNNEL_COMPUTE1_NODE_IP_NIC=vasabi-1234data_tunnel_compute1_node_ip_nic4321-ibasav
export ORIVLAN_COMPUTE_NODE_IP_NIC=vasabi-1234vlan_compute_node_ip_nic4321-ibasav
export ORIVLAN_COMPUTE1_NODE_IP_NIC=vasabi-1234vlan_compute1_node_ip_nic4321-ibasav
#
# extract the initial OPSInstaller directory
#
printf "\nExtract the initial OPSInstaller directory\nPress a key\n"
#
tar xvf OPSInstaller-init.tar
#
printf "\nAssign password values\nsubstitution\n"
#
./config.d/exe-config-passwd.sh
#
printf "\nAssign parameter values\nsubstitution\n"
#
# Script to define parameter values below
#
ETC_FILES=OPSInstaller/*/files/*
SCRIPT_FILES=OPSInstaller/*/*.sh
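#
# NOTE (editorial sketch, not part of the original installer): the repeated
# substitution blocks below all follow the same pattern and could be
# collapsed into a helper such as
#
#   substitute() {   # substitute <TOPIC> <ORIGINAL> <REPLACEMENT>
#     printf "\n----------\n%s changed\n" "$1"
#     sed -i "s/$2/$3/g" ${ETC_FILES} ${SCRIPT_FILES}
#   }
#   # substitute INSTALL_TYPE "${ORIINSTALL_TYPE}" "${INSTALL_TYPE}"
#
# The explicit blocks are kept below as in the original.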
#
# Change INSTALL_TYPE
#
CHANGETOPIC=INSTALL_TYPE
#
printf "\nsubstitution\n"
printf "\n----------\n"
printf "\n\n${CHANGETOPIC} (in etc files) changed to\n\n"
sed -i "s/${ORIINSTALL_TYPE}/${INSTALL_TYPE}/g" ${ETC_FILES}
#
printf "\nsubstitution\n"
#
printf "\n----------\n"
printf "\n\n${CHANGETOPIC} changed to\n\n"
sed -i "s/${ORIINSTALL_TYPE}/${INSTALL_TYPE}/g" ${SCRIPT_FILES}
#
# Change NETWORK_TYPE
#
CHANGETOPIC=NETWORK_TYPE
#
printf "\nsubstitution\n"
printf "\n----------\n"
printf "\n\n${CHANGETOPIC} (in etc files) changed to\n\n"
sed -i "s/${ORINETWORK_TYPE}/${NETWORK_TYPE}/g" ${ETC_FILES}
#
printf "\nsubstitution\n"
#
printf "\n----------\n"
printf "\n\n${CHANGETOPIC} changed to\n\n"
sed -i "s/${ORINETWORK_TYPE}/${NETWORK_TYPE}/g" ${SCRIPT_FILES}
#
# Change GATEWAY_IP_NIC
#
CHANGETOPIC=GATEWAY_IP_NIC
#
printf "\nsubstitution\n"
#
printf "\n----------\n"
printf "\n\n${CHANGETOPIC} (in etc files) changed to\n\n"
sed -i "s/${ORIGATEWAY_IP_NIC}/${GATEWAY_IP_NIC}/g" ${ETC_FILES}
#
printf "\nsubstitution\n"
#
printf "\n----------\n"
printf "\n\n${CHANGETOPIC} changed to\n\n"
sed -i "s/${ORIGATEWAY_IP_NIC}/${GATEWAY_IP_NIC}/g" ${SCRIPT_FILES}
#
# Change CONTROLLER_IP_NIC
#
CHANGETOPIC=CONTROLLER_IP_NIC
#
printf "\nsubstitution\n"
printf "\n----------\n"
printf "\n\n${CHANGETOPIC} (in etc files) changed to\n\n"
sed -i "s/${ORICONTROLLER_IP_NIC}/${CONTROLLER_IP_NIC}/g" ${ETC_FILES}
#
printf "\nsubstitution\n"
printf "\n----------\n"
printf "\n\n${CHANGETOPIC} changed to\n\n"
sed -i "s/${ORICONTROLLER_IP_NIC}/${CONTROLLER_IP_NIC}/g" ${SCRIPT_FILES}
#
# Change NETWORK_IP_NIC
#
CHANGETOPIC=NETWORK_IP_NIC
#
printf "\nsubstitution\n"
printf "\n----------\n"
printf "\n\n${CHANGETOPIC} (in etc files) changed to\n\n"
sed -i "s/${ORINETWORK_IP_NIC}/${NETWORK_IP_NIC}/g" ${ETC_FILES}
#
printf "\nsubstitution\n"
printf "\n----------\n"
printf "\n\n${CHANGETOPIC} changed to\n\n"
sed -i "s/${ORINETWORK_IP_NIC}/${NETWORK_IP_NIC}/g" ${SCRIPT_FILES}
#
# Change DATA_TUNNEL_NETWORK_NODE_IP_NIC
#
CHANGETOPIC=DATA_TUNNEL_NETWORK_NODE_IP_NIC
#
printf "\nsubstitution\n"
printf "\n----------\n"
printf "\n\n${CHANGETOPIC} (in etc files) changed to\n\n"
sed -i "s/${ORIDATA_TUNNEL_NETWORK_NODE_IP_NIC}/${DATA_TUNNEL_NETWORK_NODE_IP_NIC}/g" ${ETC_FILES}
#
printf "\nsubstitution\n"
printf "\n----------\n"
printf "\n\n${CHANGETOPIC} changed to\n\n"
sed -i "s/${ORIDATA_TUNNEL_NETWORK_NODE_IP_NIC}/${DATA_TUNNEL_NETWORK_NODE_IP_NIC}/g" ${SCRIPT_FILES}
#
CHANGETOPIC=VLAN_NETWORK_NODE_IP_NIC
#
printf "\nsubstitution\n"
printf "\n----------\n"
printf "\n\n${CHANGETOPIC} (in etc files) changed to\n\n"
sed -i "s/${ORIVLAN_NETWORK_NODE_IP_NIC}/${VLAN_NETWORK_NODE_IP_NIC}/g" ${ETC_FILES}
#
printf "\nsubstitution\n"
printf "\n----------\n"
printf "\n\n${CHANGETOPIC} changed to\n\n"
sed -i "s/${ORIVLAN_NETWORK_NODE_IP_NIC}/${VLAN_NETWORK_NODE_IP_NIC}/g" ${SCRIPT_FILES}
#
# Change EXTERNAL_CIDR_NIC
#
CHANGETOPIC=EXTERNAL_CIDR_NIC
#
printf "\nsubstitution\n"
printf "\n----------\n"
printf "\n\n${CHANGETOPIC} (in etc files) changed to\n\n"
sed -i "s/${ORIEXTERNAL_CIDR_NIC}/${EXTERNAL_CIDR_NIC}/g" ${ETC_FILES}
#
printf "\nsubstitution\n"
printf "\n----------\n"
printf "\n\n${CHANGETOPIC} changed to\n\n"
sed -i "s/${ORIEXTERNAL_CIDR_NIC}/${EXTERNAL_CIDR_NIC}/g" ${SCRIPT_FILES}
#
# Change COMPUTE_IP_NIC
#
CHANGETOPIC=COMPUTE_IP_NIC
#
printf "\nsubstitution\n"
printf "\n----------\n"
printf "\n\n${CHANGETOPIC} (in etc files) changed to\n\n"
sed -i "s/${ORICOMPUTE_IP_NIC}/${COMPUTE_IP_NIC}/g" ${ETC_FILES}
#
printf "\nsubstitution\n"
printf "\n----------\n"
printf "\n\n${CHANGETOPIC} changed to\n\n"
sed -i "s/${ORICOMPUTE_IP_NIC}/${COMPUTE_IP_NIC}/g" ${SCRIPT_FILES}
#
# Change DATA_TUNNEL_COMPUTE_NODE_IP_NIC
#
CHANGETOPIC=DATA_TUNNEL_COMPUTE_NODE_IP_NIC
#
printf "\nsubstitution\n"
printf "\n----------\n"
printf "\n\n${CHANGETOPIC} (in etc files) changed to\n\n"
sed -i "s/${ORIDATA_TUNNEL_COMPUTE_NODE_IP_NIC}/${DATA_TUNNEL_COMPUTE_NODE_IP_NIC}/g" ${ETC_FILES}
#
printf "\nsubstitution\n"
printf "\n----------\n"
printf "\n\n${CHANGETOPIC} changed to\n\n"
sed -i "s/${ORIDATA_TUNNEL_COMPUTE_NODE_IP_NIC}/${DATA_TUNNEL_COMPUTE_NODE_IP_NIC}/g" ${SCRIPT_FILES}
#
CHANGETOPIC=VLAN_COMPUTE_NODE_IP_NIC
#
printf "\nsubstitution\n"
printf "\n----------\n"
printf "\n\n${CHANGETOPIC} (in etc files) changed to\n\n"
sed -i "s/${ORIVLAN_COMPUTE_NODE_IP_NIC}/${VLAN_COMPUTE_NODE_IP_NIC}/g" ${ETC_FILES}
#
printf "\nsubstitution\n"
printf "\n----------\n"
printf "\n\n${CHANGETOPIC} changed to\n\n"
sed -i "s/${ORIVLAN_COMPUTE_NODE_IP_NIC}/${VLAN_COMPUTE_NODE_IP_NIC}/g" ${SCRIPT_FILES}
#
# Change COMPUTE1_IP_NIC
#
CHANGETOPIC=COMPUTE1_IP_NIC
#
printf "\nsubstitution\n"
printf "\n----------\n"
printf "\n\n${CHANGETOPIC} (in etc files) changed to\n\n"
sed -i "s/${ORICOMPUTE1_IP_NIC}/${COMPUTE1_IP_NIC}/g" ${ETC_FILES}
#
printf "\nsubstitution\n"
printf "\n----------\n"
printf "\n\n${CHANGETOPIC} changed to\n\n"
sed -i "s/${ORICOMPUTE1_IP_NIC}/${COMPUTE1_IP_NIC}/g" ${SCRIPT_FILES}
#
# Change DATA_TUNNEL_COMPUTE1_NODE_IP_NIC
#
CHANGETOPIC=DATA_TUNNEL_COMPUTE1_NODE_IP_NIC
#
printf "\nsubstitution\n"
printf "\n----------\n"
printf "\n\n${CHANGETOPIC} (in etc files) changed to\n\n"
sed -i "s/${ORIDATA_TUNNEL_COMPUTE1_NODE_IP_NIC}/${DATA_TUNNEL_COMPUTE1_NODE_IP_NIC}/g" ${ETC_FILES}
#
printf "\nsubstitution\n"
printf "\n----------\n"
printf "\n\n${CHANGETOPIC} changed to\n\n"
sed -i "s/${ORIDATA_TUNNEL_COMPUTE1_NODE_IP_NIC}/${DATA_TUNNEL_COMPUTE1_NODE_IP_NIC}/g" ${SCRIPT_FILES}
#
CHANGETOPIC=VLAN_COMPUTE1_NODE_IP_NIC
#
printf "\nsubstitution\n"
printf "\n----------\n"
printf "\n\n${CHANGETOPIC} (in etc files) changed to\n\n"
sed -i "s/${ORIVLAN_COMPUTE1_NODE_IP_NIC}/${VLAN_COMPUTE1_NODE_IP_NIC}/g" ${ETC_FILES}
#
printf "\nsubstitution\n"
printf "\n----------\n"
printf "\n\n${CHANGETOPIC} changed to\n\n"
sed -i "s/${ORIVLAN_COMPUTE1_NODE_IP_NIC}/${VLAN_COMPUTE1_NODE_IP_NIC}/g" ${SCRIPT_FILES}
#
# Change OPS_LOGIN_NAME
#
CHANGETOPIC=OPS_LOGIN_NAME
#
printf "\nsubstitution\n"
printf "\n----------\n"
printf "\n\n${CHANGETOPIC} (in etc files) changed to\n\n"
sed -i "s/${ORILOGINNAME}/${OPS_LOGIN_NAME}/g" ${ETC_FILES}
#
printf "\nsubstitution\n"
printf "\n----------\n"
printf "\n\n${CHANGETOPIC} changed to\n\n"
sed -i "s/${ORILOGINNAME}/${OPS_LOGIN_NAME}/g" ${SCRIPT_FILES}
#
# Change OPS_LOGIN_PASS
#
CHANGETOPIC=OPS_LOGIN_PASS
#
printf "\nsubstitution\n"
printf "\n----------\n"
printf "\n\n${CHANGETOPIC} (in etc files) changed to\n\n"
sed -i "s/${ORILOGINPASS}/${OPS_LOGIN_PASS}/g" ${ETC_FILES}
#
printf "\nsubstitution\n"
printf "\n----------\n"
printf "\n\n${CHANGETOPIC} changed to\n\n"
sed -i "s/${ORILOGINPASS}/${OPS_LOGIN_PASS}/g" ${SCRIPT_FILES}
#
# Change OPS_TIMEZONE
#
CHANGETOPIC=OPS_TIMEZONE
#
printf "\nsubstitution\n"
printf "\n----------\n"
printf "\n\n${CHANGETOPIC} (in etc files) changed to\n\n"
sed -i "s/${ORITIMEZONE}/${OPS_TIMEZONE}/g" ${ETC_FILES}
#
printf "\nsubstitution\n"
printf "\n----------\n"
printf "\n\n${CHANGETOPIC} changed to\n\n"
sed -i "s/${ORITIMEZONE}/${OPS_TIMEZONE}/g" ${SCRIPT_FILES}
#
# Change NTP_SERVER0
#
CHANGETOPIC=NTP_SERVER0
#
printf "\nsubstitution\n"
printf "\n----------\n"
printf "\n\n${CHANGETOPIC} (in etc files) changed to\n\n"
sed -i "s/${ORINTP_SERVER0}/${NTP_SERVER0}/g" ${ETC_FILES}
#
printf "\nsubstitution\n"
printf "\n----------\n"
printf "\n\n${CHANGETOPIC} changed to\n\n"
sed -i "s/${ORINTP_SERVER0}/${NTP_SERVER0}/g" ${SCRIPT_FILES}
#
# Change NTP_SERVER1
#
CHANGETOPIC=NTP_SERVER1
#
printf "\nsubstitution\n"
printf "\n----------\n"
printf "\n\n${CHANGETOPIC} (in etc files) changed to\n\n"
sed -i "s/${ORINTP_SERVER1}/${NTP_SERVER1}/g" ${ETC_FILES}
#
printf "\nsubstitution\n"
printf "\n----------\n"
printf "\n\n${CHANGETOPIC} changed to\n\n"
sed -i "s/${ORINTP_SERVER1}/${NTP_SERVER1}/g" ${SCRIPT_FILES}
#
# Change NTP_SERVER2
#
CHANGETOPIC=NTP_SERVER2
#
printf "\nsubstitution\n"
printf "\n----------\n"
printf "\n\n${CHANGETOPIC} (in etc files) changed to\n\n"
sed -i "s/${ORINTP_SERVER2}/${NTP_SERVER2}/g" ${ETC_FILES}
#
printf "\nsubstitution\n"
printf "\n----------\n"
printf "\n\n${CHANGETOPIC} changed to\n\n"
sed -i "s/${ORINTP_SERVER2}/${NTP_SERVER2}/g" ${SCRIPT_FILES}
#
# Change NTP_SERVER3
#
CHANGETOPIC=NTP_SERVER3
#
printf "\nsubstitution\n"
printf "\n----------\n"
printf "\n\n${CHANGETOPIC} (in etc files) changed to\n\n"
sed -i "s/${ORINTP_SERVER3}/${NTP_SERVER3}/g" ${ETC_FILES}
#
printf "\nsubstitution\n"
printf "\n----------\n"
printf "\n\n${CHANGETOPIC} changed to\n\n"
sed -i "s/${ORINTP_SERVER3}/${NTP_SERVER3}/g" ${SCRIPT_FILES}
#
# Change NTP_SERVER_LOCAL
#
CHANGETOPIC=NTP_SERVER_LOCAL
#
printf "\nsubstitution\n"
printf "\n----------\n"
printf "\n\n${CHANGETOPIC} (in etc files) changed to\n\n"
sed -i "s/${ORINTP_SERVER_LOCAL}/${NTP_SERVER_LOCAL}/g" ${ETC_FILES}
#
printf "\nsubstitution\n"
printf "\n----------\n"
printf "\n\n${CHANGETOPIC} changed to\n\n"
sed -i "s/${ORINTP_SERVER_LOCAL}/${NTP_SERVER_LOCAL}/g" ${SCRIPT_FILES}
#
# Change HYPERVISOR
#
CHANGETOPIC=HYPERVISOR
#
printf "\nsubstitution\n"
printf "\n----------\n"
printf "\n\n${CHANGETOPIC} (in etc files) changed to\n\n"
sed -i "s/${ORIHYPERVISOR}/${HYPERVISOR}/g" ${ETC_FILES}
#
printf "\nsubstitution\n"
printf "\n----------\n"
printf "\n\n${CHANGETOPIC} changed to\n\n"
sed -i "s/${ORIHYPERVISOR}/${HYPERVISOR}/g" ${SCRIPT_FILES}
#
# Change INIT_IMAGE_LOCATION
#
CHANGETOPIC=INIT_IMAGE_LOCATION
#
printf "\nsubstitution\n"
printf "\n----------\n"
printf "\n\n${CHANGETOPIC} (in etc files) changed to\n\n"
sed -i "s/${ORIINIT_IMAGE_LOCATION}/${INIT_IMAGE_LOCATION}/g" ${ETC_FILES}
#
printf "\nsubstitution\n"
printf "\n----------\n"
printf "\n\n${CHANGETOPIC} changed to\n\n"
sed -i "s/${ORIINIT_IMAGE_LOCATION}/${INIT_IMAGE_LOCATION}/g" ${SCRIPT_FILES}
#
# Change INIT_IMAGE_NAME
#
CHANGETOPIC=INIT_IMAGE_NAME
#
printf "\nsubstitution\n"
printf "\n----------\n"
printf "\n\n${CHANGETOPIC} (in etc files) changed to\n\n"
sed -i "s/${ORIINIT_IMAGE_NAME}/${INIT_IMAGE_NAME}/g" ${ETC_FILES}
#
printf "\nsubstitution\n"
printf "\n----------\n"
printf "\n\n${CHANGETOPIC} changed to\n\n"
sed -i "s/${ORIINIT_IMAGE_NAME}/${INIT_IMAGE_NAME}/g" ${SCRIPT_FILES}
#
# Change OPS_MYSQL_PASS
#
CHANGETOPIC=OPS_MYSQL_PASS
#
printf "\nsubstitution\n"
printf "\n----------\n"
printf "\n\n${CHANGETOPIC} (in etc files) changed to\n\n"
sed -i "s/${ORIOPS_MYSQL_PASS}/${OPS_MYSQL_PASS}/g" ${ETC_FILES}
#
printf "\nsubstitution\n"
printf "\n----------\n"
printf "\n\n${CHANGETOPIC} changed to\n\n"
sed -i "s/${ORIOPS_MYSQL_PASS}/${OPS_MYSQL_PASS}/g" ${SCRIPT_FILES}
printf "\n----------\n"
#
# Change DEMO_PASS
#
CHANGETOPIC=DEMO_PASS
#
printf "\nsubstitution\n"
printf "\n----------\n"
printf "\n\n${CHANGETOPIC} (in etc files) changed to\n\n"
sed -i "s/${ORIDEMO_PASS}/${DEMO_PASS}/g" ${ETC_FILES}
#
printf "\nsubstitution\n"
printf "\n----------\n"
printf "\n\n${CHANGETOPIC} changed to\n\n"
sed -i "s/${ORIDEMO_PASS}/${DEMO_PASS}/g" ${SCRIPT_FILES}
printf "\n----------\n"
#
# Change ADMIN_PASS
#
CHANGETOPIC=ADMIN_PASS
#
printf "\nsubstitution\n"
printf "\n----------\n"
printf "\n\n${CHANGETOPIC} (in etc files) changed to\n\n"
sed -i "s/${ORIADMIN_PASS}/${ADMIN_PASS}/g" ${ETC_FILES}
#
printf "\nsubstitution\n"
printf "\n----------\n"
printf "\n\n${CHANGETOPIC} changed to\n\n"
sed -i "s/${ORIADMIN_PASS}/${ADMIN_PASS}/g" ${SCRIPT_FILES}
printf "\n----------\n"
#
# Change domainname in ${ETC_FILES} and ${SCRIPT_FILES}
#
printf "\n----------\n"
printf "\nDomain name (in etc files) changed to\n"
sed -i "s/${ORIDOMAINNAME}/${DOMAINNAME}/g" ${ETC_FILES}
#
printf "\n----------\n"
printf "\n\n${CHANGETOPIC} changed to\n\n"
sed -i "s/${ORIDOMAINNAME}/${DOMAINNAME}/g" ${SCRIPT_FILES}
printf "\n----------\n"
#
# Change controller ip in ${ETC_FILES} and ${SCRIPT_FILES}
#
printf "\nsubstitution\n"
printf "\n----------\n"
printf "\nController's IP (in etc files) changed to\n"
sed -i "s/${ORIGATEWAY_IP}/${GATEWAY_IP}/g" ${ETC_FILES}
#
printf "\n----------\n"
printf "\nController's IP (in etc files) changed to\n"
sed -i "s/${ORIGATEWAY_IP}/${GATEWAY_IP}/g" ${SCRIPT_FILES}
#
# Change controller ip in ${ETC_FILES} and ${SCRIPT_FILES}
#
printf "\nsubstitution\n"
printf "\n----------\n"
printf "\nController's IP (in etc files) changed to\n"
sed -i "s/${ORICONTROLLER_IP}/${CONTROLLER_IP}/g" ${ETC_FILES}
#
printf "\n----------\n"
printf "\nController's IP (in etc files) changed to\n"
sed -i "s/${ORICONTROLLER_IP}/${CONTROLLER_IP}/g" ${SCRIPT_FILES}
#
# Change network ip in ${ETC_FILES} and ${SCRIPT_FILES}
#
printf "\nsubstitution NETWORK_IP\n"
printf "\n----------\n"
printf "\nnetwork's IP (in etc files) changed to\n"
sed -i "s/${ORINETWORK_IP}/${NETWORK_IP}/g" ${ETC_FILES}
#
printf "\n----------\n"
printf "\nnetwork's IP (in etc files) changed to\n"
sed -i "s/${ORINETWORK_IP}/${NETWORK_IP}/g" ${SCRIPT_FILES}
#
# Change compute ip in ${ETC_FILES} and ${SCRIPT_FILES}
#
printf "\nsubstitution COMPUTE_IP\n"
printf "\n----------\n"
printf "\ncompute's IP (in etc files) changed to\n"
sed -i "s/${ORICOMPUTE_IP}/${COMPUTE_IP}/g" ${ETC_FILES}
#
printf "\n----------\n"
printf "\ncompute's IP (in sh files) changed to\n"
sed -i "s/${ORICOMPUTE_IP}/${COMPUTE_IP}/g" ${SCRIPT_FILES}
#
printf "\nsubstitution COMPUTE1_IP\n"
printf "\n----------\n"
printf "\ncompute's IP (in etc files) changed to\n"
sed -i "s/${ORICOMPUTE1_IP}/${COMPUTE1_IP}/g" ${ETC_FILES}
#
printf "\n----------\n"
printf "\ncompute's IP (in sh files) changed to\n"
sed -i "s/${ORICOMPUTE1_IP}/${COMPUTE1_IP}/g" ${SCRIPT_FILES}
#
# Change management network cidr in ${ETC_FILES} and ${SCRIPT_FILES}
#
printf "\nsubstitution\n"
printf "\n----------\n"
printf "\nnetwork cidr (in etc files) changed to\n"
sed -i "s/${ORIEXTERNAL_CIDR}/${EXTERNAL_CIDR}/g" ${ETC_FILES}
#
printf "\nsubstitution\n"
printf "\n----------\n"
printf "\nnetwork cidr (in script files) changed to\n"
sed -i "s/${ORIEXTERNAL_CIDR}/${EXTERNAL_CIDR}/g" ${SCRIPT_FILES}
#
# Change management network address in ${ETC_FILES} and ${SCRIPT_FILES}
#
printf "\nsubstitution\n"
printf "\n----------\n"
printf "\nnetwork address (in etc files) changed to\n"
sed -i "s/${ORIMANAGEMENT_NETWORK}/${MANAGEMENT_NETWORK}/g" ${ETC_FILES}
#
printf "\n----------\n"
printf "\nnetwork address (in script files) changed to\n"
sed -i "s/${ORIMANAGEMENT_NETWORK}/${MANAGEMENT_NETWORK}/g" ${SCRIPT_FILES}
#
# Change start floating ip in ${ETC_FILES} and ${SCRIPT_FILES}
#
printf "\nsubstitution\n"
printf "\n----------\n"
printf "\nstart floating ip (in script files) changed to\n"
sed -i "s/${ORISTART_FLOATING_IP}/${START_FLOATING_IP}/g" ${ETC_FILES}
#
printf "\nstart floating ip (in script files) changed to\n"
sed -i "s/${ORISTART_FLOATING_IP}/${START_FLOATING_IP}/g" ${SCRIPT_FILES}
#
# Change end floating ip in ${ETC_FILES} and ${SCRIPT_FILES}
#
printf "\nsubstitution\n"
printf "\n----------\n"
printf "\nend floating ip (in script files) changed to\n"
sed -i "s/${ORIEND_FLOATING_IP}/${END_FLOATING_IP}/g" ${ETC_FILES}
#
printf "\nend floating ip (in script files) changed to\n"
sed -i "s/${ORIEND_FLOATING_IP}/${END_FLOATING_IP}/g" ${SCRIPT_FILES}
#
# Change gateway ip in ${ETC_FILES} and ${SCRIPT_FILES}
#
printf "\nsubstitution\n"
printf "\n----------\n"
printf "\ngateway ip (in etc files) changed to\n"
sed -i "s/${ORIGATEWAY_IP}/${GATEWAY_IP}/g" ${ETC_FILES}
#
printf "\nsubstitution\n"
printf "\n----------\n"
printf "\ngateway ip (in script files) changed to\n"
sed -i "s/${ORIGATEWAY_IP}/${GATEWAY_IP}/g" ${SCRIPT_FILES}
#
# Change broadcast address in ${ETC_FILES} and ${SCRIPT_FILES}
#
printf "\nsubstitution\n"
printf "\n----------\n"
printf "\nbroadcast address (in etc files) changed to\n"
sed -i "s/${ORIMANAGEMENT_BROADCAST_ADDRESS}/${MANAGEMENT_BROADCAST_ADDRESS}/g" ${ETC_FILES}
#
printf "\nbroadcast address (in etc files) changed to\n"
sed -i "s/${ORIMANAGEMENT_BROADCAST_ADDRESS}/${MANAGEMENT_BROADCAST_ADDRESS}/g" ${SCRIPT_FILES}
#
# Data tunel network node ip in ${ETC_FILES} and ${SCRIPT_FILES}
#
printf "\nsubstitution\n"
printf "\n----------\n"
printf "\ndata network ip (in etc files) changed to\n"
sed -i "s/${ORIDATA_TUNNEL_NETWORK_NODE_IP}/${DATA_TUNNEL_NETWORK_NODE_IP}/g" ${ETC_FILES}
#
printf "\nsubstitution\n"
printf "\n----------\n"
printf "\ndata network ip (in script file) changed to\n"
sed -i "s/${ORIDATA_TUNNEL_NETWORK_NODE_IP}/${DATA_TUNNEL_NETWORK_NODE_IP}/g" ${SCRIPT_FILES}
#
# Data tunel compute node ip in ${ETC_FILES} and ${SCRIPT_FILES}
#
printf "\nsubstitution\n"
printf "\n----------\n"
printf "\ndata network ip (in etc files) changed to\n"
sed -i "s/${ORIDATA_TUNNEL_COMPUTE_NODE_IP}/${DATA_TUNNEL_COMPUTE_NODE_IP}/g" ${ETC_FILES}
#
printf "\nsubstitution\n"
printf "\n----------\n"
printf "\ndata network ip (in script file) changed to\n"
sed -i "s/${ORIDATA_TUNNEL_COMPUTE_NODE_IP}/${DATA_TUNNEL_COMPUTE_NODE_IP}/g" ${SCRIPT_FILES}
#
# Data tunel compute node ip in ${ETC_FILES} and ${SCRIPT_FILES}
#
printf "\nsubstitution\n"
printf "\n----------\n"
printf "\ndata network ip (in etc files) changed to\n"
sed -i "s/${ORIDATA_TUNNEL_COMPUTE1_NODE_IP}/${DATA_TUNNEL_COMPUTE1_NODE_IP}/g" ${ETC_FILES}
#
printf "\nsubstitution\n"
printf "\n----------\n"
printf "\ndata network ip (in script file) changed to\n"
sed -i "s/${ORIDATA_TUNNEL_COMPUTE1_NODE_IP}/${DATA_TUNNEL_COMPUTE1_NODE_IP}/g" ${SCRIPT_FILES}
#
# Data tunel network address in ${ETC_FILES} and ${SCRIPT_FILES}
#
printf "\nsubstitution\n"
printf "\n----------\n"
printf "\ndata network ip (in etc files) changed to\n"
sed -i "s/${ORIDATA_TUNNEL_NETWORK_ADDRESS}/${DATA_TUNNEL_NETWORK_ADDRESS}/g" ${ETC_FILES}
#
printf "\nsubstitution\n"
printf "\n----------\n"
printf "\ndata network ip (in script file) changed to\n"
sed -i "s/${ORIDATA_TUNNEL_NETWORK_ADDRESS}/${DATA_TUNNEL_NETWORK_ADDRESS}/g" ${SCRIPT_FILES}
#
# local repo ip address in ${ETC_FILES} and ${SCRIPT_FILES}
#
printf "\nsubstitution\n"
printf "\n----------\n"
printf "\nlocal repo ip (in etc files) changed to\n"
sed -i "s/${ORILOCAL_REPO}/${LOCAL_REPO}/g" ${ETC_FILES}
#
printf "\nsubstitution\n"
printf "\n----------\n"
printf "\nlocal repo ip (in script file) changed to\n"
sed -i "s/${ORILOCAL_REPO}/${LOCAL_REPO}/g" ${SCRIPT_FILES}
#
# local security repo ip address in ${ETC_FILES} and ${SCRIPT_FILES}
#
printf "\nsubstitution\n"
printf "\n----------\n"
printf "\nlocal repo ip (in etc files) changed to\n"
sed -i "s/${ORILOCAL_SECURITY_REPO}/${LOCAL_SECURITY_REPO}/g" ${ETC_FILES}
#
printf "\nsubstitution\n"
printf "\n----------\n"
printf "\nlocal repo ip (in script file) changed to\n"
sed -i "s/${ORILOCAL_SECURITY_REPO}/${LOCAL_SECURITY_REPO}/g" ${SCRIPT_FILES}
#
# management network netmask in ${ETC_FILES} and ${SCRIPT_FILES}
#
printf "\nsubstitution\n"
printf "\n----------\n"
printf "\nmanage network netmask (in etc files) changed to\n"
sed -i "s/${ORIMANAGEMENT_NETWORK_NETMASK}/${MANAGEMENT_NETWORK_NETMASK}/g" ${ETC_FILES}
#
printf "\nsubstitution\n"
printf "\n----------\n"
printf "\nmanage network netmask (in script file) changed to\n"
sed -i "s/${ORIMANAGEMENT_NETWORK_NETMASK}/${MANAGEMENT_NETWORK_NETMASK}/g" ${SCRIPT_FILES}
#
# data network netmask in ${ETC_FILES} and ${SCRIPT_FILES}
#
printf "\nsubstitution\n"
printf "\n----------\n"
printf "\ndata network netmask (in etc files) changed to\n"
sed -i "s/${ORIDATA_TUNNEL_NETWORK_NETMASK}/${DATA_TUNNEL_NETWORK_NETMASK}/g" ${ETC_FILES}
#
printf "\nsubstitution\n"
printf "\n----------\n"
printf "\ndata network netmask (in script file) changed to\n"
sed -i "s/${ORIDATA_TUNNEL_NETWORK_NETMASK}/${DATA_TUNNEL_NETWORK_NETMASK}/g" ${SCRIPT_FILES}
#
# dns ip in ${ETC_FILES} and ${SCRIPT_FILES}
#
printf "\nsubstitution\n"
printf "\n----------\n"
printf "\ndns ip (in etc files) changed to\n"
sed -i "s/${ORIDNS_IP}/${DNS_IP}/g" ${ETC_FILES}
#
printf "\nsubstitution\n"
printf "\n----------\n"
printf "\ndns ip changed to\n"
sed -i "s/${ORIDNS_IP}/${DNS_IP}/g" ${SCRIPT_FILES}
printf "\n----------\n"
#
# keystone-py-url in ${ETC_FILES} and ${SCRIPT_FILES}
#
printf "\nsubstitution\n"
printf "\n----------\n"
printf "\ndns ip (in etc files) changed to\n"
sed -i "s/${ORIKEYSTONE_PY_URL}/${KEYSTONE_PY_URL}/g" ${ETC_FILES}
#
printf "\nsubstitution\n"
printf "\n----------\n"
printf "\ndns ip changed to\n"
sed -i "s/${ORIKEYSTONE_PY_URL}/${KEYSTONE_PY_URL}/g" ${SCRIPT_FILES}
printf "\n----------\n"
#
# get rid of control-m from MS Windows..
#
sed -i "s/
//g" ${ETC_FILES}
sed -i "s/
//g" ${SCRIPT_FILES}
#
printf "\ntar the new OPSInstaller directory\n"
printf "substitution\n"
#
tar cvf OPSInstaller.tar OPSInstaller
mv OPSInstaller.tar OPSInstaller/installer/OPSInstaller.tar
#
printf "Done."
|
kasidit/openstack-ocata-installer
|
exe-config-installer.sh
|
Shell
|
gpl-2.0
| 24,686 |
#!/bin/sh
# Copyright (C) 1999-2005 ImageMagick Studio LLC
#
# This program is covered by multiple licenses, which are described in
# LICENSE. You should have received a copy of LICENSE with this
# package; otherwise see http://www.imagemagick.org/script/license.php.
. ${srcdir}/tests/common.shi
${RUNENV} ${MEMCHECK} ./rwfile -size 70x46 ${SRCDIR}/input_truecolor_70x46.miff YUV
|
atmark-techno/atmark-dist
|
user/imagemagick/tests/rwfile_YUV_truecolor_70x46.sh
|
Shell
|
gpl-2.0
| 382 |
#!/bin/bash
# reboot_droidboot.sh
#
# Function that reboots the device into Droidboot/Fastboot mode
#~ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# A TINY TR10 CLI TOOL FOR GNU/LINUX BASH VERSION 1.0.0
#
# Developer : Erick Carvajal Rodriguez
# Contact : http://twitter.com/neocarvajal && http://fb.com/neocarvajal
# Date : 03/12/2015
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
function reboot_droidboot() {
conections_tr10_tool
clear
	# Device info -------------------------------------------------
echo " "
echo "##################################################"
echo " INFORMACIÓN DEL DISPOSITIVO "
echo "# #"
echo " Emparejado con Pc $HOSTNAME "
echo "# #"
echo " Serial: $SERIAL "
echo "# #"
echo " `date` "
echo "# #"
echo "##################################################"
if [ $ESTADO == $CONECTADO ]; then
echo " "
echo " Entrar en modo Droidboot/Fastboot"
echo "- - - - - - - - - - - - - - - - - - -"
echo " 1 - Reiniciar en modo Droidboot/Fastboot"
echo " 2 - <-- MENÚ PRINCIPAL"
echo "- - - - - - - - - - - - - - - - - - -"
read -p "Seleccione una opción: " opcion
if [ $opcion -eq 1 ]; then
echo " "
read -t 2 -p "Reiniciando en modo Droidboot/Fastboot -- No toque el dispositivo "
echo " "
$ADB reboot-bootloader && $FASTBOOT getvar all
echo " "
clear
echo " "
read -p "Al finalizar puede Presionar Enter para Reiniciar el sistema normalmente "
echo " "
echo "Desea reiniciar el sistema o lo hara manualmente?"
echo " "
echo " Seleccione una opción"
echo "- - - - - - - - - - - - - - - - - - -"
echo " 1 - Reiniciar el sistema normalmente"
echo " 2 - <-- Salir (Reiniciare manualmente)"
echo "- - - - - - - - - - - - - - - - - - -"
read -p "Seleccione una opción: " opcionB
if [ $opcionB -eq 1 ]; then
echo " "
read -t 2 -p "Reiniciando el dispositivo en modo Normal -- No toque el dispositivo "
echo " "
`$FASTBOOT continue`
echo " "
echo "Cerrando conexiones ..."
echo " "
$ADB kill-server
clear
echo "GRACIAS POR USAR ESTA HERRAMIENTA!!!"
echo " "
echo "Puedes colaborar con el desarrollo de una próxima versión con interfaz gráfica"
echo " "
echo "Escríbeme y te haré llegar la documentación necesaria"
echo " "
echo "Erick Carvajal R - @neocarvajal"
echo " "
break
elif [ $opcionB -eq 2 ]; then
echo " "
echo "Cerrando conexiones ..."
echo " "
$ADB kill-server
clear
echo "GRACIAS POR USAR ESTA HERRAMIENTA!!!"
echo " "
echo "Puedes colaborar con el desarrollo de una próxima versión con interfaz gráfica"
echo " "
echo "Escribeme y te haré llegar la documentación necesaria"
echo " "
echo "Erick Carvajal R - @neocarvajal"
echo " "
break
fi
else
echo "Regresando al Menú principal ..."
fi
else
reconect_adb_tr10_tool
main_menu
fi
}
|
WuilmerBolivar/TR10-TOOL
|
FUNCTIONS/3-reboot_droidboot.sh
|
Shell
|
gpl-2.0
| 4,066 |
#!/bin/bash
set -e
: ${DATADIR:=/var/lib/mysql}
: ${MYSQL_ROOT_PASSWORD:=redhat}
: ${REP_USER:=cluster}
: ${REP_PASS:=123456}
: ${CLUSTER_NAME:=my_super_cluster}
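# REP_ADDRESS (the gcomm:// peer list) and REP_NEW (extra mysqld_safe args,
# e.g. --wsrep-new-cluster for the first node) are read from the environment
# below but have no defaults. Illustrative invocation (image name assumed):
# docker run -e MYSQL_ROOT_PASSWORD=secret \
#            -e REP_ADDRESS=10.0.0.1,10.0.0.2 \
#            -e REP_NEW=--wsrep-new-cluster galera-mariadb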
edit_conf(){
cat <<MYCONF >/etc/mysql/my.cnf
[client]
port = 3306
socket = /var/run/mysqld/mysqld.sock
[mysqld_safe]
socket = /var/run/mysqld/mysqld.sock
nice = 0
[mysqld]
user = mysql
pid-file = /var/run/mysqld/mysqld.pid
socket = /var/run/mysqld/mysqld.sock
port = 3306
basedir = /usr
datadir = ${DATADIR}
tmpdir = /tmp
lc_messages_dir = /usr/share/mysql
lc_messages = en_US
skip-external-locking
max_connections = 100
connect_timeout = 5
wait_timeout = 600
max_allowed_packet = 16M
thread_cache_size = 128
sort_buffer_size = 4M
bulk_insert_buffer_size = 16M
tmp_table_size = 32M
max_heap_table_size = 32M
myisam_recover = BACKUP
key_buffer_size = 128M
table_open_cache = 400
myisam_sort_buffer_size = 512M
concurrent_insert = 2
read_buffer_size = 2M
read_rnd_buffer_size = 1M
query_cache_limit = 128K
query_cache_size = 64M
log_warnings = 2
slow_query_log_file = /var/log/mysql/mariadb-slow.log
long_query_time = 10
log_slow_verbosity = query_plan
log_bin = /var/log/mysql/mariadb-bin
log_bin_index = /var/log/mysql/mariadb-bin.index
expire_logs_days = 10
max_binlog_size = 100M
default_storage_engine = InnoDB
innodb_buffer_pool_size = 256M
innodb_log_buffer_size = 8M
innodb_file_per_table = 1
innodb_open_files = 400
innodb_io_capacity = 400
innodb_flush_method = O_DIRECT
binlog-format = ROW
wsrep-provider = /usr/lib/galera/libgalera_smm.so
wsrep-cluster-name = ${CLUSTER_NAME}
wsrep-sst-method = rsync
wsrep_cluster_address = gcomm://${REP_ADDRESS}
wsrep_sst_auth = ${REP_USER}:${REP_PASS}
[mysqldump]
quick
quote-names
max_allowed_packet = 16M
[mysql]
[isamchk]
key_buffer = 16M
!includedir /etc/mysql/conf.d/
MYCONF
}
init_db(){
echo 'Running mysql_install_db ...'
mysql_install_db --datadir="${DATADIR}"
echo 'Finished mysql_install_db'
}
create_sql(){
tempSqlFile='/tmp/mysql-first-time.sql'
echo "generate sql file ${tempSqlFile} begin"
echo "UPDATE mysql.user SET password=password('${MYSQL_ROOT_PASSWORD}') where user='root' ;" >> $tempSqlFile
echo "CREATE USER '${REP_USER}'@'%' IDENTIFIED BY '${REP_PASS}' ;" >> "$tempSqlFile"
echo "GRANT ALL ON *.* TO '$REP_USER'@'%' WITH GRANT OPTION ;" >> "$tempSqlFile"
echo 'FLUSH PRIVILEGES ;' >> "$tempSqlFile"
echo "generate sql file ${tempSqlFile} done"
}
apply_sql(){
echo -en "[import sql] start mysql"
/etc/init.d/mysql start &> /dev/null
[[ $? -ne 0 ]] && echo -en " fail\n" && exit 1
echo -en " done\n"
echo -en "[import sql] import $tempSqlFile"
mysql -e "source $tempSqlFile;"
[[ $? -ne 0 ]] && echo -en " fail\n" && exit 1
echo -en " done\n"
echo -en "[import sql] shutdown mysql"
mysqladmin -uroot -p${MYSQL_ROOT_PASSWORD} shutdown
[[ $? -ne 0 ]] && echo -en " fail\n" && exit 1
echo -en " done\n"
}
if [ ! -d "$DATADIR/mysql" ]; then
init_db
create_sql
apply_sql
edit_conf
chown -R mysql:mysql "${DATADIR}"
echo "`date` run mysql [ /bin/bash /usr/bin/mysqld_safe ${REP_NEW} ]"
exec /bin/bash /usr/bin/mysqld_safe ${REP_NEW}
fi
edit_conf
chown -R mysql:mysql "${DATADIR}"
exec /bin/bash /usr/bin/mysqld_safe
|
bw-y/docker-galera-mariadb
|
entrypoint.sh
|
Shell
|
gpl-2.0
| 3,320 |
#
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This file is executed by build/envsetup.sh, and can use anything
# defined in envsetup.sh.
#
# In particular, you can add lunch options with the add_lunch_combo
# function: add_lunch_combo generic-eng
add_lunch_combo cm_A2109A-userdebug
|
Kahlo007/android_device_lenovo_kai
|
vendorsetup.sh
|
Shell
|
gpl-2.0
| 845 |
#! /bin/bash
#
# This file is part of sutlac-gtk-theme
#
# Copyright (C) 2016-2017 Tista <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
INKSCAPE="`command -v inkscape`"
SRC_FILE="../assets-gtk2.svg"
ASSETS_DIR="../assets-gtk2"
INDEX="assets-gtk2-spin.txt"
KEY_FILE="../../sass/common/_key_colors.scss"
inkver="`$INKSCAPE --version | awk '{print $2}' | cut -c 1-4`"
if [ "$inkver" = 0.91 ]; then
non_scale_dpi=90
else
non_scale_dpi=96
fi
# Renderer
render-non-scale() {
ID=`echo $i | tr '/' '_'`
$INKSCAPE --export-id=$ID \
--export-dpi="$non_scale_dpi" \
--export-png=$ASSETS_DIR/$i.png $SRC_FILE >/dev/null \
2>>../inkscape.log
}
# Generate PNG files
for i in $(<$INDEX)
do
SUB_DIR=`echo $i | cut -f1 -d '/'`
if [ '!' -d $ASSETS_DIR/$SUB_DIR ]; then
mkdir $ASSETS_DIR/$SUB_DIR;
fi
if [ -f $ASSETS_DIR/$i.png ] && [ $KEY_FILE -ot $ASSETS_DIR/$i.png ]; then
echo $ASSETS_DIR/$i.png exists.
elif [ -f $ASSETS_DIR/$i.png ] && [ $KEY_FILE -nt $ASSETS_DIR/$i.png ]; then
echo Re-rendering $ASSETS_DIR/$i.png
echo $i.png >>../inkscape.log
rm -f $ASSETS_DIR/$i.png
render-non-scale
else
echo Rendering $ASSETS_DIR/$i.png
echo $i.png >>../inkscape.log
render-non-scale
fi
done
|
diglam/sutlac-gtk-theme
|
gtk/asset/assets-gtk2-scripts/render-assets-gtk2-spin.sh
|
Shell
|
gpl-2.0
| 1,614 |
#! /bin/sh
# Copyright (C) 2002-2022 Free Software Foundation, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# Check that info files are normally built in $(srcdir),
# not in $(builddir).
required='makeinfo tex texi2dvi'
. test-init.sh
cat >> configure.ac << 'END'
AC_OUTPUT
END
cat > Makefile.am << 'END'
info_TEXINFOS = main.texi
END
cat > main.texi << 'END'
\input texinfo
@setfilename main.info
@settitle main
@node Top
Hello walls.
@include version.texi
@bye
END
$ACLOCAL
$AUTOMAKE --add-missing
$AUTOCONF
mkdir build
cd build
../configure
$MAKE
test -f ../main.info
test ! -e main.info
test -f ../stamp-vti
test ! -e stamp-vti
test -f ../version.texi
test ! -e version.texi
cd ..
rm -rf build main.info* stamp-vti version.texi
./configure
$MAKE
test -f main.info
# Make sure stamp-vti is newer than version.texi.
# (A common situation in a real tree.)
# This is needed to test the "subtle" issue described below.
test -f stamp-vti
test -f version.texi
$sleep
touch stamp-vti
$MAKE distclean
test -f stamp-vti
test -f version.texi
mkdir build
cd build
../configure
$MAKE
# main.info should not be rebuilt in the current directory, since
# it's up-to-date in $(srcdir).
# This can be caused by a subtle issue related to VPATH handling
# of version.texi (see also the comment in texi-vers.am): because
# stamp-vti is newer than version.texi, the 'version.texi: stamp-vti'
# rule is always triggered. Still that's not a reason for 'make'
# to think 'version.texi' has been created...
test ! -e main.info
$MAKE dvi
test -f main.dvi
$MAKE distcheck
:
|
autotools-mirror/automake
|
t/txinfo-info-in-srcdir.sh
|
Shell
|
gpl-2.0
| 2,146 |
#!/bin/bash
### One-time setup instructions ###
# Probably incomplete; see:
# https://docs.github.com/en/enterprise/2.14/user/articles/setting-up-your-github-pages-site-locally-with-jekyll
# 1. Install ruby
# 2. Install bundler
# 3. Install deps
# sudo apt install ruby-dev
# gem install bundler
# bundle install
bundle exec jekyll serve
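# Illustrative variants (standard "jekyll serve" flags, not from the original
# notes): bind to all interfaces and pick an explicit port so the preview is
# reachable from other machines on the LAN:
# bundle exec jekyll serve --host 0.0.0.0 --port 4000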
|
facundoq/facundoq.github.io
|
serve.sh
|
Shell
|
gpl-2.0
| 327 |
#!/bin/bash
############
# Purpose: Unit tests for functional tests
# Authors: Ankshit Jain
# Dependencies: bats (https://github.com/sstephenson/bats)
# Date : 13-March-2018
# Previous Versions: -
# Invocation: $bash unit_tests.sh
###########
# All variables that are exported/imported are in upper case convention. They are:
# TESTDIR : name for the temporary directory where tests will be run
# BATS    : path to the bats executable (imported from the environment)
set -ex
cd ../test_modules
TESTDIR='unit_tests'
export TESTDIR
$BATS bats/unit_tests.bats
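# For reference only (hypothetical test, not part of this repo): a bats file
# consists of @test blocks whose body must exit 0 for the test to pass, e.g.:
# @test "grader accepts a perfect submission" {
#   run ./grade.sh perfect_submission
#   [ "$status" -eq 0 ]
# }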
|
AnkshitJain/Autolab-Local
|
tests/functional_tests/shell/unit_tests.sh
|
Shell
|
gpl-2.0
| 490 |
#!/bin/bash
set -ev
BREWS="qt5 cmake"
for i in $BREWS; do
# Use "if" so an up-to-date package doesn't abort the script under "set -e".
if brew outdated | grep -q $i; then brew upgrade $i; fi
done
for i in $BREWS; do
brew list | grep -q $i || brew install $i
done
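# Illustrative alternative (assumes Homebrew's "brew bundle" subcommand):
# the same dependencies can be declared once in a Brewfile:
# printf 'brew "qt5"\nbrew "cmake"\n' > Brewfile
# brew bundle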
|
mattiascibien/ivygen
|
CI/travis.osx.install.sh
|
Shell
|
gpl-2.0
| 180 |
WIF=$DEVICE
LIF=br-lan
IFCONFIG=/sbin/ifconfig
ROUTE=/sbin/route
IPTABLES=/usr/sbin/iptables
TMP_DIR=/tmp/tmp_file
TMP_PBRIDGE_IP="$TMP_DIR/pbridge_IP"
FWT="$TMP_DIR/pbridge_firewall.sh"
DNS_TC="$TMP_DIR/dnsmasq_pbridge.conf"
mkdir -p "$TMP_DIR"
get_wan_section() {
NAME=`uci get network.wan.ifname`
SECTION="wan"
if [ "$NAME" == "3g-ppp" ]; then
#3g-ppp gets its IP individually, it does not reflect in wan IP
SECTION="ppp"
elif [ "$NAME" == "wwan0" ]; then
SECTION="ppp_dhcp"
fi
echo "$SECTION"
}
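# Illustrative only: get_wan_section is defined above but never called in
# this script; a caller would capture its output to pick the uci section:
# WAN_SECTION=$(get_wan_section)
# method=$(uci get -q network.$WAN_SECTION.method)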
EXTERNAL=
pseudo_bridge()
{
. /lib/functions/network.sh
network_flush_cache
network_get_ipaddr WIP "$INTERFACE"
network_get_gateway WGW "$INTERFACE"
network_get_subnet SUBNET "$INTERFACE"
if [ -z "$WIP" -o -z "$WGW" -o -z "$SUBNET" ]; then
return 1
fi
OLDWIP=`cat $TMP_PBRIDGE_IP 2>/dev/null`
echo "$WIP" >"$TMP_PBRIDGE_IP"
WNM=`ipcalc.sh $SUBNET | grep "NETMASK" | awk -F '=' '{print $2}'`
#/etc/init.d/dnsmasq stop
LIP=`uci get network.lan.ipaddr`
LNM=`uci get network.lan.netmask`
echo "$IPTABLES -t nat -D zone_wan_postrouting -j MASQUERADE" > "$FWT"
echo "$IPTABLES -t nat -A zone_wan_postrouting -s $LIP/$LNM -o $WIF -j SNAT --to-source $WIP" >> "$FWT"
chmod +x "$FWT"
$IFCONFIG $LIF:0 down
if [ "$OLDWIP" != "$WIP" ]; then
ifup lan
fi
# remove WAN IF IP
$IFCONFIG $WIF 0.0.0.0 up
# replace default route to Gateway through WIF
$ROUTE add -host $WGW dev $WIF
$ROUTE add default gw $WGW dev $WIF
# add route to WAN IP through LAN iface
$ROUTE add -host $WIP dev $LIF
# enable proxy_arp so WGW can be used as the gateway on the LAN device
echo "1" >/proc/sys/net/ipv4/conf/$WIF/proxy_arp
echo "1" >/proc/sys/net/ipv4/conf/$LIF/proxy_arp
# replace MASQ on WIF with SNAT
#iptables -F
#iptables -t nat -F
#iptables -t raw -F
#iptables -t mangle -F
#iptables -P FORWARD ACCEPT
#$IPTABLES -t nat -D zone_wan_postrouting -j MASQUERADE
#$IPTABLES -t nat -A zone_wan_postrouting -s $LIP/$LNM -o $WIF -j SNAT --to-source $WIP
#echo "$IPTABLES -t nat -D zone_wan_postrouting -j MASQUERADE" > "$FWT"
#echo "$IPTABLES -t nat -A zone_wan_postrouting -s $LIP/$LNM -o $WIF -j SNAT --to-source $WIP" >> "$FWT"
#chmod +x "$FWT"
# add a bit of extra firewall
#$IPTABLES -t nat -I PREROUTING -i $WIF -d ! $WIP -j DROP
# intercept HTTP port
#logger -t MANO "MANO=======$IPTABLES -t nat -A PREROUTING -i $WIF -p tcp --dport 80 -j DNAT --to $LIP"
#$IPTABLES -t nat -A PREROUTING -i $WIF -p tcp --dport 80 -j DNAT --to $LIP
# setup DHCP server
# set WAN GW as secondary LAN IP for DHCP to work
#$IFCONFIG $LIF:0 $WGW netmask $WNM
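# Explanation (added): the block below picks a free address in the WAN /24
# (one that collides with neither $WIP nor $WGW) and assigns it to the LAN
# alias $LIF:0, so dnsmasq has a local address to serve DHCP from.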
passthrough_dhcp=`uci get -q network.ppp.passthrough_dhcp`
if [ "$passthrough_dhcp" != "no_dhcp" ]; then # nevykdom kai passthrough dhcp mode yra no DHCP
new_WGW=`echo $WIP | awk -F '.' '{print $1"."$2"."$3}'`
if [ "$WGW" != "$new_WGW.1" ] && [ "$WIP" != "$new_WGW.1" ]; then
new_WGW="$new_WGW.1"
elif [ "$WGW" != "$new_WGW.2" ] && [ "$WIP" != "$new_WGW.2" ]; then
new_WGW="$new_WGW.2"
else
new_WGW="$new_WGW.3"
fi
$IFCONFIG $LIF:0 $new_WGW netmask 255.255.255.0
fi
# setup DHCP config
#/etc/init.d/dnsmasq stop
#killall dnsmasq
#rm /tmp/dhcp.leases
#cp /var/etc/dnsmasq.conf "$DNS_TC"
#sed -i "/dhcp-range/d" "$DNS_TC"
rm -f "$DNS_TC"
#echo "dhcp-range=lan,$WIP,$WIP,$WNM,12h" >> "$DNS_TC"
#echo "dhcp-range=lan,192.168.1.160,192.168.1.200,255.255.255.0,12h" >> "$DNS_TC"
#echo "dhcp-range=lan,$WIP,$WIP,$WNM,12h" >> "$DNS_TC"
if [ "$passthrough_dhcp" != "no_dhcp" ]; then
leasetime=`uci get -q network.ppp.leasetime`
echo "dhcp-range=lan,$WIP,$WIP,255.255.255.0,$leasetime" >> "$DNS_TC"
DMAC=`uci get -q network.ppp.mac`
if [ "$DMAC" ]; then
#echo "dhcp-host=$DMAC,192.168.1.151,24h" >> "$DNS_TC"
echo "dhcp-host=$DMAC,$WIP,12h" >> "$DNS_TC"
fi
/etc/init.d/dnsmasq reload
fi
/etc/init.d/firewall reload
#/usr/sbin/dnsmasq -C "$DNS_TC"
}
if [ "$DEVICE" == "eth2" ] || [ "$DEVICE" == "3g-ppp" ] || [ "$DEVICE" == "wwan0" ]; then
ppp_method=`uci get -q network.ppp.method`
ppp_enabled=`uci get -q network.ppp.enabled`
if [ "$ppp_method" == "pbridge" ] && [ "$ppp_enabled" != "0" ]; then
if [ "$ACTION" == "ifup" -o "$ACTION" == "ifupdate" ]; then
#logger -t MANO "Darom"
pseudo_bridge
elif [ "$ACTION" == "ifdown" ]; then
$IFCONFIG $LIF:0 down
rm -f "$DNS_TC"
fi
fi
fi
|
ingran/balzac
|
package/base-files/files/etc/hotplug.d/iface/98-pbridge.sh
|
Shell
|
gpl-2.0
| 4,332 |
#!/bin/bash
###########################################################################################
## Copyright 2003, 2015 IBM Corp ##
## ##
## Redistribution and use in source and binary forms, with or without modification, ##
## are permitted provided that the following conditions are met: ##
## 1.Redistributions of source code must retain the above copyright notice, ##
## this list of conditions and the following disclaimer. ##
## 2.Redistributions in binary form must reproduce the above copyright notice, this ##
## list of conditions and the following disclaimer in the documentation and/or ##
## other materials provided with the distribution. ##
## ##
## THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND ANY EXPRESS  ##
## OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF ##
## MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ##
## THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, ##
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ##
## SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) ##
## HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, ##
## OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ##
## SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ##
############################################################################################
## File : attr.sh
##
## Description: Test the functions provided by star.
##
## Author: Liu Deyan, [email protected]
###########################################################################################
## source the utility functions
#cd `dirname $0`
#LTPBIN=${PWD%%/testcases/*}/testcases/bin
source $LTPBIN/tc_utils.source
################################################################################
# global variables
################################################################################
TESTDIR=${LTPBIN%/shared}/attr/attr-tests
TEST1=${LTPBIN%/shared}/attr/attr-tests/attr.test
################################################################################
# the testcase function
################################################################################
TST_TOTAL=1
function tc_local_setup()
{
tc_exec_or_break grep rpm perl || return
rpm -qa | grep "^perl-[0-9]"
tc_break_if_bad $? "Need full perl installation, not just perl-base" || return
local msg1="'/' filesystem supports "
local msg2="remounting '/' with 'acl' and 'user_xattr' support"
local remountflag=0
tc_info "Check if the '/' filesystem supports 'acl' and 'user_xattr'"
while read dev filesystem systype support
do
if [ "$filesystem" == "/" ]
then
if echo "$support" | grep "acl" &&
echo "$support" | grep "user_xattr"
then
tc_info "$msg1 'acl' and 'user_xattr'"
else
tc_info "$msg2"
support="remount,defaults,errors=remount-ro,acl,user_xattr"
mount -o $support $dev /
tc_break_if_bad $? "remount failed"
fi
fi
done </etc/fstab
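# Note on the loop above (added explanation): /etc/fstab fields are
# "device mountpoint fstype options dump pass"; since "read" stuffs all
# trailing fields into its last variable, $support holds the options plus
# the dump/pass columns, which is harmless for the grep checks.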
}
function test01()
{
tc_register "attr test "
cd $TESTDIR
./run $TEST1 >$stdout 2>$stderr
cmd=$1
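# Note (added): "set" below overwrites the positional parameters, making $1
# the count of lines matching "passed, 0 failed)"; the check that follows
# requires exactly one such line.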
set `cat $stdout | grep "passed, 0 failed)" | wc -l`
[ $1 -eq 1 ]
tc_pass_or_fail $? "attr test"
}
################################################################################
# main
################################################################################
tc_setup
#tc_run_me_only_once
test01
|
PoornimaNayak/autotest-client-tests
|
linux-tools/attr/attr.sh
|
Shell
|
gpl-2.0
| 4,272 |
#!/bin/bash
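# Illustrative usage (invocation assumed from the repo path): ./X.sh openbox
# starts the given window manager inside a nested Xephyr server on display :1.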
test "$1" == "" && echo "Qual WM iniciar?!" && exit 1
Xephyr -name TEST -ac -br -noreset -screen 1024x768 :1 >/dev/null 2>&1 &
sleep 1
DISPLAY=:1.0 $1 &
|
Acidhub/tools
|
X.sh
|
Shell
|
gpl-2.0
| 166 |
#!/bin/bash
## begin license ##
#
# The Meresco Triplestore package consists out of a HTTP server written in Java that
# provides access to an Triplestore with a Sesame Interface, as well as python bindings to
# communicate as a client with the server.
#
# Copyright (C) 2011-2014, 2016 Seecr (Seek You Too B.V.) http://seecr.nl
# Copyright (C) 2011 Seek You Too B.V. (CQ2) http://www.cq2.nl
#
# This file is part of "Meresco Triplestore"
#
# "Meresco Triplestore" is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# "Meresco Triplestore" is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with "Meresco Triplestore"; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
## end license ##
source /usr/share/seecr-tools/functions.d/distro
BUILDDIR=../../build/
test -d ${BUILDDIR} && rm -rf ${BUILDDIR}
mkdir ${BUILDDIR}
JUNIT=/usr/share/java/junit4.jar
if [ ! -f ${JUNIT} ]; then
echo "JUnit is not installed. Please install the junit4 package."
exit 1
fi
JARS=$(find ../../jars -type f -name "*.jar")
CP="$JUNIT:$(echo $JARS | tr ' ' ':'):../../build"
JAVA_VERSION=8
javac=/usr/lib/jvm/java-1.${JAVA_VERSION}.0-openjdk.x86_64/bin/javac
if [ -f /etc/debian_version ]; then
javac=/usr/lib/jvm/java-${JAVA_VERSION}-openjdk-amd64/bin/javac
fi
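# A more portable alternative (sketch; assumes javac is on PATH) would
# resolve the JDK location instead of hard-coding per-distro paths:
# javac=$(readlink -f "$(command -v javac)")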
javaFiles=$(find ../java -name "*.java")
${javac} -d ${BUILDDIR} -cp $CP $javaFiles
if [ "$?" != "0" ]; then
echo "Build failed"
exit 1
fi
javaFiles=$(find . -name "*.java")
${javac} -d ${BUILDDIR} -cp $CP $javaFiles
if [ "$?" != "0" ]; then
echo "Test Build failed"
exit 1
fi
testClasses=$(cd ${BUILDDIR}; find . -name "*Test.class" | sed 's,.class,,g' | tr '/' '.' | sed 's,..,,')
echo "Running $testClasses"
$(dirname ${javac})/java -Xmx1024m -classpath ".:$CP" org.junit.runner.JUnitCore $testClasses
|
seecr/meresco-triplestore
|
server/src/test/alltests.sh
|
Shell
|
gpl-2.0
| 2,283 |
#!/bin/bash
screen -ls
echo
echo Cycling you through them....
sleep 1
for session in `screen -ls | grep Detached | cut -f 2`; do screen -r $session; done
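# How the pipeline above works: "screen -ls" prints lines like
# "<TAB>12345.pts-0.host<TAB>(Detached)", so "cut -f 2" on the default tab
# delimiter yields the session id (e.g. 12345.pts-0.host) passed to -r.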
|
cyclingzealot/bin
|
cycleScreenSessions.bash
|
Shell
|
gpl-2.0
| 157 |
#!/bin/bash
#-------- Crawler -----------
cd /home/daizhaohui/Desktop/dzh/emdata/wyoming_another # change to the project directory
./yesterday.sh /home/daizhaohui/Desktop/dzh/download # directory where downloads are saved
#-------- Import into the database --------------
cd /home/daizhaohui/Desktop/dzh/download/$(date -d "1 day ago" +"%Y-%m-%d") # change to the directory containing the CSV files
#filepath=$(cd "$(dirname "$0")"; pwd)
filepath=$(pwd)
export PGPASSWORD=123
for name in `ls`
do
dir=${filepath}/${name}
if [ -s ${dir} ] ;then
echo 'get it!'
order="copy crawl.wyoming_another from '"${dir}"'" #数据是放在crawl下的wyoming_another表中
/opt/PostgreSQL/9.4/bin/psql -d em2 -U postgres -c "${order}" #执行copy语句
mv ${dir} /home/daizhaohui/Desktop/dzh/done #把copy完毕的csv文件放到done文件下(done可以修改)
else
continue
fi
done
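# Client-side alternative for the COPY above (illustrative): server-side
# COPY requires each CSV to be readable by the postgres server process;
# psql's \copy meta-command reads the file as the client instead, e.g.:
# /opt/PostgreSQL/9.4/bin/psql -d em2 -U postgres -c "\copy crawl.wyoming_another from '/path/to/file.csv'"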
cd /home/daizhaohui/Desktop/dzh/download
rm -rf /home/daizhaohui/Desktop/dzh/download/$(date -d "1 day ago" +"%Y-%m-%d")
|
zhDai/CToFun
|
My_work/shell_scripts_Scrapy/shell_text/wyoming_another_import.sh
|
Shell
|
gpl-2.0
| 1,013 |
#!/bin/bash
BAK_DIR=$HOME/dotfiles_bak
DOT_DIR=$HOME/dotfiles
DOT_FILES=".zshrc .bashrc .Xdefaults .compton.conf .xinitrc .rtorrent.rc"
OB_DIR=$HOME/.config/openbox
OB_FILES="rc.xml autostart"
AWE_DIR=$HOME/.config/awesome
AWE_FILES="rc.lua"
AWE_FOLDERS="lain themes"
echo "Trying to backup your dotfiles."
## If backup directory does not exist, create it
if [ ! -d $BAK_DIR ]; then
echo "$BAK_DIR not found."
echo "Creating one now.."
mkdir -p $BAK_DIR/{openbox,awesome}
sleep 1s
echo "$BAK_DIR has been created."
echo ""
else
echo "$BAK_DIR found."
echo "Creating backup now.."
echo ""
fi
## Make sure we are in $HOME
echo "Changing to $HOME directory.."
cd $HOME
echo "We are not at: $HOME"
echo ""
## Proceed with the backup
for DOT_FILE in $DOT_FILES; do
if [ -f $DOT_FILE ]; then
echo "Creating backup for $DOT_FILE.."
mv $DOT_FILE $BAK_DIR
echo "Successful backup: $DOT_FILE"
echo ""
elif [ ! -f $DOT_FILE ]; then
echo "$DOT_FILE not found."
echo ""
else
echo "Something went wrong."
exit 1
fi
done
if ! type "openbox" > /dev/null 2>&1; then
echo "Openbox is not installed."
else
echo "Changing to openbox directory.."
cd $OB_DIR || echo "Openbox not found."
echo "We are now at: $OB_DIR"
echo ""
for OB_FILE in $OB_FILES; do
if [ -f $OB_FILE ]; then
echo "Creating backup for $OB_FILE.."
mv $OB_FILE $BAK_DIR/openbox
echo "Successful backup: $OB_FILE"
echo ""
elif [ ! -f $OB_FILE ]; then
echo "$OB_FILE not found."
echo ""
else
echo "Something went wrong."
exit 2
fi
done
fi
if ! type "awesome" > /dev/null 2>&1; then
echo "AwesomeWM is not installed."
else
echo "Changing to AwesomeWM directory.."
cd $AWE_DIR
echo "We are now at: $AWE_DIR"
echo ""
for AWE_FILE in $AWE_FILES; do
if [ -f $AWE_FILE ]; then
echo "Creating backup for $AWE_FILE.."
mv $AWE_FILE $BAK_DIR/awesome
echo "Successful backup: $AWE_FILE"
echo ""
elif [ ! -f $AWE_FILE ]; then
echo "$AWE_FILE not found."
echo ""
else
echo "Something went wrong."
exit 3
fi
done
for AWE_FOLDER in $AWE_FOLDERS; do
if [ -d $AWE_FOLDER ]; then
echo "Creating backup for $AWE_FOLDER folder.."
mv $AWE_FOLDER $BAK_DIR/awesome
echo "Successful folder backup: $AWE_FOLDER"
echo ""
elif [ ! -d $AWE_FOLDER ]; then
echo "$AWE_FOLDER does not exist."
echo ""
else
echo "Something went wrong."
exit 4
fi
done
fi
## When backups are done, proceed to PHASE 2 and execute install.sh
echo "Proceeding to PHASE 2.."
if [ -d $DOT_DIR ]; then
sh $DOT_DIR/install_nobackup.sh
else
echo "$DOT_DIR not found. Aborting."
exit 5
fi
## If everything in this script executed successfully, exit
exit 0
|
mavz42/dotfiles
|
install_complete.sh
|
Shell
|
gpl-2.0
| 3,129 |