code (string, 2-1.05M chars) | repo_name (string, 5-110 chars) | path (string, 3-922 chars) | language (1 class) | license (15 classes) | size (int64, 2-1.05M)
---|---|---|---|---|---|
#!/bin/bash -e
source tests/libtest.sh
# Create two namespaces, a veth pair, and assign each end of the pair
# to a different namespace.
create_veth_pair veth green red
create_namespace green
create_namespace red
add_veth_to_namespace green veth.green
add_veth_to_namespace red veth.red
# Normal over shim eth setup in the green namespace
ip netns exec green rlite-ctl ipcp-create green.eth shim-eth edif
ip netns exec green rlite-ctl ipcp-config green.eth netdev veth.green
ip netns exec green rlite-ctl ipcp-config green.eth flow-del-wait-ms 100
ip netns exec green rlite-ctl ipcp-create green.n normal mydif
ip netns exec green rlite-ctl ipcp-config green.n flow-del-wait-ms 100
ip netns exec green rlite-ctl dif-policy-mod mydif addralloc static
ip netns exec green rlite-ctl ipcp-config green.n address 8172
ip netns exec green rlite-ctl ipcp-enroller-enable green.n
ip netns exec green rlite-ctl ipcp-register green.n edif
start_daemon_namespace green rinaperf -lw -z rpinst1
# Normal over shim eth setup in the red namespace
ip netns exec red rlite-ctl ipcp-create red.eth shim-eth edif
ip netns exec red rlite-ctl ipcp-config red.eth netdev veth.red
ip netns exec red rlite-ctl ipcp-config red.eth flow-del-wait-ms 100
ip netns exec red rlite-ctl ipcp-create red.n normal mydif
ip netns exec red rlite-ctl ipcp-config red.n flow-del-wait-ms 100
ip netns exec red rlite-ctl dif-policy-mod mydif addralloc static
ip netns exec red rlite-ctl ipcp-config red.n address 2718
ip netns exec red rlite-ctl ipcp-register red.n edif
ip netns exec red rlite-ctl ipcp-enroll red.n mydif edif
ip netns exec red rinaperf -z rpinst1 -p 1 -c 7 -i 20
| autrimpo/rlite | tests/integration/0025-broadcast-enrollment.sh | Shell | lgpl-2.1 | 1,650 |
#!/bin/bash
set -e
# Check for flags: "${1#-}" strips one leading dash, so a changed value means
# the first argument is a flag and the whole command line goes to infinit.
if [ "${1#-}" != "$1" ]; then
exec infinit "$@"
fi
# Check for known modes.
modes=("acl" "block" "credentials" "daemon" "device" "doctor" "drive" \
"journal" "ldap" "network" "passport" "silo" "user" "volume")
if [[ " ${modes[@]} " =~ " ${1} " ]]; then
exec infinit "$@"
fi
# If no arguments are provided, run infinit.
if [ "$#" = 0 ]; then
exec infinit
fi
# Else default to run whatever the user wanted, like "bash"
exec "$@"
| Dimrok/id | 0.8.0/debian/entrypoint.sh | Shell | apache-2.0 | 486 |
#! /bin/bash
if [[ $TRAVIS_OS_NAME == "osx" ]]; then
brew update
brew install openssl
brew install pyenv
pyenv install $TRAVIS_PYTHON_VERSION
pyenv shell $TRAVIS_PYTHON_VERSION
pyenv local $TRAVIS_PYTHON_VERSION
pyenv global $TRAVIS_PYTHON_VERSION
pyenv rehash
fi
| arthurprs/aerospike-client-python | scripts/travis-prep.sh | Shell | apache-2.0 | 297 |
#!/bin/sh
# Add "freifunk" firewall zone
# If wan/lan is used for olsr then remove these networks from wan/lan zones
# Also setup rules defined in $meshr/etc/config/freifunk and $meshr/etc/config/profile_<community>
. $meshr/lib/functions.sh
. $dir/functions.sh
wan_is_olsr=$(uci -q get meshwizard.netconfig.wan_config)
lan_is_olsr=$(uci -q get meshwizard.netconfig.lan_config)
config_load firewall
# Rename firewall zone for freifunk if unnamed
# If wan is used for olsr then set network for the firewall zone wan to ' ' to remove the wan interface from it, else add local restrict to it
# If lan is used for olsr then set network for the firewall zone lan to ' ' to remove the lan interface from it
handle_fwzone() {
config_get name "$1" name
config_get network "$1" network
if [ "$name" == "freifunk" ]; then
# rename section if unnamed
if [ -z "${1/cfg[0-9a-fA-F]*/}" ]; then
section_rename firewall $1 zone_freifunk
fi
fi
if [ "$name" == "wan" ]; then
if [ "$wan_is_olsr" == 1 ]; then
uci set firewall.$1.network=' ' && uci_commitverbose "WAN is used for olsr, removed the wan interface from zone wan" firewall
else
uci set firewall.$1.local_restrict=1 && uci_commitverbose "Enable local_restrict for zone wan" firewall
fi
fi
if [ "$name" == "lan" ] && [ "$lan_is_olsr" == 1 ]; then
uci set firewall.$1.network=' ' && uci_commitverbose "LAN is used for olsr, removed the lan interface from zone lan" firewall
fi
}
config_foreach handle_fwzone zone
uci batch << EOF
set firewall.zone_freifunk="zone"
set firewall.zone_freifunk.name="freifunk"
set firewall.zone_freifunk.input="$zone_freifunk_input"
set firewall.zone_freifunk.forward="$zone_freifunk_forward"
set firewall.zone_freifunk.output="$zone_freifunk_output"
EOF
uci_commitverbose "Setup firewall zones" firewall
# Usually we need to setup masquerading for lan, except lan is an olsr interface or has an olsr hna-entry
handle_interface() {
config_get interface "$1" interface
if [ "$interface" == "lan" ]; then
no_masq_lan=1
fi
}
config_load olsrd
config_foreach handle_interface Interface
LANIP="$(uci -q get network.lan.ipaddr)"
if [ -n "$LANIP" ]; then
handle_hna() {
config_get netaddr "$1" netaddr
if [ "$LANIP" == "$netaddr" ]; then
no_masq_lan=1
fi
}
config_foreach handle_hna Hna4
fi
currms=$(uci -q get firewall.zone_freifunk.masq_src)
if [ ! "$no_masq_lan" == "1" ] && [ ! "$(uci -q get meshwizard.netconfig.lan_config)" == 1 ]; then
uci set firewall.zone_freifunk.masq="1"
[ -z "$(echo $currms |grep lan)" ] && uci add_list firewall.zone_freifunk.masq_src="lan"
fi
# Rules, Forwardings, advanced config and includes
for config in freifunk profile_$community; do
config_load $config
for section in advanced include fw_rule fw_forwarding; do
handle_firewall() {
local options=$(uci show $config."$1")
options=$(echo "$options" | sed -e "s/fw_//g" -e "s/^$config/firewall/g")
for o in $options; do
uci set $o
done
}
config_foreach handle_firewall $section
done
done
# If we use auto-ipv6-dhcp then allow 547/udp on the freifunk zone
if [ "$ipv6_config" = "auto-ipv6-dhcpv6" ]; then
uci batch <<- EOF
set firewall.dhcpv6=rule
set firewall.dhcpv6.src=freifunk
set firewall.dhcpv6.target=ACCEPT
set firewall.dhcpv6.dest_port=547
set firewall.dhcpv6.proto=udp
EOF
fi
# Firewall rules to allow incoming ssh and web if enabled
if [ "$wan_allowssh" == 1 ]; then
uci batch <<- EOF
set firewall.wanssh=rule
set firewall.wanssh.src=wan
set firewall.wanssh.target=ACCEPT
set firewall.wanssh.proto=tcp
set firewall.wanssh.dest_port=22
EOF
fi
if [ "$wan_allowweb" == 1 ]; then
uci batch <<- EOF
set firewall.wanweb=rule
set firewall.wanweb.src=wan
set firewall.wanweb.target=ACCEPT
set firewall.wanweb.proto=tcp
set firewall.wanweb.dest_port=80
set firewall.wanwebhttps=rule
set firewall.wanwebhttps.src=wan
set firewall.wanwebhttps.target=ACCEPT
set firewall.wanwebhttps.proto=tcp
set firewall.wanwebhttps.dest_port=443
EOF
fi
uci_commitverbose "Setup rules, forwardings, advanced config and includes." firewall
| meshr-net/meshr_win32 | usr/bin/meshwizard/helpers/setup_firewall.sh | Shell | apache-2.0 | 4,166 |
#!/bin/bash
# display usage
if [ $# -ne 1 ]
then
echo "usage: $0 username"
exit 1
fi
username=$1
#here you can use password generator, send password via email, etc.
password="password"
#echo $password
echo $password | passwd --stdin $username
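# Illustrative invocation (the username is hypothetical):
#   ./setpassword.sh deploy   # sets the hardcoded password for user "deploy"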
| wilas/vagrant-puppet-modules | tools/setpassword.sh | Shell | apache-2.0 | 253 |
#!/bin/bash
test_standard_ops() {
# Padded for pretty output
suite_name="STANDARD "
pushd ${home}/manifests > /dev/null
pushd ops-files > /dev/null
if interpolate ""; then
pass "cfcr.yml"
else
fail "cfcr.yml"
fi
# CI & wrapper scripts
check_interpolation "misc/bootstrap.yml" "-l example-vars-files/misc/bootstrap.yml"
check_interpolation "misc/bootstrap.yml" "-o misc/dev.yml" "-l example-vars-files/misc/bootstrap.yml"
# BOSH
check_interpolation "rename.yml" "-v deployment_name=fubar"
check_interpolation "vm-types.yml" "-v master_vm_type=master" "-v worker_vm_type=worker" "-v apply_addons_vm_type=addons"
check_interpolation "add-vm-extensions-to-master.yml"
check_interpolation "use-vm-extensions.yml" "-v deployment_name=cfcr"
check_interpolation "worker_count.yml" "-v worker_count=4"
check_interpolation "non-precompiled-releases.yml"
check_interpolation "use-persistent-disk-for-workers.yml" "-v disk_size=1000"
check_interpolation "disable-swap.yml"
# Infrastructure
check_interpolation "iaas/aws/cloud-provider.yml"
check_interpolation "iaas/aws/lb.yml" "-v kubernetes_cluster_tag=test"
check_interpolation "name:iaas/aws/add-master-credentials.yml" "iaas/aws/cloud-provider.yml" "-o iaas/aws/add-master-credentials.yml" "-v aws_access_key_id_master=access-key-id" "-v aws_secret_access_key_master=secret-access-key"
check_interpolation "name:iaas/aws/add-worker-credentials.yml" "iaas/aws/cloud-provider.yml" "-o iaas/aws/add-worker-credentials.yml" "-v aws_access_key_id_worker=access-key-id" "-v aws_secret_access_key_worker=secret-access-key"
check_interpolation "iaas/azure/cloud-provider.yml" "-l example-vars-files/iaas/azure/cloud-provider.yml"
check_interpolation "name:iaas/azure/use-credentials" "iaas/azure/cloud-provider.yml" "-o iaas/azure/use-credentials.yml " "-l example-vars-files/iaas/azure/cloud-provider.yml" "-v client_id=client" "-v client_secret=secret"
check_interpolation "iaas/gcp/cloud-provider.yml" "-l example-vars-files/iaas/gcp/cloud-provider.yml"
check_interpolation "name:iaas/gcp/add-subnetwork-for-internal-load-balancer.yml" "iaas/gcp/cloud-provider.yml" "-o iaas/gcp/add-subnetwork-for-internal-load-balancer.yml" "-v subnetwork=foo" "-l example-vars-files/iaas/gcp/cloud-provider.yml"
check_interpolation "name:iaas/gcp/add-service-key-master.yml" "iaas/gcp/cloud-provider.yml" "-o iaas/gcp/add-service-key-master.yml" "-v service_key_master=foo" "-l example-vars-files/iaas/gcp/cloud-provider.yml"
check_interpolation "name:iaas/gcp/add-service-key-worker.yml" "iaas/gcp/cloud-provider.yml" "-o iaas/gcp/add-service-key-worker.yml" "-v service_key_worker=foo" "-l example-vars-files/iaas/gcp/cloud-provider.yml"
check_interpolation "iaas/openstack/master-static-ip.yml" "-v kubernetes_master_host=10.11.12.13"
check_interpolation "iaas/openstack/cloud-provider.yml" "-l example-vars-files/iaas/openstack/cloud-provider.yml"
check_interpolation "iaas/vsphere/cloud-provider.yml" "-l example-vars-files/iaas/vsphere/cloud-provider.yml"
check_interpolation "name:iaas/vsphere/set-working-dir-no-rp.yml" "iaas/vsphere/cloud-provider.yml" "-o iaas/vsphere/set-working-dir-no-rp.yml" "-l example-vars-files/iaas/vsphere/set-working-dir-no-rp.yml"
check_interpolation "iaas/vsphere/master-static-ip.yml" "-v kubernetes_master_host=10.11.12.13"
check_interpolation "iaas/vsphere/use-vm-extensions.yml"
check_interpolation "iaas/virtualbox/bosh-lite.yml"
check_interpolation "iaas/azure/subnet.yml"
check_interpolation "iaas/azure/use-cifs.yml"
# HTTP proxy options
check_interpolation "add-proxy.yml" "-v http_proxy=10.10.10.10:8000 -v https_proxy=10.10.10.10:8000 -v no_proxy=localhost,127.0.0.1"
# Syslog
check_interpolation "add-syslog.yml" "-l example-vars-files/add-syslog.yml"
check_interpolation "name:add-syslog-tls.yml" "add-syslog.yml" "-o add-syslog-tls.yml" "-l example-vars-files/add-syslog.yml" "-l example-vars-files/add-syslog-tls.yml"
# Kubernetes
check_interpolation "add-hostname-to-master-certificate.yml" "-v api-hostname=example.com"
check_interpolation "add-oidc-endpoint.yml" "-l example-vars-files/misc/oidc.yml"
check_interpolation "change-audit-log-flags.yml" "-l example-vars-files/change-audit-log-flags.yml"
check_interpolation "addons-spec.yml" "-v addons-spec={}"
check_interpolation "allow-privileged-containers.yml"
check_interpolation "change-cidrs.yml" "-l example-vars-files/new-cidrs.yml"
check_interpolation "disable-anonymous-auth.yml"
check_interpolation "enable-denyescalatingexec.yml"
check_interpolation "enable-podsecuritypolicy.yml"
check_interpolation "enable-securitycontextdeny.yml"
check_interpolation "enable-encryption-config.yml" "-v encryption-config=encryption-config.yml"
check_interpolation "enable-csi-shared-mounts.yml"
check_interpolation "use-hostgw.yml"
check_interpolation "set-fs-inotify-limit.yml" "-l example-vars-files/fs-inotify-limit.yml"
# Etcd
check_interpolation "change-etcd-metrics-url.yml" "-v etcd_metrics_protocol=http -v etcd_metrics_port=2378"
# BBR
check_interpolation "enable-bbr.yml"
# Dev
check_interpolation "kubo-local-release.yml"
# NFS
check_interpolation "enable-nfs.yml"
popd > /dev/null # operations
popd > /dev/null
exit $exit_code
}
| pivotal-cf-experimental/kubo-deployment | bin/test-standard-ops.sh | Shell | apache-2.0 | 5,598 |
# check dependencies
(
type docker &>/dev/null || ( echo "docker is not available"; exit 1 )
type curl &>/dev/null || ( echo "curl is not available"; exit 1 )
)>&2
# Assert that $1 equals the output of the command formed by the remaining arguments
function assert {
local expected_output=$1
shift
actual_output=$("$@")
if ! [ "$actual_output" = "$expected_output" ]; then
echo "expected: \"$expected_output\", actual: \"$actual_output\""
false
fi
}
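# Illustrative usage:
#   assert "hello" echo hello     # matches, stays silent
#   assert "hello" echo goodbye   # prints expected vs. actual and returns non-zero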
# Retry a command $1 times until it succeeds. Wait $2 seconds between retries.
function retry {
local attempts=$1
shift
local delay=$1
shift
local i
for ((i=0; i < attempts; i++)); do
run "$@"
if [ "$status" -eq 0 ]; then
return 0
fi
sleep $delay
done
echo "Command \"$@\" failed $attempts times. Status: $status. Output: $output" >&2
false
}
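# Illustrative usage (relies on the bats "run" wrapper, as above): poll the
# Jenkins login page up to 10 times, 5 seconds apart.
#   retry 10 5 curl --fail --silent "$(get_jenkins_url)/login"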
function get_jenkins_url {
if [ -z "$DOCKER_HOST" ]; then
DOCKER_IP=localhost
else
DOCKER_IP=$(echo $DOCKER_HOST | sed -e 's|tcp://\(.*\):[0-9]*|\1|')
fi
echo "http://$DOCKER_IP:$(docker port $SUT_CONTAINER 8080 | cut -d: -f2)"
}
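# Illustrative mapping (values are hypothetical): with
# DOCKER_HOST=tcp://192.168.99.100:2376 and the container's port 8080
# published on 32768, this prints http://192.168.99.100:32768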
function test_url {
run curl --output /dev/null --silent --head --fail --connect-timeout 30 --max-time 60 $(get_jenkins_url)$1
if [ "$status" -eq 0 ]; then
true
else
echo "URL $(get_jenkins_url)$1 failed" >&2
echo "output: $output" >&2
false
fi
}
| defn/docker | tests/test_helpers.bash | Shell | apache-2.0 | 1,439 |
#!/bin/bash
# Copyright 2018 Frank Breedijk
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Use this file to test the built RPM packages locally. Make sure you are in the rpm directory!
docker-compose up -d
docker-compose exec fedora26 bash -c "cd /root/project;dnf install -y build/seccubus*fc26*rpm"
docker-compose exec fedora26 bash -c "cd /opt/seccubus;hypnotoad seccubus.pl;echo -n >>/dev/tcp/localhost/8443"
docker-compose exec fedora27 bash -c "cd /root/project;dnf install -y build/seccubus*fc27*rpm"
docker-compose exec fedora27 bash -c "cd /opt/seccubus;PERL5LIB=$PERL5LIB:. hypnotoad seccubus.pl;echo -n >>/dev/tcp/localhost/8443"
docker-compose exec fedora28 bash -c "cd /root/project;dnf install -y build/seccubus*fc28*rpm"
docker-compose exec fedora28 bash -c "cd /opt/seccubus;PERL5LIB=$PERL5LIB:. hypnotoad seccubus.pl;echo -n >>/dev/tcp/localhost/8443"
docker-compose exec fedora29 bash -c "cd /root/project;dnf install -y build/seccubus*fc29*rpm"
docker-compose exec fedora29 bash -c "cd /opt/seccubus;PERL5LIB=$PERL5LIB:. hypnotoad seccubus.pl;echo -n >>/dev/tcp/localhost/8443"
docker-compose exec centos7 bash -c "cd /root/project;yum -y install epel-release;yum install -y build/perl*rpm build/seccubus*el7*rpm"
docker-compose exec centos7 bash -c "cd /opt/seccubus;hypnotoad seccubus.pl;echo -n >/dev/tcp/localhost/8443"
if [ $? == 0 ] ; then
echo "Done building, shutting down docker image in 10 secoonds..."
sleep 10
docker-compose down
fi
exit;
| schubergphilis/Seccubus | rpm/test-locally.sh | Shell | apache-2.0 | 1,977 |
#!/bin/bash
#
# Copyright 2005-2014 Red Hat, Inc.
#
# Red Hat licenses this file to you under the Apache License, version
# 2.0 (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
## defines the default environment settings
# add something like this:
#
# export MAIN="MyClassName"
| jstrachan/jube | images/fabric8/java/src/main/distro/env.sh | Shell | apache-2.0 | 738 |
#!/bin/sh
rm -f /sbin/fsck.minix
rm -f /sbin/fstrim
rm -f /sbin/installkernel
rm -f /sbin/losetup
rm -rf /bin/arping
rm -rf /bin/attr
rm -rf /bin/chown
rm -rf /bin/chroot
rm -rf /bin/dmesg
rm -rf /bin/egrep
rm -rf /bin/login
rm -rf /bin/mount
rm -rf /bin/nano
rm -rf /bin/ping4
rm -rf /bin/run-parts
rm -rf /bin/su
rm -rf /boot
rm -rf /etc/conf.d
rm -rf /etc/conf.d/pydoc-*
rm -rf /etc/default
rm -rf /etc/DIR_COLORS
rm -rf /etc/fstab
rm -rf /etc/gai.conf
rm -rf /etc/init.d
rm -rf /etc/issue
rm -rf /etc/issue.logo
rm -rf /etc/logrotate.d
rm -rf /etc/modprobe.d
rm -rf /etc/nanorc
rm -rf /etc/rc.conf
rm -rf /etc/rpc
rm -rf /etc/rsyncd.conf
rm -rf /etc/sandbox.conf
rm -rf /etc/skel
rm -rf /etc/sysctl.conf
rm -rf /etc/terminfo
rm -rf /etc/udev
rm -rf /etc/wgetrc
rm -rf /etc/xattr.conf
rm -rf /etc/xinetd.d
rm -rf /home
rm -rf /lib/systemd
rm -rf /media
rm -rf /mnt
rm -rf /opt
rm -rf /root
rm -rf /sbin/blockdev
rm -rf /sbin/consoletype
rm -rf /sbin/ctrlaltdel
rm -rf /sbin/fdisk
rm -rf /sbin/findfs
rm -rf /sbin/getcap
rm -rf /sbin/getpcaps
rm -rf /sbin/mkfs
rm -rf /sbin/setcap
rm -rf /sbin/sfdisk
rm -rf /sbin/sln
rm -rf /tmp/*
rm -rf /usr/lib/cracklib_dict.hwm
rm -rf /usr/lib/cracklib_dict.pwd
rm -rf /usr/lib/cracklib_dict.pwi
rm -rf /usr/lib/python3*/
rm -rf /usr/lib/systemd
rm -rf /usr/lib64/locale
rm -rf /usr/lib64/python*/site-packages/allmydata/test
rm -rf /usr/lib64/python*/site-packages/portage/tests
rm -rf /usr/lib64/python*/site-packages/twisted/test
rm -rf /usr/lib64/python*/test
rm -rf /usr/local
rm -rf /usr/portage/distfiles/
rm -rf /usr/sbin/delpart
rm -rf /usr/sbin/logoutd
rm -rf /usr/sbin/partx
rm -rf /usr/sbin/resizepart
rm -rf /usr/sbin/rfkill
rm -rf /usr/share/applications
rm -rf /usr/share/bash-completion
rm -rf /usr/share/doc/
rm -rf /usr/share/gtk-doc/
rm -rf /usr/share/info
rm -rf /usr/share/locale
rm -rf /usr/share/man
rm -rf /usr/share/nano
rm -rf /usr/share/portage/config/make.conf.example
rm -rf /usr/share/vim
rm -rf /usr/share/zsh
rm -rf /usr/src
rm -rf /var/cache
rm -rf /var/cache/*
rm -rf /var/lib/gentoo/news
rm -rf /var/log/*
rm -rf /var/tmp/portage
rm /bin/findmnt
rm /bin/umount
rm /bin/wdctl
rm /sbin/agetty
rm /sbin/blkdiscard
rm /sbin/blkid
rm /sbin/blkzone
rm /sbin/fsck
rm /sbin/mkfs.bfs
rm /sbin/mkfs.minix
rm /sbin/mkswap
rm /sbin/pivot_root
rm /sbin/raw
rm /sbin/sulogin
rm /sbin/swaplabel
rm /sbin/swapon
rm /sbin/switch_root
rm /sbin/wipefs
rm /sbin/zramctl
rm /usr/sbin/addpart
| vrusinov/copr-sundry | docker/python2/clean.sh | Shell | apache-2.0 | 2,447 |
if false; then {
git clone https://github.com/torch/distro.git ~/torch
cd ~/torch
for pkg in cudnn cunn cunnx cutorch qttorch trepl graph optim sdl2 threads submodule graphicsmagick audio fftw3 signal nnx qtlua gnuplot dok iTorch argcheck image xlua; do { sed -i -e "s/\(.*$pkg.*\)/echo skipping $pkg # \1/" install.sh; } done
awk 'NR==2{print "set -x"}1' install.sh > ~install.sh
mv ~install.sh install.sh
chmod +x install.sh
cat install.sh
for pkg in exe/luajit-rocks extra/nn pkg/cwrap pkg/paths pkg/sundown pkg/sys pkg/torch pkg/paths extra/lua-cjson extra/luaffifb extra/luafilesystem extra/penlight; do { git submodule update --quiet --init $pkg; } done
sed -i -e 's/$(MAKE)/$(MAKE) -j 4/' pkg/torch/rocks/torch-scm-1.rockspec
./install.sh -b >/dev/null
} else {
mkdir -p ~/torch
cd ~/torch
wget https://s3.amazonaws.com/hughperkinstravis/hughperkins/distro/3/3.1/torch-install.tar.bz2
tar -xf torch-install.tar.bz2
} fi
sed -i -e 's/^export LD_LIBRARY_PATH/# export LD_LIBRARY_PATH/' ~/torch/install/bin/torch-activate
sed -i -e 's/^export DYLD_LIBRARY_PATH/# export DYLD_LIBRARY_PATH/' ~/torch/install/bin/torch-activate
source ~/torch/install/bin/torch-activate
luajit -l torch -e 'print(torch.Tensor(3,2):uniform())'
| hughperkins/clnn | travis/install-torch.sh | Shell | bsd-2-clause | 1,261 |
#!/usr/bin/env bash
# This script builds the siapool inside a docker,
# it then creates a docker from scratch containing just the built binary from the previous step
# and then pushes the resulting image to hub.docker.com
set -e
docker build -t siapoolbuilder .
docker run --rm -v "$PWD":/go/src/github.com/robvanmieghem/siapool --entrypoint go siapoolbuilder build -ldflags '-s' -v -o dist/siapool
docker build -t robvanmieghem/siapool:latest -f DockerfileMinimal .
docker push robvanmieghem/siapool:latest
| siapool/p2pool | publishdocker.sh | Shell | bsd-2-clause | 512 |
#!/bin/bash
set -e
PG_LOG=/var/log/postgresql/
PG_CONFIG=/etc/pgbouncer/pgbouncer.ini
PG_USER=postgres
mkdir -p ${PG_LOG}
chmod -R 755 ${PG_LOG}
chown -R ${PG_USER}:${PG_USER} ${PG_LOG}
echo "Starting pgbouncer as deamon..."
cd /root
nohup pgbouncer -q -u ${PG_USER} $PG_CONFIG -d
cd /var/www/html
echo "Starting apache in foreground..."
exec apache2-foreground
| bravoman/docker-php-apache-pgbouncer | docker/php56/entrypoint.sh | Shell | mit | 365 |
#!/bin/bash
# Specially created e2image dump to test backup superblock migration for
# new convert.
# These images will cause the following problems if convert doesn't handle
# backup superblock migration well:
# 1) Assert while building free space tree
# 2) Error copying inodes
# 3) Discontinuous file extents after convert
# 4) Overlap file extents
# 5) Unable to rollback
source "$TEST_TOP/common"
check_prereq btrfs-convert
check_prereq btrfs
check_global_prereq e2fsck
check_global_prereq xzcat
setup_root_helper
prepare_test_dev
# override common function
check_image() {
TEST_DEV="$1"
run_check e2fsck -n -f "$TEST_DEV"
run_check "$TOP/btrfs-convert" "$TEST_DEV"
run_check "$TOP/btrfs" check "$TEST_DEV"
run_check "$TOP/btrfs" inspect-internal dump-super "$TEST_DEV"
run_check_mount_test_dev
run_check $SUDO_HELPER e2fsck -n -f "$TEST_MNT/ext2_saved/image"
run_check $SUDO_HELPER umount "$TEST_MNT"
run_check "$TOP/btrfs" check "$TEST_DEV"
run_check "$TOP/btrfs-convert" -r "$TEST_DEV"
run_check e2fsck -n -f "$TEST_DEV"
rm -f "$TEST_DEV"
}
check_all_images
| adam900710/btrfs-progs | tests/convert-tests/004-ext2-backup-superblock-ranges/test.sh | Shell | gpl-2.0 | 1,087 |
#!/bin/sh
DOCDIR=doc/
APIDOCDIR=$DOCDIR/api
SRCDIR=libmushu/
rm -rf $APIDOCDIR
mkdir -p $APIDOCDIR
sphinx-apidoc -o $APIDOCDIR $SRCDIR
cd $DOCDIR
make html
| venthur/mushu | generate_doc.sh | Shell | gpl-2.0 | 160 |
#!/bin/bash
# This file is part of cc-oci-runtime.
#
# Copyright (C) 2017 Intel Corporation
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
# Description: This script installs Clear Containers on a
# CentOS 7 or RHEL 7 system.
#
SCRIPT_PATH=$(dirname "$(readlink -f "$0")")
source "${SCRIPT_PATH}/installation-setup.sh"
source "${SCRIPT_PATH}/../versions.txt"
# List of packages to install to satisfy build dependencies
pkgs=""
mnl_dev_pkg="libmnl-devel"
# general
pkgs+=" zlib-devel"
pkgs+=" gettext-devel"
pkgs+=" libtool-ltdl-devel"
pkgs+=" libtool-ltdl"
pkgs+=" glib2-devel"
pkgs+=" bzip2"
pkgs+=" m4"
# for yum-config-manager
pkgs+=" yum-utils"
# runtime dependencies
pkgs+=" libuuid-devel"
pkgs+=" libmnl"
pkgs+=" ${mnl_dev_pkg}"
pkgs+=" libffi-devel"
pkgs+=" pcre-devel"
# qemu lite dependencies
pkgs+=" libattr-devel"
pkgs+=" libcap-devel"
pkgs+=" libcap-ng-devel"
pkgs+=" pixman-devel"
pkgs+=" gcc-c++"
if [ "$os_distribution" = rhel ]
then
# RHEL doesn't provide "*-devel" packages unless the "Optional RPMS"
# repository is enabled. However, to make life fun, there isn't a
# clean way to determine if that repository is enabled (the output
# format of "yum repolist" seems to change frequently and
# subscription-manager's output isn't designed for easy script
# consumption).
#
# Therefore, the safest approach seems to be to check if a known
# required development package is known to yum(8). If it isn't, the
# user will need to enable the extra repository.
#
# Note that this issue is unique to RHEL: yum on CentOS provides
# access to development packages by default.
yum info "${mnl_dev_pkg}" >/dev/null 2>&1
if [ $? -ne 0 ]
then
echo >&2 "ERROR: You must enable the 'optional' repository for '*-devel' packages"
exit 1
fi
fi
if [ "$os_distribution" = rhel ]
then
cc_repo_url="http://download.opensuse.org/repositories/home:/clearlinux:/preview:/clear-containers-2.1/RHEL_7/home:clearlinux:preview:clear-containers-2.1.repo"
elif [ "$os_distribution" = centos ]
then
cc_repo_url="http://download.opensuse.org/repositories/home:/clearlinux:/preview:/clear-containers-2.1/CentOS_7/home:clearlinux:preview:clear-containers-2.1.repo"
else
echo >&2 "ERROR: Unrecognised distribution: $os_distribution"
echo >&2 "ERROR: This script is designed to work on CentOS and RHEL systems only."
exit 1
fi
sudo yum -y update
eval sudo yum -y install "$pkgs"
sudo yum groupinstall -y 'Development Tools'
pushd "$deps_dir"
# Install pre-requisites for gcc
curl -L -O ftp://gcc.gnu.org/pub/gcc/infrastructure/gmp-${gmp_version}.tar.bz2
compile gmp gmp-${gmp_version}.tar.bz2 gmp-${gmp_version}
curl -L -O ftp://gcc.gnu.org/pub/gcc/infrastructure/mpfr-${mpfr_version}.tar.bz2
compile mpfr mpfr-${mpfr_version}.tar.bz2 mpfr-${mpfr_version}
curl -L -O ftp://gcc.gnu.org/pub/gcc/infrastructure/mpc-${mpc_version}.tar.gz
compile mpc mpc-${mpc_version}.tar.gz mpc-${mpc_version}
# Install go
go_setup
# Install glib
glib_setup
# Install json-glib
json_glib_setup
# Install gcc
gcc_setup
# Install qemu-lite
qemu_lite_setup
popd
# Install docker
sudo mkdir -p /etc/yum.repos.d/
sudo tee /etc/yum.repos.d/docker.repo <<EOF
[dockerrepo]
name=Docker Repository
baseurl=https://yum.dockerproject.org/repo/main/centos/7/
enabled=1
gpgcheck=1
gpgkey=https://yum.dockerproject.org/gpg
EOF
sudo yum install -y \
docker-engine-1.12.1-1.el7.centos.x86_64 \
docker-engine-selinux-1.12.1-1.el7.centos.noarch
# Install kernel and CC image
sudo yum-config-manager --add-repo "${cc_repo_url}"
sudo yum -y install linux-container clear-containers-image
# Configure cc-oci-runtime
export PATH=$PATH:"${prefix_dir}"/go/bin
cor=github.com/01org/cc-oci-runtime
# Currently it is the latest version for cc-oci-runtime
commit=tags/2.1.9
release=${commit##*/}
go get "$cor"
pushd "$GOPATH/src/$cor"
git checkout -b "$release" "$commit"
# autoconf-archive package does not exist in RHEL we need to download all m4
# files required
source "${SCRIPT_PATH}/curl-autoconf-archive.sh"
./autogen.sh \
--prefix="${prefix_dir}" \
--disable-tests \
--disable-functional-tests \
--enable-autogopath \
--with-cc-image=/usr/share/clear-containers/clear-containers.img \
--with-cc-kernel=/usr/share/clear-containers/vmlinux.container \
--with-qemu-path="${prefix_dir}"/bin/qemu-system-x86_64
make -j5 && sudo make install
popd
# Configure CC by default
sudo mkdir -p /etc/systemd/system/docker.service.d/
cat <<EOF|sudo tee /etc/systemd/system/docker.service.d/clr-containers.conf
[Service]
ExecStart=
ExecStart=/usr/bin/dockerd -D --add-runtime cor=${prefix_dir}/bin/cc-oci-runtime --default-runtime=cor
EOF
sudo systemctl daemon-reload
sudo systemctl restart docker
sudo systemctl start cc-proxy.socket
| devimc/cc-oci-runtime | installation/rhel-setup.sh | Shell | gpl-2.0 | 5,505 |
#!/bin/bash
###########################################################################################
## Copyright 2003, 2015 IBM Corp ##
## ##
## Redistribution and use in source and binary forms, with or without modification, ##
## are permitted provided that the following conditions are met: ##
## 1.Redistributions of source code must retain the above copyright notice, ##
## this list of conditions and the following disclaimer. ##
## 2.Redistributions in binary form must reproduce the above copyright notice, this ##
## list of conditions and the following disclaimer in the documentation and/or ##
## other materials provided with the distribution. ##
## ##
## THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS AND ANY EXPRESS ##
## OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF ##
## MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ##
## THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, ##
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF ##
## SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) ##
## HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, ##
## OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ##
## SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ##
############################################################################################
### File : pyxdg.sh ##
##
### Description: Test for pyxdg package ##
##
### Author: Athira Rajeev <[email protected]> ##
###########################################################################################
######cd $(dirname $0)
#LTPBIN=${LTPBIN%/shared}/pyxdg
source $LTPBIN/tc_utils.source
TESTS_DIR="${LTPBIN%/shared}/pyxdg/test"
REQUIRED="nosetests"
function tc_local_setup()
{
tc_exec_or_break $REQUIRED || return
}
function install_check()
{
tc_check_package pyxdg
tc_break_if_bad $? "pyxdg not installed"
}
function run_test()
{
pushd $TESTS_DIR >$stdout 2>$stderr
TESTS=`ls test-*.py`
TST_TOTAL=`echo $TESTS | wc -w`
for t in $TESTS; do
tc_register $t
nosetests $t >$stdout 2>$stderr
RC=$?
if [ `grep OK $stderr | wc -l` -eq 1 ];then cat /dev/null > $stderr; fi
tc_pass_or_fail $RC "$t failed"
done
popd >$stdout 2>$stderr
}
#
# main
#
tc_setup
TST_TOTAL=1
install_check && run_test
| rajashreer7/autotest-client-tests | linux-tools/pyxdg/pyxdg.sh | Shell | gpl-2.0 | 3,005 |
#!/bin/sh
#
# Copyright 2005-2012 Intel Corporation. All Rights Reserved.
#
# This file is part of Threading Building Blocks.
#
# Threading Building Blocks is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License
# version 2 as published by the Free Software Foundation.
#
# Threading Building Blocks is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty
# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Threading Building Blocks; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# As a special exception, you may use this file as part of a free software
# library without restriction. Specifically, if other files instantiate
# templates or use macros or inline functions from this file, or you compile
# this file and link it with other files to produce an executable, this
# file does not by itself cause the resulting executable to be covered by
# the GNU General Public License. This exception does not however
# invalidate any other reasons why the executable file might be covered by
# the GNU General Public License.
# Script used to generate version info string
echo "#define __TBB_VERSION_STRINGS(N) \\"
echo '#N": BUILD_HOST'"\t\t"`hostname -s`" ("`arch`")"'" ENDL \'
echo '#N": BUILD_OS'"\t\t"`sw_vers -productName`" version "`sw_vers -productVersion`'" ENDL \'
echo '#N": BUILD_KERNEL'"\t"`uname -v`'" ENDL \'
echo '#N": BUILD_GCC'"\t\t"`gcc -v </dev/null 2>&1 | grep 'version'`'" ENDL \'
[ -z "$COMPILER_VERSION" ] || echo '#N": BUILD_COMPILER'"\t"$COMPILER_VERSION'" ENDL \'
echo '#N": BUILD_TARGET'"\t$arch on $runtime"'" ENDL \'
echo '#N": BUILD_COMMAND'"\t"$*'" ENDL \'
echo ""
echo "#define __TBB_DATETIME \""`date -u`"\""
| jckarter/tbb | build/version_info_macos.sh | Shell | gpl-2.0 | 1,972 |
docker run --privileged -v /dev/disk/by-id:/dev/disk/by-id:ro v2v -vv --log file=/dev/ttyS1,level=debug --log file=/dev/ttyS2,level=trace --report file=/dev/ttyS3 --report file=http://metadata.google.internal/computeMetadata/v1/instance/guest-attributes/v2v/report,headers=Metadata-Flavor=Google --drive file=/dev/disk/by-id/google-workload-disk1 --in-place
| adjackura/compute-image-tools | daisy_integration_tests/scripts/v2v_adapt.sh | Shell | apache-2.0 | 357 |
#puppetfactory VM specific cleanup tasks go here
| johnduarte/puppetlabs-training-bootstrap | packer/scripts/puppetfactory_cleanup.sh | Shell | apache-2.0 | 49 |
#!/bin/sh
# Copyright 2018 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Generate go dependencies, for make. Uses "go list".
# Usage: makedepend.sh [-t] output package path [extra]
PATH_FORMAT='{{ .ImportPath }}{{"\n"}}{{join .Deps "\n"}}'
FILE_FORMAT='{{ range $file := .GoFiles }} {{$.Dir}}/{{$file}}{{"\n"}}{{end}}'
if [ "$1" = "-t" ]
then
PATH_FORMAT='{{ if .TestGoFiles }} {{.ImportPath}} {{end}}'
shift
fi
out=$1
pkg=$2
path=$3
extra=$4
# check for mandatory parameters
test -n "$out$pkg$path" || exit 1
echo "$out: $extra\\"
go list -f "$PATH_FORMAT" $path |
grep "$pkg" |
xargs go list -f "$FILE_FORMAT" |
sed -e "s|^ ${GOPATH}| \$(GOPATH)|;s/$/ \\\\/"
echo " #"
| dims/minikube | makedepend.sh | Shell | apache-2.0 | 1,234 |
#!/usr/bin/env bash
export RUN_DOCKER=true
export TEST_DEPLOYMENTS=true
export SMOKE_TESTS=true
export RUN_APIMAN=true
export CLEANUP=true
| apiman/apiman-test | tools/scripts/travis/scriptrc.sh | Shell | apache-2.0 | 140 |
backuppath=$PATH
PATH="$(remove_from_path "${JENV_ROOT}/shims")"
SCALA_BIN="$(command -v "scala" || true)"
SCALAC_BIN="$(command -v "scalac" || true)"
PATH=$backuppath
make_shims "$SCALA_BIN"
make_shims "$SCALAC_BIN"
| entrypass/jenv | available-plugins/scala/etc/jenv.d/rehash/scala.bash | Shell | mit | 232 |
#!/bin/sh
# Copyright (C) 1999-2006 ImageMagick Studio LLC
#
# This program is covered by multiple licenses, which are described in
# LICENSE. You should have received a copy of LICENSE with this
# package; otherwise see http://www.imagemagick.org/script/license.php.
. ${srcdir}/tests/common.shi
${RUNENV} ${MEMCHECK} ./rwfile ${SRCDIR}/input_pallette.miff MTV
| ipwndev/DSLinux-Mirror | user/imagemagick/src/tests/rwfile_MTV_pallette.sh | Shell | gpl-2.0 | 363 |
#!/bin/sh
#
# Copyright (c) 2007 Steven Grimm
#
test_description='git commit
Tests for template, signoff, squash and -F functions.'
. ./test-lib.sh
. "$TEST_DIRECTORY"/lib-rebase.sh
commit_msg_is () {
expect=commit_msg_is.expect
actual=commit_msg_is.actual
printf "%s" "$(git log --pretty=format:%s%b -1)" >"$actual" &&
printf "%s" "$1" >"$expect" &&
test_cmp "$expect" "$actual"
}
# A sanity check to see if commit is working at all.
test_expect_success 'a basic commit in an empty tree should succeed' '
echo content > foo &&
git add foo &&
git commit -m "initial commit"
'
test_expect_success 'nonexistent template file should return error' '
echo changes >> foo &&
git add foo &&
(
GIT_EDITOR="echo hello >\"\$1\"" &&
export GIT_EDITOR &&
test_must_fail git commit --template "$PWD"/notexist
)
'
test_expect_success 'nonexistent template file in config should return error' '
test_config commit.template "$PWD"/notexist &&
(
GIT_EDITOR="echo hello >\"\$1\"" &&
export GIT_EDITOR &&
test_must_fail git commit
)
'
# From now on we'll use a template file that exists.
TEMPLATE="$PWD"/template
test_expect_success 'unedited template should not commit' '
echo "template line" > "$TEMPLATE" &&
test_must_fail git commit --template "$TEMPLATE"
'
test_expect_success 'unedited template with comments should not commit' '
echo "# comment in template" >> "$TEMPLATE" &&
test_must_fail git commit --template "$TEMPLATE"
'
test_expect_success 'a Signed-off-by line by itself should not commit' '
(
test_set_editor "$TEST_DIRECTORY"/t7500/add-signed-off &&
test_must_fail git commit --template "$TEMPLATE"
)
'
test_expect_success 'adding comments to a template should not commit' '
(
test_set_editor "$TEST_DIRECTORY"/t7500/add-comments &&
test_must_fail git commit --template "$TEMPLATE"
)
'
test_expect_success 'adding real content to a template should commit' '
(
test_set_editor "$TEST_DIRECTORY"/t7500/add-content &&
git commit --template "$TEMPLATE"
) &&
commit_msg_is "template linecommit message"
'
test_expect_success '-t option should be short for --template' '
echo "short template" > "$TEMPLATE" &&
echo "new content" >> foo &&
git add foo &&
(
test_set_editor "$TEST_DIRECTORY"/t7500/add-content &&
git commit -t "$TEMPLATE"
) &&
commit_msg_is "short templatecommit message"
'
test_expect_success 'config-specified template should commit' '
echo "new template" > "$TEMPLATE" &&
test_config commit.template "$TEMPLATE" &&
echo "more content" >> foo &&
git add foo &&
(
test_set_editor "$TEST_DIRECTORY"/t7500/add-content &&
git commit
) &&
commit_msg_is "new templatecommit message"
'
test_expect_success 'explicit commit message should override template' '
echo "still more content" >> foo &&
git add foo &&
GIT_EDITOR="$TEST_DIRECTORY"/t7500/add-content git commit --template "$TEMPLATE" \
-m "command line msg" &&
commit_msg_is "command line msg"
'
test_expect_success 'commit message from file should override template' '
echo "content galore" >> foo &&
git add foo &&
echo "standard input msg" |
(
test_set_editor "$TEST_DIRECTORY"/t7500/add-content &&
git commit --template "$TEMPLATE" --file -
) &&
commit_msg_is "standard input msg"
'
cat >"$TEMPLATE" <<\EOF
### template
EOF
test_expect_success 'commit message from template with whitespace issue' '
echo "content galore" >>foo &&
git add foo &&
GIT_EDITOR=\""$TEST_DIRECTORY"\"/t7500/add-whitespaced-content \
git commit --template "$TEMPLATE" &&
commit_msg_is "commit message"
'
test_expect_success 'using alternate GIT_INDEX_FILE (1)' '
cp .git/index saved-index &&
(
echo some new content >file &&
GIT_INDEX_FILE=.git/another_index &&
export GIT_INDEX_FILE &&
git add file &&
git commit -m "commit using another index" &&
git diff-index --exit-code HEAD &&
git diff-files --exit-code
) &&
cmp .git/index saved-index >/dev/null
'
test_expect_success 'using alternate GIT_INDEX_FILE (2)' '
cp .git/index saved-index &&
(
rm -f .git/no-such-index &&
GIT_INDEX_FILE=.git/no-such-index &&
export GIT_INDEX_FILE &&
git commit -m "commit using nonexistent index" &&
test -z "$(git ls-files)" &&
test -z "$(git ls-tree HEAD)"
) &&
cmp .git/index saved-index >/dev/null
'
cat > expect << EOF
zort
Signed-off-by: C O Mitter <[email protected]>
EOF
test_expect_success '--signoff' '
echo "yet another content *narf*" >> foo &&
echo "zort" | git commit -s -F - foo &&
git cat-file commit HEAD | sed "1,/^\$/d" > output &&
test_cmp expect output
'
test_expect_success 'commit message from file (1)' '
mkdir subdir &&
echo "Log in top directory" >log &&
echo "Log in sub directory" >subdir/log &&
(
cd subdir &&
git commit --allow-empty -F log
) &&
commit_msg_is "Log in sub directory"
'
test_expect_success 'commit message from file (2)' '
rm -f log &&
echo "Log in sub directory" >subdir/log &&
(
cd subdir &&
git commit --allow-empty -F log
) &&
commit_msg_is "Log in sub directory"
'
test_expect_success 'commit message from stdin' '
(
cd subdir &&
echo "Log with foo word" | git commit --allow-empty -F -
) &&
commit_msg_is "Log with foo word"
'
test_expect_success 'commit -F overrides -t' '
(
cd subdir &&
echo "-F log" > f.log &&
echo "-t template" > t.template &&
git commit --allow-empty -F f.log -t t.template
) &&
commit_msg_is "-F log"
'
test_expect_success 'Commit without message is allowed with --allow-empty-message' '
echo "more content" >>foo &&
git add foo &&
>empty &&
git commit --allow-empty-message <empty &&
commit_msg_is "" &&
git tag empty-message-commit
'
test_expect_success 'Commit without message is no-no without --allow-empty-message' '
echo "more content" >>foo &&
git add foo &&
>empty &&
test_must_fail git commit <empty
'
test_expect_success 'Commit a message with --allow-empty-message' '
echo "even more content" >>foo &&
git add foo &&
git commit --allow-empty-message -m"hello there" &&
commit_msg_is "hello there"
'
test_expect_success 'commit -C empty respects --allow-empty-message' '
echo more >>foo &&
git add foo &&
test_must_fail git commit -C empty-message-commit &&
git commit -C empty-message-commit --allow-empty-message &&
commit_msg_is ""
'
commit_for_rebase_autosquash_setup () {
echo "first content line" >>foo &&
git add foo &&
cat >log <<EOF &&
target message subject line
target message body line 1
target message body line 2
EOF
git commit -F log &&
echo "second content line" >>foo &&
git add foo &&
git commit -m "intermediate commit" &&
echo "third content line" >>foo &&
git add foo
}
test_expect_success 'commit --fixup provides correct one-line commit message' '
commit_for_rebase_autosquash_setup &&
EDITOR="echo ignored >>" git commit --fixup HEAD~1 &&
commit_msg_is "fixup! target message subject line"
'
test_expect_success 'commit --fixup -m"something" -m"extra"' '
commit_for_rebase_autosquash_setup &&
git commit --fixup HEAD~1 -m"something" -m"extra" &&
commit_msg_is "fixup! target message subject linesomething
extra"
'
test_expect_success 'commit --fixup --edit' '
commit_for_rebase_autosquash_setup &&
EDITOR="printf \"something\nextra\" >>" git commit --fixup HEAD~1 --edit &&
commit_msg_is "fixup! target message subject linesomething
extra"
'
get_commit_msg () {
rev="$1" &&
git log -1 --pretty=format:"%B" "$rev"
}
test_expect_success 'commit --fixup=amend: creates amend! commit' '
commit_for_rebase_autosquash_setup &&
cat >expected <<-EOF &&
amend! $(git log -1 --format=%s HEAD~)
$(get_commit_msg HEAD~)
edited
EOF
(
set_fake_editor &&
FAKE_COMMIT_AMEND="edited" \
git commit --fixup=amend:HEAD~
) &&
get_commit_msg HEAD >actual &&
test_cmp expected actual
'
test_expect_success '--fixup=amend: --only ignores staged changes' '
commit_for_rebase_autosquash_setup &&
cat >expected <<-EOF &&
amend! $(git log -1 --format=%s HEAD~)
$(get_commit_msg HEAD~)
edited
EOF
(
set_fake_editor &&
FAKE_COMMIT_AMEND="edited" \
git commit --fixup=amend:HEAD~ --only
) &&
get_commit_msg HEAD >actual &&
test_cmp expected actual &&
test_cmp_rev HEAD@{1}^{tree} HEAD^{tree} &&
test_cmp_rev HEAD@{1} HEAD^ &&
test_expect_code 1 git diff --cached --exit-code &&
git cat-file blob :foo >actual &&
test_cmp foo actual
'
test_expect_success '--fixup=reword: ignores staged changes' '
commit_for_rebase_autosquash_setup &&
cat >expected <<-EOF &&
amend! $(git log -1 --format=%s HEAD~)
$(get_commit_msg HEAD~)
edited
EOF
(
set_fake_editor &&
FAKE_COMMIT_AMEND="edited" \
git commit --fixup=reword:HEAD~
) &&
get_commit_msg HEAD >actual &&
test_cmp expected actual &&
test_cmp_rev HEAD@{1}^{tree} HEAD^{tree} &&
test_cmp_rev HEAD@{1} HEAD^ &&
test_expect_code 1 git diff --cached --exit-code &&
git cat-file blob :foo >actual &&
test_cmp foo actual
'
test_expect_success '--fixup=reword: error out with -m option' '
commit_for_rebase_autosquash_setup &&
echo "fatal: cannot combine -m with --fixup:reword" >expect &&
test_must_fail git commit --fixup=reword:HEAD~ -m "reword commit message" 2>actual &&
test_cmp expect actual
'
test_expect_success '--fixup=amend: error out with -m option' '
commit_for_rebase_autosquash_setup &&
echo "fatal: cannot combine -m with --fixup:amend" >expect &&
test_must_fail git commit --fixup=amend:HEAD~ -m "amend commit message" 2>actual &&
test_cmp expect actual
'
test_expect_success 'consecutive amend! commits remove amend! line from commit msg body' '
commit_for_rebase_autosquash_setup &&
cat >expected <<-EOF &&
amend! amend! $(git log -1 --format=%s HEAD~)
$(get_commit_msg HEAD~)
edited 1
edited 2
EOF
echo "reword new commit message" >actual &&
(
set_fake_editor &&
FAKE_COMMIT_AMEND="edited 1" \
git commit --fixup=reword:HEAD~ &&
FAKE_COMMIT_AMEND="edited 2" \
git commit --fixup=reword:HEAD
) &&
get_commit_msg HEAD >actual &&
test_cmp expected actual
'
test_expect_success 'deny to create amend! commit if its commit msg body is empty' '
commit_for_rebase_autosquash_setup &&
echo "Aborting commit due to empty commit message body." >expected &&
(
set_fake_editor &&
test_must_fail env FAKE_COMMIT_MESSAGE="amend! target message subject line" \
git commit --fixup=amend:HEAD~ 2>actual
) &&
test_cmp expected actual
'
test_expect_success 'amend! commit allows empty commit msg body with --allow-empty-message' '
commit_for_rebase_autosquash_setup &&
cat >expected <<-EOF &&
amend! $(git log -1 --format=%s HEAD~)
EOF
(
set_fake_editor &&
FAKE_COMMIT_MESSAGE="amend! target message subject line" \
git commit --fixup=amend:HEAD~ --allow-empty-message &&
get_commit_msg HEAD >actual
) &&
test_cmp expected actual
'
test_fixup_reword_opt () {
test_expect_success "--fixup=reword: incompatible with $1" "
echo 'fatal: reword option of --fixup is mutually exclusive with'\
'--patch/--interactive/--all/--include/--only' >expect &&
test_must_fail git commit --fixup=reword:HEAD~ $1 2>actual &&
test_cmp expect actual
"
}
for opt in --all --include --only --interactive --patch
do
test_fixup_reword_opt $opt
done
test_expect_success '--fixup=reword: give error with pathsec' '
commit_for_rebase_autosquash_setup &&
echo "fatal: cannot combine reword option of --fixup with path '\''foo'\''" >expect &&
test_must_fail git commit --fixup=reword:HEAD~ -- foo 2>actual &&
test_cmp expect actual
'
test_expect_success '--fixup=reword: -F give error message' '
echo "fatal: Only one of -c/-C/-F/--fixup can be used." >expect &&
test_must_fail git commit --fixup=reword:HEAD~ -F msg 2>actual &&
test_cmp expect actual
'
test_expect_success 'commit --squash works with -F' '
commit_for_rebase_autosquash_setup &&
echo "log message from file" >msgfile &&
git commit --squash HEAD~1 -F msgfile &&
commit_msg_is "squash! target message subject linelog message from file"
'
test_expect_success 'commit --squash works with -m' '
commit_for_rebase_autosquash_setup &&
git commit --squash HEAD~1 -m "foo bar\nbaz" &&
commit_msg_is "squash! target message subject linefoo bar\nbaz"
'
test_expect_success 'commit --squash works with -C' '
commit_for_rebase_autosquash_setup &&
git commit --squash HEAD~1 -C HEAD &&
commit_msg_is "squash! target message subject lineintermediate commit"
'
test_expect_success 'commit --squash works with -c' '
commit_for_rebase_autosquash_setup &&
test_set_editor "$TEST_DIRECTORY"/t7500/edit-content &&
git commit --squash HEAD~1 -c HEAD &&
commit_msg_is "squash! target message subject lineedited commit"
'
test_expect_success 'commit --squash works with -C for same commit' '
commit_for_rebase_autosquash_setup &&
git commit --squash HEAD -C HEAD &&
commit_msg_is "squash! intermediate commit"
'
test_expect_success 'commit --squash works with -c for same commit' '
commit_for_rebase_autosquash_setup &&
test_set_editor "$TEST_DIRECTORY"/t7500/edit-content &&
git commit --squash HEAD -c HEAD &&
commit_msg_is "squash! edited commit"
'
test_expect_success 'commit --squash works with editor' '
commit_for_rebase_autosquash_setup &&
test_set_editor "$TEST_DIRECTORY"/t7500/add-content &&
git commit --squash HEAD~1 &&
commit_msg_is "squash! target message subject linecommit message"
'
test_expect_success 'invalid message options when using --fixup' '
echo changes >>foo &&
echo "message" >log &&
git add foo &&
test_must_fail git commit --fixup HEAD~1 --squash HEAD~2 &&
test_must_fail git commit --fixup HEAD~1 -C HEAD~2 &&
test_must_fail git commit --fixup HEAD~1 -c HEAD~2 &&
test_must_fail git commit --fixup HEAD~1 -F log
'
cat >expected-template <<EOF
# Please enter the commit message for your changes. Lines starting
# with '#' will be ignored.
#
# Author: A U Thor <[email protected]>
#
# On branch commit-template-check
# Changes to be committed:
# new file: commit-template-check
#
# Untracked files not listed
EOF
test_expect_success 'new line found before status message in commit template' '
git checkout -b commit-template-check &&
git reset --hard HEAD &&
touch commit-template-check &&
git add commit-template-check &&
GIT_EDITOR="cat >editor-input" git commit --untracked-files=no --allow-empty-message &&
test_cmp expected-template editor-input
'
test_expect_success 'setup empty commit with unstaged rename and copy' '
test_create_repo unstaged_rename_and_copy &&
(
cd unstaged_rename_and_copy &&
echo content >orig &&
git add orig &&
test_commit orig &&
cp orig new_copy &&
mv orig new_rename &&
git add -N new_copy new_rename
)
'
test_expect_success 'check commit with unstaged rename and copy' '
(
cd unstaged_rename_and_copy &&
test_must_fail git -c diff.renames=copy commit
)
'
test_expect_success 'commit without staging files fails and displays hints' '
echo "initial" >file &&
git add file &&
git commit -m initial &&
echo "changes" >>file &&
test_must_fail git commit -m update >actual &&
test_i18ngrep "no changes added to commit (use \"git add\" and/or \"git commit -a\")" actual
'
test_done
| abg1979/git | t/t7500-commit-template-squash-signoff.sh | Shell | gpl-2.0 | 15,114 |
#!/bin/bash
module load openmpi/gcc/4.8.2/1.6.5
module load python/2.7.8
module load python278/scipy
module load python278/numpy
datafile=$1
processfile=$2
outfile=$3
nCPUs=$4
filename=tomo_recon
echo $PYTHONPATH
nCPUs=$((nCPUs-1))
CPUs=CPU0
if [ $nCPUs -gt 0 ]; then
for i in $(eval echo {1..$nCPUs})
do
CPUs=$CPUs,CPU$i
done
fi
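# Illustrative result: invoked with nCPUs=4, the loop above yields
# CPUs=CPU0,CPU1,CPU2,CPU3.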
echo "Processes running are : ${processes}"
mpirun $filename $datafile $processfile $outfile -n $CPUs
| rcatwood/Savu | mpi/scarf/savu_mpijob.sh | Shell | gpl-3.0 | 457 |
#!/bin/bash -xe
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
DOCNAME=$1
if [ -z "$DOCNAME" ] ; then
echo "usage $0 DOCNAME"
exit 1
fi
# We're not doing anything for this directory. But we need to handle
# it by this script so that the common-rst.pot file gets registered.
if [[ "$DOCNAME" = "common-rst" ]] ; then
exit 0
fi
# Build Glossary
tools/glossary2rst.py doc/common-rst/glossary.rst
# First remove the old pot file, otherwise the new file will contain
# old references
rm -f doc/$DOCNAME/source/locale/$DOCNAME.pot
# We need to extract all strings, so add all supported tags
TAG=""
if [ ${DOCNAME} = "install-guide" ] ; then
TAG="-t obs -t rdo -t ubuntu -t debian"
fi
sphinx-build -b gettext $TAG doc/$DOCNAME/source/ doc/$DOCNAME/source/locale/
# Update common
sed -i -e 's/^"Project-Id-Version: [a-zA-Z0-9\. ]+\\n"$/"Project-Id-Version: \\n"/' \
doc/$DOCNAME/source/locale/common.pot
# Create the common pot file
msgcat --sort-output doc/common-rst/source/locale/common-rst.pot \
doc/$DOCNAME/source/locale/common.pot | \
sed -e 's/^"Project-Id-Version: [a-zA-Z0-9\. ]+\\n"$/"Project-Id-Version: \\n"/' | \
awk '$0 !~ /^\# [a-z0-9]+$/' | awk '$0 !~ /^\# \#-\#-\#-\#-\# /' \
> doc/$DOCNAME/source/locale/common-rst.pot
mv -f doc/$DOCNAME/source/locale/common-rst.pot doc/common-rst/source/locale/common-rst.pot
rm -f doc/$DOCNAME/source/locale/common.pot
# Simplify metadata
rm -f doc/common-rst/source/locale/dummy.po
cat << EOF > doc/common-rst/source/locale/dummy.po
msgid ""
msgstr ""
"Project-Id-Version: \n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2015-01-01 01:01+0900\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <[email protected]>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
EOF
msgmerge -N doc/common-rst/source/locale/dummy.po \
doc/common-rst/source/locale/common-rst.pot > doc/common-rst/source/locale/tmp.pot
mv -f doc/common-rst/source/locale/tmp.pot doc/common-rst/source/locale/common-rst.pot
rm -f doc/common-rst/source/locale/dummy.po
# Take care of deleting all temporary files so that git add
# doc/$DOCNAME/source/locale will only add the single pot file.
# Remove UUIDs, those are not necessary and change too often
msgcat --sort-output doc/$DOCNAME/source/locale/*.pot | \
awk '$0 !~ /^\# [a-z0-9]+$/' > doc/$DOCNAME/source/$DOCNAME.pot
rm doc/$DOCNAME/source/locale/*.pot
rm -rf doc/$DOCNAME/source/locale/.doctrees/
mv doc/$DOCNAME/source/$DOCNAME.pot doc/$DOCNAME/source/locale/$DOCNAME.pot
| potsmaster/openstack-manuals | tools/generatepot-rst.sh | Shell | apache-2.0 | 3,115 |
#!/usr/bin/env bash
# **aggregates.sh**
# This script demonstrates how to use host aggregates:
# * Create an Aggregate
# * Updating Aggregate details
# * Testing Aggregate metadata
# * Testing Aggregate delete
# * Testing General Aggregates (https://blueprints.launchpad.net/nova/+spec/general-host-aggregates)
# * Testing add/remove hosts (with one host)
echo "**************************************************"
echo "Begin DevStack Exercise: $0"
echo "**************************************************"
# This script exits on an error so that errors don't compound and you see
# only the first error that occurred.
set -o errexit
# Print the commands being run so that we can see the command that triggers
# an error. It is also useful for following along as the install occurs.
set -o xtrace
# Settings
# ========
# Keep track of the current directory
EXERCISE_DIR=$(cd $(dirname "$0") && pwd)
TOP_DIR=$(cd $EXERCISE_DIR/..; pwd)
# Import common functions
source $TOP_DIR/functions
# Import configuration
source $TOP_DIR/openrc
# Import exercise configuration
source $TOP_DIR/exerciserc
# Test as the admin user
. $TOP_DIR/openrc admin admin
# Cells does not support aggregates.
is_service_enabled n-cell && exit 55
# Create an aggregate
# ===================
AGGREGATE_NAME=test_aggregate_$RANDOM
AGGREGATE2_NAME=test_aggregate_$RANDOM
AGGREGATE_A_ZONE=nova
exit_if_aggregate_present() {
aggregate_name=$1
if [ $(nova aggregate-list | grep -c " $aggregate_name ") == 0 ]; then
echo "SUCCESS $aggregate_name not present"
else
die $LINENO "found aggregate: $aggregate_name"
exit -1
fi
}
exit_if_aggregate_present $AGGREGATE_NAME
AGGREGATE_ID=$(nova aggregate-create $AGGREGATE_NAME $AGGREGATE_A_ZONE | grep " $AGGREGATE_NAME " | get_field 1)
AGGREGATE2_ID=$(nova aggregate-create $AGGREGATE2_NAME $AGGREGATE_A_ZONE | grep " $AGGREGATE2_NAME " | get_field 1)
# check aggregate created
nova aggregate-list | grep -q " $AGGREGATE_NAME " || die $LINENO "Aggregate $AGGREGATE_NAME not created"
# Ensure creating a duplicate fails
# =================================
if nova aggregate-create $AGGREGATE_NAME $AGGREGATE_A_ZONE; then
die $LINENO "could create duplicate aggregate"
fi
# Test aggregate-update (and aggregate-details)
# =============================================
AGGREGATE_NEW_NAME=test_aggregate_$RANDOM
nova aggregate-update $AGGREGATE_ID $AGGREGATE_NEW_NAME
nova aggregate-details $AGGREGATE_ID | grep $AGGREGATE_NEW_NAME
nova aggregate-details $AGGREGATE_ID | grep $AGGREGATE_A_ZONE
nova aggregate-update $AGGREGATE_ID $AGGREGATE_NAME $AGGREGATE_A_ZONE
nova aggregate-details $AGGREGATE_ID | grep $AGGREGATE_NAME
nova aggregate-details $AGGREGATE_ID | grep $AGGREGATE_A_ZONE
# Test aggregate-set-metadata
# ===========================
META_DATA_1_KEY=asdf
META_DATA_2_KEY=foo
META_DATA_3_KEY=bar
#ensure no additional metadata is set
nova aggregate-details $AGGREGATE_ID | egrep "{u'availability_zone': u'$AGGREGATE_A_ZONE'}|{}"
nova aggregate-set-metadata $AGGREGATE_ID ${META_DATA_1_KEY}=123
nova aggregate-details $AGGREGATE_ID | grep $META_DATA_1_KEY
nova aggregate-details $AGGREGATE_ID | grep 123
nova aggregate-set-metadata $AGGREGATE_ID ${META_DATA_2_KEY}=456
nova aggregate-details $AGGREGATE_ID | grep $META_DATA_1_KEY
nova aggregate-details $AGGREGATE_ID | grep $META_DATA_2_KEY
nova aggregate-set-metadata $AGGREGATE_ID $META_DATA_2_KEY ${META_DATA_3_KEY}=789
nova aggregate-details $AGGREGATE_ID | grep $META_DATA_1_KEY
nova aggregate-details $AGGREGATE_ID | grep $META_DATA_3_KEY
nova aggregate-details $AGGREGATE_ID | grep $META_DATA_2_KEY && die $LINENO "ERROR metadata was not cleared"
nova aggregate-set-metadata $AGGREGATE_ID $META_DATA_3_KEY $META_DATA_1_KEY
nova aggregate-details $AGGREGATE_ID | egrep "{u'availability_zone': u'$AGGREGATE_A_ZONE'}|{}"
# Test aggregate-add/remove-host
# ==============================
if [ "$VIRT_DRIVER" == "xenserver" ]; then
echo "TODO(johngarbutt) add tests for add/remove host from pool aggregate"
fi
FIRST_HOST=$(nova host-list | grep compute | get_field 1 | head -1)
# Make sure can add two aggregates to same host
nova aggregate-add-host $AGGREGATE_ID $FIRST_HOST
nova aggregate-add-host $AGGREGATE2_ID $FIRST_HOST
if nova aggregate-add-host $AGGREGATE2_ID $FIRST_HOST; then
die $LINENO "could add duplicate host to single aggregate"
fi
nova aggregate-remove-host $AGGREGATE2_ID $FIRST_HOST
nova aggregate-remove-host $AGGREGATE_ID $FIRST_HOST
# Test aggregate-delete
# =====================
nova aggregate-delete $AGGREGATE_ID
nova aggregate-delete $AGGREGATE2_ID
exit_if_aggregate_present $AGGREGATE_NAME
set +o xtrace
echo "**************************************************"
echo "End DevStack Exercise: $0"
echo "**************************************************"
|
inkerra/devstack
|
exercises/aggregates.sh
|
Shell
|
apache-2.0
| 4,857 |
#!/bin/sh
name=math_test
rm -rf "${name}_html.html ${name}_pandoc.html sphinx-rootdir" # clean
doconce format latex $name
doconce ptex2tex $name
latex $name
latex $name
dvipdf $name
cp $name.pdf ${name}_latex.pdf
doconce format html $name
cp $name.html ${name}_html.html
doconce sphinx_dir dirname=sphinx-rootdir-math $name
python automake_sphinx.py
doconce format pandoc $name
# Do not use pandoc directly because it does not support MathJax well enough
doconce md2html $name.md
cp $name.html ${name}_pandoc.html
doconce format pandoc $name
doconce md2latex $name
latex $name
latex $name
dvipdf $name
cp $name.pdf ${name}_pandoc.pdf
#exit 0
for name in "${name}_html.html ${name}_pandoc.html sphinx-rootdir/_build/html/math_test.html"
do
echo $name
google-chrome $name
done
|
sjsrey/doconce
|
test/math_test.sh
|
Shell
|
bsd-3-clause
| 783 |
rm -f PrivlyChromeExtension.crx
rm -f PrivlyChromeExtension.zip
zip -vr PrivlyChromeExtension.zip * [email protected]_ignore
|
smcgregor/privly-chrome
|
package.sh
|
Shell
|
mit
| 118 |
#!/bin/sh
# Copyright (c) 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ---
# Author: Craig Silverstein
#
# Just tries to run gflags_unittest with various flags defined in
# gflags.cc, and makes sure they give the appropriate exit
# status and appropriate error message.
if [ -z "$1" ]
then
echo "USAGE: $0 <unittest exe> [top_srcdir] [tmpdir]"
exit 1
fi
EXE="$1"
SRCDIR="${2:-./}"
TMPDIR="${3:-/tmp/gflags}"
# Executables built with the main source file suffixed with "-main" and "_main".
EXE2="${EXE}2" # eg, gflags_unittest2
EXE3="${EXE}3" # eg, gflags_unittest3
# $1: executable
# $2: line-number $3: expected return code. $4: substring of expected output.
# $5: a substring you *don't* expect to find in the output. $6+ flags
ExpectExe() {
local executable="$1"
shift
local line_number="$1"
shift
local expected_rc="$1"
shift
local expected_output="$1"
shift
local unexpected_output="$1"
shift
# We always add --srcdir=$SRCDIR because it's needed for correctness
"$executable" --srcdir="$SRCDIR" "$@" > "$TMPDIR/test.$line_number" 2>&1
local actual_rc=$?
if [ $actual_rc != $expected_rc ]; then
echo "Test on line $line_number failed:" \
"expected rc $expected_rc, got $actual_rc"
exit 1;
fi
if [ -n "$expected_output" ] &&
! fgrep -e "$expected_output" "$TMPDIR/test.$line_number" >/dev/null; then
echo "Test on line $line_number failed:" \
"did not find expected substring '$expected_output'"
exit 1;
fi
if [ -n "$unexpected_output" ] &&
fgrep -e "$unexpected_output" "$TMPDIR/test.$line_number" >/dev/null; then
echo "Test line $line_number failed:" \
"found unexpected substring '$unexpected_output'"
exit 1;
fi
}
# $1: line-number $2: expected return code. $3: substring of expected output.
# $4: a substring you *don't* expect to find in the output. $5+ flags
Expect() {
ExpectExe "$EXE" "$@"
}
rm -rf "$TMPDIR"
mkdir "$TMPDIR" || exit 2
# Create a few flagfiles we can use later
echo "--version" > "$TMPDIR/flagfile.1"
echo "--foo=bar" > "$TMPDIR/flagfile.2"
echo "--nounused_bool" >> "$TMPDIR/flagfile.2"
echo "--flagfile=$TMPDIR/flagfile.2" > "$TMPDIR/flagfile.3"
# Set a few environment variables (useful for --tryfromenv)
export FLAGS_undefok=foo,bar
export FLAGS_weirdo=
export FLAGS_version=true
export FLAGS_help=false
# First, just make sure the unittest works as-is
Expect $LINENO 0 "PASS" ""
# --help should show all flags, including flags from gflags_reporting.cc
Expect $LINENO 1 "/gflags_reporting.cc" "" --help
# Make sure --help reflects flag changes made before flag-parsing
Expect $LINENO 1 \
"-changed_bool1 (changed) type: bool default: true" "" --help
Expect $LINENO 1 \
"-changed_bool2 (changed) type: bool default: true" "" --help
# --nohelp and --help=false should be as if we didn't say anything
Expect $LINENO 0 "PASS" "" --nohelp
Expect $LINENO 0 "PASS" "" --help=false
# --helpfull is the same as help
Expect $LINENO 1 "/gflags_reporting.cc" "" -helpfull
# --helpshort should show only flags from the unittest itself
Expect $LINENO 1 "/gflags_unittest.cc" "/gflags_reporting.cc" --helpshort
# --helpshort should show the tldflag we created in the unittest dir
Expect $LINENO 1 "tldflag1" "/google.cc" --helpshort
Expect $LINENO 1 "tldflag2" "/google.cc" --helpshort
# --helpshort should work if the main source file is suffixed with [_-]main
ExpectExe "$EXE2" $LINENO 1 "/gflags_unittest-main.cc" "/gflags_reporting.cc" \
--helpshort
ExpectExe "$EXE3" $LINENO 1 "/gflags_unittest_main.cc" "/gflags_reporting.cc" \
--helpshort
# --helpon needs an argument
Expect $LINENO 1 \
"'--helpon' is missing its argument; flag description: show help on" \
"" --helpon
# --helpon argument indicates what file we'll show args from
Expect $LINENO 1 "/gflags.cc" "/gflags_unittest.cc" --helpon=gflags
# another way of specifying the argument
Expect $LINENO 1 "/gflags.cc" "/gflags_unittest.cc" --helpon gflags
# test another argument
Expect $LINENO 1 "/gflags_unittest.cc" "/gflags.cc" \
--helpon gflags_unittest
# helpmatch is like helpon but takes substrings
Expect $LINENO 1 "/gflags_reporting.cc" "/gflags_unittest.cc" \
-helpmatch reporting
Expect $LINENO 1 "/gflags_unittest.cc" "/gflags.cc" \
-helpmatch=unittest
# if no flags are found with helpmatch or helpon, suggest --help
Expect $LINENO 1 "No modules matched" "/gflags_unittest.cc" \
-helpmatch=nosuchsubstring
Expect $LINENO 1 "No modules matched" "/gflags_unittest.cc" \
-helpon=nosuchmodule
# helppackage shows all the flags defined in the same dir as this unittest,
# including those from gflags_reporting.cc
Expect $LINENO 1 "/gflags_reporting.cc" "" --helppackage
# xml!
Expect $LINENO 1 "/gflags_unittest.cc</file>" \
"/gflags_unittest.cc:" --helpxml
# just print the version info and exit
Expect $LINENO 0 "gflags_unittest" "gflags_unittest.cc" --version
# --undefok is a fun flag...
Expect $LINENO 1 "unknown command line flag 'foo'" "" --undefok= --foo --unused_bool
Expect $LINENO 0 "PASS" "" --undefok=foo --foo --unused_bool
# If you say foo is ok to be undefined, we'll accept --nofoo as well
Expect $LINENO 0 "PASS" "" --undefok=foo --nofoo --unused_bool
# It's ok if the foo is in the middle
Expect $LINENO 0 "PASS" "" --undefok=fee,fi,foo,fum --foo --unused_bool
# But the spelling has to be just right...
Expect $LINENO 1 "unknown command line flag 'foo'" "" --undefok=fo --foo --unused_bool
Expect $LINENO 1 "unknown command line flag 'foo'" "" --undefok=foot --foo --unused_bool
# See if we can successfully load our flags from the flagfile
Expect $LINENO 0 "gflags_unittest" "gflags_unittest.cc" \
--flagfile="$TMPDIR/flagfile.1"
Expect $LINENO 0 "PASS" "" --flagfile="$TMPDIR/flagfile.2"
Expect $LINENO 0 "PASS" "" --flagfile="$TMPDIR/flagfile.3"
# Also try to load flags from the environment
Expect $LINENO 0 "gflags_unittest" "gflags_unittest.cc" --fromenv=version
Expect $LINENO 0 "gflags_unittest" "gflags_unittest.cc" --tryfromenv=version
Expect $LINENO 0 "PASS" "" --fromenv=help
Expect $LINENO 0 "PASS" "" --tryfromenv=help
Expect $LINENO 1 "helpfull not found in environment" "" --fromenv=helpfull
Expect $LINENO 0 "PASS" "" --tryfromenv=helpfull
Expect $LINENO 0 "PASS" "" --tryfromenv=undefok --foo
Expect $LINENO 1 "unknown command line flag" "" --tryfromenv=weirdo
Expect $LINENO 0 "gflags_unittest" "gflags_unittest.cc" \
--tryfromenv=test_bool,version,unused_bool
Expect $LINENO 1 "not found in environment" "" --fromenv=test_bool
Expect $LINENO 1 "unknown command line flag" "" --fromenv=test_bool,ok
# Here, the --version overrides the fromenv
Expect $LINENO 0 "gflags_unittest" "gflags_unittest.cc" \
--fromenv=test_bool,version,ok
# Make sure -- by itself stops argv processing
Expect $LINENO 0 "PASS" "" -- --help
# Make sure boolean flags gives warning when type of default value is not bool
Expect $LINENO 0 "Flag test_bool_string is of type bool, but its default value is not a boolean." ""
Expect $LINENO 0 "Flag test_bool_float is of type bool, but its default value is not a boolean." ""
Expect $LINENO 0 "Flag test_bool_int is of type bool, but its default value is not a boolean." ""
# Make sure that boolean flags don't give warning when default value is bool
Expect $LINENO 0 "" "Flag test_bool_bool is of type bool, but its default value is not a boolean."
# And we should die if the flag value doesn't pass the validator
Expect $LINENO 1 "ERROR: failed validation of new value 'true' for flag 'always_fail'" "" --always_fail
echo "PASS"
exit 0
|
fivejjs/dtm
|
lib/util/gflags-1.1/src/gflags_unittest.sh
|
Shell
|
gpl-2.0
| 9,062 |
#!/bin/sh
# ------------------------------------------------------------------------------
# Travis CI scripts
# Copyright(c) pgRouting Contributors
#
# Test pgRouting
# ------------------------------------------------------------------------------
PGDATABASE="pgr_test__db__test"
POSTGRESQL_VERSION="$1"
PGUSER="$2"
#POSTGIS_VERSION="$2"
POSTGRESQL_DIRECTORY="/usr/share/postgresql/$POSTGRESQL_VERSION"
echo "POSTGRESQL_VERSION $POSTGRESQL_VERSION"
# exit script on error
set -e
ERROR=0
# Define alias function for psql command
run_psql () {
PGOPTIONS='--client-min-messages=warning' psql -U $PGUSER -d $PGDATABASE -X -q -v ON_ERROR_STOP=1 --pset pager=off "$@"
if [ "$?" -ne 0 ]
then
echo "Test query failed: $@"
ERROR=1
fi
}
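# Example usage (hypothetical query):
#   run_psql -c "SELECT count(*) FROM pgr_test_table;"
# A non-zero psql exit status is meant to set the ERROR flag reported at exit.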
# ------------------------------------------------------------------------------
# CREATE DATABASE
# ------------------------------------------------------------------------------
#export PGUSER
#run_psql -l
#run_psql -c "CREATE DATABASE ____tmp_pgdb______;"
#export PGDATABASE
# ------------------------------------------------------------------------------
# CREATE EXTENSION
# ------------------------------------------------------------------------------
run_psql -c "CREATE EXTENSION postgis;"
run_psql -c "CREATE EXTENSION pgrouting;"
# ------------------------------------------------------------------------------
# Get version information
# ------------------------------------------------------------------------------
run_psql -c "SELECT version();"
run_psql -c "SELECT postgis_full_version();"
run_psql -c "SELECT pgr_version();"
#PGROUTING_VERSION=`run_psql -A -t -c "SELECT version FROM pgr_version();"`
# ------------------------------------------------------------------------------
# Test runner
# ------------------------------------------------------------------------------
# use -v -v for more verbose debuging output
# ./tools/test-runner.pl -v -v -pgver $POSTGRESQL_VERSION
#./tools/test-runner.pl -pgver $POSTGRESQL_VERSION $IGNORE
#./tools/test-runner.pl -pgver $POSTGRESQL_VERSION $IGNORE -v -alg ksp
#cd ./tools/testers/
#psql -f setup_db.sql
#pg_prove ../../src/trsp/test/pgtap/*
#dropdb ___pgr___test___
#cd ../../
./tools/testers/algorithm-tester.pl -pgver $POSTGRESQL_VERSION -pguser $PGUSER
if [ "$?" -ne 0 ]
then
ERROR=1
fi
# Return success or failure
# ------------------------------------------------------------------------------
exit $ERROR
|
sankepallyrohithreddy/pgrouting
|
tools/travis/pgrouting_test.sh
|
Shell
|
gpl-2.0
| 2,474 |
until test $1 = red
do
echo $1
shift
done
|
rodrigosiqueira/TA
|
tools/bash/1_basic_shell/codes/stop_and_go.sh
|
Shell
|
gpl-3.0
| 46 |
#!/bin/bash
# Check Mac OS X Server Hostname
# by Jedda Wignall
# http://jedda.me
# v1.1 - 12 Aug 2013
# Significant re-work. Now also does a forward and reverse lookup to ensure server DNS is healthy.
# v1.0 - 20 Mar 2012
# Initial release.
# Simple script that makes sure the infamous changeip -checkhostname command returns a happy status.
# It then does a forward and reverse lookup of the returned hostname and IP adress to make sure that DNS is healthy.
checkHostname=`sudo /Applications/Server.app/Contents/ServerRoot/usr/sbin/changeip -checkhostname`
regex="s.+=.([0-9].+)..Cu.+=.([a-z0-9.-]+).D"
if echo $checkHostname | grep -q "The names match."; then
[[ $checkHostname =~ $regex ]]
if [ "${BASH_REMATCH[0]}" != "" ]; then
forward=`dig ${BASH_REMATCH[2]} +short`
reverse=`dig -x ${BASH_REMATCH[1]} +short`
if [ "$forward" != "${BASH_REMATCH[1]}" ]; then
printf "CRITICAL - DNS lookup of ${BASH_REMATCH[2]} yielded $forward. We expected ${BASH_REMATCH[1]}!\n"
exit 2
elif [ "$reverse" != "${BASH_REMATCH[2]}." ]; then
printf "CRITICAL - Reverse DNS lookup of ${BASH_REMATCH[1]} yielded $reverse. We expected ${BASH_REMATCH[2]}.!\n"
exit 2
fi
else
printf "CRITICAL - Could not read hostname or IP! Run 'sudo changeip -checkhostname' on server!\n"
exit 2
fi
printf "OK - Hostname is ${BASH_REMATCH[2]}. Forward and reverse lookups matched expected values.\n"
exit 0
else
printf "CRITICAL - Hostname check returned non matching names!\n"
exit 2
fi
|
ryanmoon/OSX-Monitoring-Tools
|
check_osx_hostname.sh
|
Shell
|
unlicense
| 1,509 |
#!/bin/bash
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# This script validates that iOS is set up correctly for the
# testing environment.
#
# In particular, it checks that the minimum required Xcode version is installed.
# It also checks that the correct Node version is installed. Node 10 is not fully
# supported at this time, and Node 6 is no longer supported.
# Function used to compare dot separated version numbers
function version_gt() { test "$(printf '%s\n' "$@" | sort -V | head -n 1)" != "$1"; }
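# For example (hypothetical versions):
#   version_gt 10.1 9.4   # succeeds: 10.1 sorts after 9.4
#   version_gt 9.4 10.1   # fails: 9.4 is not greater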
# Check that node is installed.
if [ -z "$(which node)" ]; then
echo "Could not find Node binary. Please check your nodejs install."
echo "See https://reactnative.dev/docs/getting-started.html for instructions."
exit 1
fi
# Check that the correct version of node is installed
NODE_VERSION="$(command node --version | sed 's/[-/a-zA-Z]//g' |sed 's/.\{2\}$//')"
if (( $(echo "${NODE_VERSION} <= 6.0" | bc -l) )); then
echo "Node ${NODE_VERSION} detected. This version of Node is not supported."
echo "See https://reactnative.dev/docs/getting-started.html for instructions."
exit 1
fi
# Check that Xcode is installed.
if [ -z "$(which xcodebuild)" ]; then
echo "Could not find Xcode build tools. Please check your Xcode install."
echo "See https://reactnative.dev/docs/getting-started.html for instructions."
exit 1
fi
MIN_XCODE_VERSION=9.4
# Check that the correct version of Xcode is installed
XCODE_VERSION="$(command xcodebuild -version | sed '$ d' | sed 's/[-/a-zA-Z]//g')"
if (version_gt $MIN_XCODE_VERSION $XCODE_VERSION) && [ "$XCODE_VERSION" != "$MIN_XCODE_VERSION" ]; then
echo "Xcode ${XCODE_VERSION} detected. React Native requires ${MIN_XCODE_VERSION} or newer."
echo "Older versions of Xcode may cause cryptic build errors."
echo "See https://reactnative.dev/docs/getting-started.html for instructions."
exit 1
fi
|
exponentjs/react-native
|
scripts/validate-ios-test-env.sh
|
Shell
|
bsd-3-clause
| 1,998 |
#put xctool.sh into your PATH
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
COCOS2DX_ROOT="$DIR"/../../../..
cd ${COCOS2DX_ROOT}
python tools/framework-compile/bin/gen_cocos_libs.py -c --android
|
dios-game/dios-cocos
|
src/oslibs/cocos/cocos-src/tools/jenkins-scripts/slave-scripts/framework/android-build.sh
|
Shell
|
mit
| 205 |
# copy the restore log to restored system $TARGET_FS_ROOT/root/ with a timestamp
if ! test -d $TARGET_FS_ROOT/root ; then
mkdir -p $TARGET_FS_ROOT/root
chmod 0700 $TARGET_FS_ROOT/root
fi
cp "${TMP_DIR}/${BACKUP_PROG_ARCHIVE}-restore.log" $TARGET_FS_ROOT/root/restore-$(date +%Y%m%d.%H%M).log
StopIfError "Could not copy ${BACKUP_PROG_ARCHIVE}-restore.log to $TARGET_FS_ROOT/root"
gzip "$TARGET_FS_ROOT/root/restore-$(date +%Y%m%d.)*.log"
# the rear.log file will be copied later (by wrapup/default/990_copy_logfile.sh)
|
phracek/rear
|
usr/share/rear/restore/RSYNC/default/800_copy_restore_log.sh
|
Shell
|
gpl-3.0
| 524 |
#!/bin/bash
# Creates a test app and installs the plugin, then changes domain classes and does the required
# migrations. Change the hard-coded values in the variables below for your local system to use.
# Create a MySQL database 'migrationtest' and drop/create before each run.
PLUGIN_DIR="/home/burt/workspace/grails/plugins/grails-database-migration"
TESTAPP_DIR="/home/burt/workspace/testapps/migration"
HOME_DIR="/home/burt"
APP_NAME="migrationtests"
DB_NAME="migrationtest"
PLUGIN_VERSION="1.0"
#GRAILS_VERSION="1.3.3"
#GRAILS_HOME="/home/burt/dev/javalib/grails-$GRAILS_VERSION"
GRAILS_VERSION="2.0.0.BUILD-SNAPSHOT"
GRAILS_HOME="/home/burt/workspace.grails"
PATH=$GRAILS_HOME/bin:$PATH
APP_DIR="$TESTAPP_DIR/$APP_NAME"
verifyExitCode() {
if [ $1 -ne 0 ]; then
echo "ERROR: $2 failed with exit code $1"
exit $1
fi
}
mkdir -p $TESTAPP_DIR
cd $TESTAPP_DIR
rm -rf "$APP_NAME"
rm -rf "$HOME_DIR/.grails/$GRAILS_VERSION/projects/$APP_NAME"
grails create-app "$APP_NAME" --stacktrace
verifyExitCode $? "create-app"
cd "$PLUGIN_DIR/testapp"
# initial domain classes
mkdir "$TESTAPP_DIR/$APP_NAME/grails-app/domain/$APP_NAME"
cp Product.v1.groovy "$TESTAPP_DIR/$APP_NAME/grails-app/domain/$APP_NAME/Product.groovy"
cp Order.v1.groovy "$TESTAPP_DIR/$APP_NAME/grails-app/domain/$APP_NAME/Order.groovy"
cp OrderItem.v1.groovy "$TESTAPP_DIR/$APP_NAME/grails-app/domain/$APP_NAME/OrderItem.groovy"
# config
cp BuildConfig.groovy "$TESTAPP_DIR/$APP_NAME/grails-app/conf"
cp Config.groovy "$TESTAPP_DIR/$APP_NAME/grails-app/conf"
cp DataSource.groovy "$TESTAPP_DIR/$APP_NAME/grails-app/conf"
# scripts
cp PopulateData.groovy "$TESTAPP_DIR/$APP_NAME/scripts/"
cp VerifyData.groovy "$TESTAPP_DIR/$APP_NAME/scripts/"
# drop and create db
mysql -u "$DB_NAME" -p"$DB_NAME" -D "$DB_NAME" -e "drop database if exists $DB_NAME; create database $DB_NAME"
verifyExitCode $? "drop/create database"
cd $APP_DIR
grails compile --stacktrace
# install plugin
grails install-plugin hibernate $GRAILS_VERSION --force --stacktrace
#2.0 hack
cp "$PLUGIN_DIR/grails-database-migration-$PLUGIN_VERSION.zip" "$TESTAPP_DIR/$APP_NAME/lib/database-migration-$PLUGIN_VERSION.zip"
grails install-plugin "$PLUGIN_DIR/grails-database-migration-$PLUGIN_VERSION.zip" --stacktrace
verifyExitCode $? "install-plugin"
grails compile --stacktrace
# create the initial changelog and export to db
grails dbm-create-changelog --stacktrace
verifyExitCode $? "dbm-create-changelog"
grails dbm-generate-gorm-changelog initial.groovy --add --stacktrace
verifyExitCode $? "dbm-generate-gorm-changelog"
grails dbm-update --stacktrace
verifyExitCode $? "dbm-update"
# insert initial data
grails populate-data --stacktrace
verifyExitCode $? "populate-data"
# fix Order.customer by making it a domain class
cd -
cp Customer.groovy "$TESTAPP_DIR/$APP_NAME/grails-app/domain/$APP_NAME/"
cp Order.v2.groovy "$TESTAPP_DIR/$APP_NAME/grails-app/domain/$APP_NAME/Order.groovy"
cp customer.changelog.groovy "$TESTAPP_DIR/$APP_NAME/grails-app/migrations"
cd -
grails dbm-register-changelog customer.changelog.groovy --stacktrace
verifyExitCode $? "dbm-register-changelog"
grails dbm-update --stacktrace
verifyExitCode $? "dbm-update"
# fix Product.prize -> Product.price
cd -
cp Product.v2.groovy "$TESTAPP_DIR/$APP_NAME/grails-app/domain/$APP_NAME/Product.groovy"
cp price.changelog.groovy "$TESTAPP_DIR/$APP_NAME/grails-app/migrations"
cd -
grails dbm-register-changelog price.changelog.groovy --stacktrace
verifyExitCode $? "dbm-register-changelog"
grails dbm-update --stacktrace
verifyExitCode $? "dbm-update"
# verify data after migrations
grails verify-data --stacktrace
verifyExitCode $? "verify-data"
echo "SUCCESS!"
|
bond-/grails-database-migration
|
testapp/run_test_app.sh
|
Shell
|
apache-2.0
| 3,700 |
version=6.11.0
src_url=http://nodejs.org/dist/v$version/node-v$version.tar.gz
src_url_sha1=df31d0e4e2104b3a62342533af5fb879f321416b
|
JackieXie168/rethinkdb
|
mk/support/pkg/node.sh
|
Shell
|
apache-2.0
| 133 |
#! /bin/bash
if [[ $# -lt 3 ]] || [[ $# -gt 4 ]]; then
echo "Usage: ${0##*/} {secret_key} {libnss_resolver_version} {distro} [--clean_repo]"
exit 1
fi
if [[ $# == 4 ]] && [[ "$4" == "--clean_repo" ]]; then
CLEAN_REPO=true
fi
# Get azk root path
abs_dir() {
cd "${1%/*}"; link=`readlink ${1##*/}`;
if [ -z "$link" ]; then pwd; else abs_dir $link; fi
}
export AZK_ROOT_PATH=`cd \`abs_dir ${BASH_SOURCE:-$0}\`/../../../..; pwd`
cd $AZK_ROOT_PATH
if [[ ! -e ./bin/azk ]]; then
echo "$AZK_ROOT_PATH is not azk project root"
exit 2
fi
set -e
export PATH=`pwd`/bin:$PATH
DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
export VERSION=$( azk version | sed -e 's/^azk //; s/^version //; s/,.*//' )
export SECRET_KEY=$1
export LIBNSS_RESOLVER_VERSION=$2
export DISTRO=$3 && export REPO=azk-${DISTRO}
RELEASE_CHANNEL=$( echo "${VERSION}" | sed s/[^\\-]*// | sed s/^\\-// | sed s/\\..*// )
if [[ -z "${RELEASE_CHANNEL}" ]]; then
PKG_SUFFIX=
else
PKG_SUFFIX="-${RELEASE_CHANNEL}"
fi
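# For example (hypothetical versions): VERSION "0.17.0-rc.2" gives
# RELEASE_CHANNEL "rc" and PKG_SUFFIX "-rc", while a stable "0.17.0"
# leaves both empty, so the package is named plain "azk".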
gpg --import $SECRET_KEY
# Try to remove old publishes
(
set +e
aptly publish drop ${DISTRO}
aptly snapshot drop ${REPO}-${VERSION}
[[ $CLEAN_REPO == true ]] && aptly repo drop ${REPO}
! aptly repo show -with-packages ${REPO} && aptly repo create -distribution=${DISTRO} -component=main ${REPO}
) || true
# Publish a new release
aptly repo add -force-replace=true ${REPO} package/deb/azk*.deb package/deb/${DISTRO}-libnss-resolver_${LIBNSS_RESOLVER_VERSION}_amd64.deb
aptly repo show -with-packages ${REPO} | grep "azk${PKG_SUFFIX}_${VERSION}_amd64"
aptly repo show -with-packages ${REPO} | grep "libnss-resolver_${LIBNSS_RESOLVER_VERSION}_amd64"
aptly snapshot create ${REPO}-${VERSION} from repo ${REPO}
aptly snapshot list | grep "${REPO}-${VERSION}"
aptly publish snapshot ${REPO}-${VERSION}
cp -R /azk/aptly/public package/
|
gullitmiranda/azk
|
src/libexec/package-tools/ubuntu/generate.sh
|
Shell
|
apache-2.0
| 1,845 |
#!/usr/bin/env bash
set -e
source $(dirname $0)/00-init-env.sh
#-------------------------------------------------------------------------------
# Launch frontend tests
#-------------------------------------------------------------------------------
cd "$JHI_FOLDER_APP"
npm run ci:frontend:test --if-present
|
gmarziou/generator-jhipster
|
test-integration/scripts/22-tests-frontend-npm.sh
|
Shell
|
apache-2.0
| 311 |
#!/bin/bash
set -x
rm -rf \
CMakeCache.txt \
CMakeFiles/ \
Makefile \
bin/ \
build.xml \
cmake_install.cmake \
jni/CMakeFiles/ \
jni/Makefile \
jni/cmake_install.cmake \
libs/ \
local.properties \
proguard.cfg \
project.properties \
systemlibs/
|
daviddoria/PCLMirror
|
mobile_apps/android/PointCloudStreaming/clean.sh
|
Shell
|
bsd-3-clause
| 246 |
#!/usr/bin/env bash
# untar all VMwareTools-*.tar.gz files found, and apply patches for all modules for each one
readonly SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
dir="${1:-$(pwd)}"
tools="$(find "${dir}" -type f -name 'VMwareTools-*.tar.gz' | sort -r)"
if [[ -z "${tools}" ]]; then
echo "$0: No files matching VMwareTools-*.tar.gz found in '${dir}'" >&2
exit 1
fi
for tool in ${tools}; do
"${SCRIPT_DIR}/untar-and-patch.sh" "${tool}"
done
|
rasa/vmware-tools-patches
|
untar-all-and-patch.sh
|
Shell
|
mit
| 470 |
#From: "Grigoriy Strokin" <[email protected]>
#Newsgroups: comp.unix.shell
#Subject: BASH: getopt function that parses long-named options
#Date: Mon, 22 Dec 1997 20:35:18 +0300
#Hi, I have written a BASH function named getoptex, that is like bash builtin
#"getopts", but does parse long-named options and optional arguments. It only
#uses builtin bash commands, so it is very fast. In order to use it in your
#bash scripts, include a command ". getopt.sh" (<dot> getopt.sh) to the file
#containing your script, and that will define functions getopt, getoptex, and
#optlistex (the file getopt.sh with its detailed description is listed
#below).
#*** file getopt.sh ***
#! /bin/bash
#
# getopt.sh:
# functions like getopts but do long-named options parsing
# and support optional arguments
#
# Version 1.0 1997 by Grigoriy Strokin ([email protected]), Public Domain
# Date created: December 21, 1997
# Date modified: December 21, 1997
#
# IMPORTANT FEATURES
#
# 1) Parses both short and long-named options
# 2) Supports optional arguments
# 3) Only uses bash builtins, thus no calls to external
# utilities such as expr or sed is done. Therefore,
# parsing speed is high enough
#
#
# DESCRIPTION
#
# FUNCTION getopt
# Usage: getopt OPTLIST {"$@"|ALTERNATIVE_PARAMETERS}
#
# like getopts, but parses options with both required and optional arguments.
# Options with optional arguments must have "." instead of ":" after them.
# Furthermore, a variable name to place the option name cannot be specified
# and is always placed in OPTOPT variable
#
# This function is provided for compatibility with the getopts()
# OPTLIST style, and it actually calls getoptex (see below)
#
# NOTE that a list of parameters is required and must be either "$@",
# if processing command line arguments, or some alternative parameters.
#
# FUNCTION getoptex
# Usage: getoptex OPTION_LIST {"$@"|ALTERNATIVE_PARAMETERS}
#
# like getopts, but parses long-named options.
#
# Both getopt and getoptex return 0 if an option has been parsed,
# and 1 if all options are already parsed or an error occurred
#
# Both getopt and getoptex set or test the following variables:
#
# OPTERR -- tested for whether error messages must be given for invalid options
#
# OPTOPT -- set to the name of an option parsed,
# or to "?" if no more options or error
# OPTARG -- set to the option argument, if any;
# unset if there is no argument;
# on error, set to the erroneous option name
#
# OPTIND -- Initialized to 1.
# Then set to the number of the next parameter to be parsed
# when getopt or getoptex will be called next time.
# When all options are parsed, contains a number of
# the first non-option argument.
#
#
# OPTOFS -- If the parameter number $OPTIND containing an option just parsed
# does not contain any more options, OPTOFS is unset;
# otherwise, OPTOFS is set to such a number of "?" signs
# which is equal to the number of options parsed
#
# You must not set the variables OPTIND and OPTOFS yourself
# unless you want to parse a list of parameters more than once.
# In that case, you should unset OPTIND (or set it to 1)
# and unset OPTOFS each time you want to parse a new parameters list
#
# Option list format is DIFFERENT from the one for getopts or getopt.
# A getopts-style option list can be converted to getoptex-style
# using the function optlistex (see below)
#
# DESCRIPTION of option list used with getoptex:
# Option names are separated by whitespace. Options consisting of
# more than one character are treated as long-named (--option)
#
# Special characters can appear at the end of option names, specifying
# whether an argument is required (default is ";"):
# ";" (default) -- no argument
# ":" -- required argument
# "," -- optional argument
#
# For example, an option list "a b c help version f: file: separator."
# defines the following options:
# -a, -b, -c, --help, --version -- no argument
# -f, --file -- argument required
# --separator -- optional argument
#
# FUNCTION optlistex
# Usage: new_style_optlist=`optlistex OLD_STYLE_OPTLIST`
#
# Converts a getopts-style option list into a format suitable for use with getoptex
# Namely, it inserts spaces after each option name.
#
#
# HOW TO USE
#
# In order to use the functions described in your bash scripts,
# include the command ". getopt.sh" in the file containing the script,
# which will define the functions getopt, getoptex, and optlistex
#
# EXAMPLES
#
# See files 'getopt1' and 'getopt2' that contain sample scripts that use
# getopt and getoptex functions respectively
#
#
# Please send your comments to [email protected]
function getoptex()
{
let $# || return 1
local optlist="${1#;}"
let OPTIND || OPTIND=1
[ $OPTIND -lt $# ] || return 1
shift $OPTIND
if [ "$1" != "-" -a "$1" != "${1#-}" ]
then OPTIND=$[OPTIND+1]; if [ "$1" != "--" ]
then
local o
o="-${1#-$OPTOFS}"
for opt in ${optlist#;}
do
OPTOPT="${opt%[;.:]}"
unset OPTARG
local opttype="${opt##*[^;:.]}"
[ -z "$opttype" ] && opttype=";"
if [ ${#OPTOPT} -gt 1 ]
then # long-named option
case $o in
"--$OPTOPT")
if [ "$opttype" != ":" ]; then return 0; fi
OPTARG="$2"
if [ -z "$OPTARG" ];
then # error: must have an argument
let OPTERR && echo "$0: error: $OPTOPT must have an argument" >&2
OPTARG="$OPTOPT";
OPTOPT="?"
return 1;
fi
OPTIND=$[OPTIND+1] # skip option's argument
return 0
;;
"--$OPTOPT="*)
if [ "$opttype" = ";" ];
then # error: must not have arguments
let OPTERR && echo "$0: error: $OPTOPT must not have arguments" >&2
OPTARG="$OPTOPT"
OPTOPT="?"
return 1
fi
OPTARG=${o#"--$OPTOPT="}
return 0
;;
esac
else # short-named option
case "$o" in
"-$OPTOPT")
unset OPTOFS
[ "$opttype" != ":" ] && return 0
OPTARG="$2"
if [ -z "$OPTARG" ]
then
echo "$0: error: -$OPTOPT must have an argument" >&2
OPTARG="$OPTOPT"
OPTOPT="?"
return 1
fi
OPTIND=$[OPTIND+1] # skip option's argument
return 0
;;
"-$OPTOPT"*)
if [ $opttype = ";" ]
then # an option with no argument is in a chain of options
OPTOFS="$OPTOFS?" # move to the next option in the chain
OPTIND=$[OPTIND-1] # the chain still has other options
return 0
else
unset OPTOFS
OPTARG="${o#-$OPTOPT}"
return 0
fi
;;
esac
fi
done
echo "$0: error: invalid option: $o"
fi; fi
OPTOPT="?"
unset OPTARG
return 1
}
function optlistex
{
local l="$1"
local m # mask
local r # to store result
while [ ${#m} -lt $[${#l}-1] ]; do m="$m?"; done # create a "???..." mask
while [ -n "$l" ]
do
r="${r:+"$r "}${l%$m}" # append the first character of $l to $r
l="${l#?}" # cut the first charecter from $l
m="${m#?}" # cut one "?" sign from m
if [ -n "${l%%[^:.;]*}" ]
then # a special character (";", ".", or ":") was found
r="$r${l%$m}" # append it to $r
l="${l#?}" # cut the special character from l
m="${m#?}" # cut one more "?" sign
fi
done
echo $r
}
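# For example (hypothetical option list):
#   optlistex "abd:e."   # prints "a b d: e."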
function getopt()
{
local optlist=`optlistex "$1"`
shift
getoptex "$optlist" "$@"
return $?
}
#**************************************
# cut here
#**************************************
#*** (end of getopt.sh) ***
#*** file getopt1 ***
#! /bin/bash
# getopt1:
# Sample script using the function getopt
#
# Type something like "getopt1 -ab -d 10 -e20 text1 text2"
# on the command line to see how it works
#
# See getopt.sh for more information
#. getopt.sh
#echo Using getopt to parse arguments:
#while getopt "abcd:e." "$@"
#do
# echo "Option <$OPTOPT> ${OPTARG:+has an arg <$OPTARG>}"
#done
#shift $[OPTIND-1]
#for arg in "$@"
#do
# echo "Non option argument <$arg>"
#done
#
#**************************************
# cut here
#**************************************
#*** (end of getopt1) ***
#
#
#*** file getopt2 ***
#
#! /bin/bash
# getopt2:
# Sample script using the function getoptex
#
# Type something like "getopt2 -ab -d 10 -e20 --opt1 --opt4=100 text1 text2"
# to see how it works
#
# See getopt.sh for more information
. getopt.sh
#echo Using getoptex to parse arguments:
#while getoptex "a; b; c; d: e. opt1 opt2 opt3 opt4: opt5." "$@"
#do
# echo "Option <$OPTOPT> ${OPTARG:+has an arg <$OPTARG>}"
#done
#shift $[OPTIND-1]
#for arg in "$@"
#do
# echo "Non option argument <$arg>"
#done
#
#**************************************
# cut here
#**************************************
#*** (end of getopt2) ***
|
nmacs/lm3s-uclinux
|
user/bash/examples/functions/getoptx.bash
|
Shell
|
gpl-2.0
| 9,081 |
#!/bin/bash
#
# Copyright IBM Corp. All Rights Reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
######################################################################
### Run one group of the tests in the weekly test suite.
echo "========== Auction App 72Hr test"
py.test -v --junitxml results_auction_72Hr.xml testAuctionChaincode.py
|
cophey/fabric
|
test/regression/weekly/runGroup4.sh
|
Shell
|
apache-2.0
| 336 |
#!/bin/bash
MXNET_ROOT=$(cd "$(dirname $0)/../../../.."; pwd)
CLASS_PATH=$MXNET_ROOT/scala-package/assembly/linux-x86_64-gpu/target/*:$MXNET_ROOT/scala-package/examples/target/*:$MXNET_ROOT/scala-package/examples/target/classes/lib/*
# For more details please refer to
# https://github.com/Ldpe2G/mxnet/blob/develop/example/neural-style/end_to_end/README.md
TRAIN_DATA_PATH=$1
STYLE_IMG=$2
VGG_MODEL_PATH=$3
SAVE_MODEL_DIR=$4
GPU=0
java -Xmx1024m -cp $CLASS_PATH \
ml.dmlc.mxnetexamples.neuralstyle.end2end.BoostTrain \
--data-path $TRAIN_DATA_PATH \
--vgg-model-path $VGG_MODEL_PATH \
--save-model-path $SAVE_MODEL_DIR \
--style-image $STYLE_IMG \
--gpu $GPU
|
rishita/mxnet
|
scala-package/examples/scripts/neuralstyle_end2end/run_train_end2end.sh
|
Shell
|
apache-2.0
| 670 |
#!/bin/bash
#
# NFS/CIFS file system mount/umount/etc. agent
#
#
# Copyright (C) 1997-2003 Sistina Software, Inc. All rights reserved.
# Copyright (C) 2004-2011 Red Hat, Inc. All rights reserved.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
. $(dirname $0)/utils/fs-lib.sh
do_metadata()
{
cat <<EOT
<?xml version="1.0" ?>
<!DOCTYPE resource-agent SYSTEM "ra-api-1-modified.dtd">
<resource-agent name="netfs" version="rgmanager 2.0">
<version>1.0</version>
<longdesc lang="en">
This defines an NFS/CIFS mount for use by cluster services.
</longdesc>
<shortdesc lang="en">
Defines an NFS/CIFS file system mount.
</shortdesc>
<parameters>
<parameter name="name" primary="1">
<longdesc lang="en">
Symbolic name for this file system.
</longdesc>
<shortdesc lang="en">
File System Name
</shortdesc>
<content type="string"/>
</parameter>
<parameter name="mountpoint" unique="1" required="1">
<longdesc lang="en">
Path in the file system hierarchy to mount this file system.
</longdesc>
<shortdesc lang="en">
Mount Point
</shortdesc>
<content type="string"/>
</parameter>
<parameter name="host" required="1">
<longdesc lang="en">
Server IP address or hostname
</longdesc>
<shortdesc lang="en">
IP or Host
</shortdesc>
<content type="string"/>
</parameter>
<parameter name="export" required="1">
<longdesc lang="en">
NFS Export directory name or CIFS share
</longdesc>
<shortdesc lang="en">
Export
</shortdesc>
<content type="string"/>
</parameter>
<parameter name="fstype" required="0">
<longdesc lang="en">
File System type (nfs, nfs4 or cifs)
</longdesc>
<shortdesc lang="en">
File System Type
</shortdesc>
<content type="string"/>
</parameter>
<parameter name="no_unmount" required="0">
<longdesc lang="en">
Do not unmount the filesystem during a stop or relocation operation
</longdesc>
<shortdesc lang="en">
Skip unmount operation
</shortdesc>
<content type="boolean"/>
</parameter>
<parameter name="force_unmount">
<longdesc lang="en">
If set, the cluster will kill all processes using
this file system when the resource group is
stopped. Otherwise, the unmount will fail, and
the resource group will be restarted.
</longdesc>
<shortdesc lang="en">
Force Unmount
</shortdesc>
<content type="boolean"/>
</parameter>
<parameter name="self_fence">
<longdesc lang="en">
If set and unmounting the file system fails, the node will
immediately reboot. Generally, this is used in conjunction
with force_unmount support, but it is not required.
</longdesc>
<shortdesc lang="en">
Seppuku Unmount
</shortdesc>
<content type="boolean"/>
</parameter>
<parameter name="options">
<longdesc lang="en">
Provides a list of mount options. If none are specified,
the NFS file system is mounted -o sync.
</longdesc>
<shortdesc lang="en">
Mount Options
</shortdesc>
<content type="string"/>
</parameter>
<parameter name="use_findmnt">
<longdesc lang="en">
Use findmnt to determine if and where a filesystem is mounted.
Disabling this uses the failback method (should be used if autofs
maps are located on network storage (ie. nfs, iscsi, etc).
</longdesc>
<shortdesc lang="en">
Utilize findmnt to detect if and where filesystems are mounted
</shortdesc>
<content type="boolean"/>
</parameter>
</parameters>
<actions>
<action name="start" timeout="900"/>
<action name="stop" timeout="30"/>
<!-- Recovery isn't possible; we don't know if resources are using
the file system. -->
<!-- Checks to see if it's mounted in the right place -->
<action name="status" interval="1m" timeout="10"/>
<action name="monitor" interval="1m" timeout="10"/>
<!-- Checks to see if we can read from the mountpoint -->
<action name="status" depth="10" timeout="30" interval="5m"/>
<action name="monitor" depth="10" timeout="30" interval="5m"/>
<!-- Checks to see if we can write to the mountpoint (if !ROFS) -->
<action name="status" depth="20" timeout="30" interval="10m"/>
<action name="monitor" depth="20" timeout="30" interval="10m"/>
<action name="meta-data" timeout="5"/>
<action name="validate-all" timeout="5"/>
</actions>
<special tag="rgmanager">
<child type="nfsexport" forbid="1"/>
<child type="nfsclient" forbid="1"/>
</special>
</resource-agent>
EOT
}
verify_host()
{
if [ -z "$OCF_RESKEY_host" ]; then
ocf_log err "No server hostname or IP address specified."
return 1
fi
host $OCF_RESKEY_host 2>&1 | grep -vq "not found"
if [ $? -eq 0 ]; then
return 0
fi
ocf_log err "Hostname or IP address \"$OCF_RESKEY_host\" not valid"
return $OCF_ERR_ARGS
}
verify_fstype()
{
# Auto detect?
[ -z "$OCF_RESKEY_fstype" ] && return 0
case $OCF_RESKEY_fstype in
nfs|nfs4|cifs)
return 0
;;
*)
ocf_log err "File system type $OCF_RESKEY_fstype not supported"
return $OCF_ERR_ARGS
;;
esac
}
verify_options()
{
declare -i ret=0
#
# From mount(1)
#
for o in `echo $OCF_RESKEY_options | sed -e s/,/\ /g`; do
case $o in
async|atime|auto|defaults|dev|exec|_netdev|noatime)
continue
;;
noauto|nodev|noexec|nosuid|nouser|ro|rw|suid|sync)
continue
;;
dirsync|user|users)
continue
;;
esac
case $OCF_RESKEY_fstype in
cifs)
continue
;;
nfs|nfs4)
case $o in
#
# NFS / NFS4 common
#
rsize=*|wsize=*|timeo=*|retrans=*|acregmin=*)
continue
;;
acregmax=*|acdirmin=*|acdirmax=*|actimeo=*)
continue
;;
retry=*|port=*|bg|fg|soft|hard|intr|cto|ac|noac)
continue
;;
esac
#
# NFS v2/v3 only
#
if [ "$OCF_RESKEY_fstype" = "nfs" ]; then
case $o in
mountport=*|mounthost=*)
continue
;;
mountprog=*|mountvers=*|nfsprog=*|nfsvers=*)
continue
;;
namelen=*)
continue
;;
tcp|udp|lock|nolock)
continue
;;
esac
fi
#
# NFS4 only
#
if [ "$OCF_RESKEY_fstype" = "nfs4" ]; then
case $o in
proto=*|clientaddr=*|sec=*)
continue
;;
esac
fi
;;
esac
ocf_log err "Option $o not supported for $OCF_RESKEY_fstype"
ret=$OCF_ERR_ARGS
done
return $ret
}
do_validate()
{
verify_name || return $OCF_ERR_ARGS
verify_fstype|| return $OCF_ERR_ARGS
verify_host || return $OCF_ERR_ARGS
verify_mountpoint || return $OCF_ERR_ARGS
verify_options || return $OCF_ERR_ARGS
# verify_target || return $OCF_ERR_ARGS
}
#
# Override real_device to use fs-lib's functions for start/stop_filesystem
#
real_device() {
export REAL_DEVICE="$1"
}
#
# do_mount - nfs / cifs are mounted differently than blockdevs
#
do_mount() {
declare opts=""
declare mount_options=""
declare ret_val
declare mp="$OCF_RESKEY_mountpoint"
#
# Get the filesystem type, if specified.
#
fstype_option=""
fstype=${OCF_RESKEY_fstype}
case "$fstype" in
""|"[ ]*")
fstype=""
;;
*) # found it
fstype_option="-t $fstype"
;;
esac
#
# Get the mount options, if they exist.
#
mount_options=""
opts=${OCF_RESKEY_options}
case "$opts" in
""|"[ ]*")
opts=""
;;
*) # found it
mount_options="-o $opts"
;;
esac
case $OCF_RESKEY_fstype in
nfs|nfs4)
mount -t $OCF_RESKEY_fstype $mount_options $OCF_RESKEY_host:"$OCF_RESKEY_export" "$mp"
;;
cifs)
mount -t $OCF_RESKEY_fstype $mount_options //$OCF_RESKEY_host/"$OCF_RESKEY_export" "$mp"
;;
esac
ret_val=$?
if [ $ret_val -ne 0 ]; then
ocf_log err "\
'mount $fstype_option $mount_options $OCF_RESKEY_host:$OCF_RESKEY_export $mp' failed, error=$ret_val"
return 1
fi
return 0
}
do_nfs_rpc_check() {
# see man nfs TRANSPORT PROTOCOL section for defaults
local nfsproto=tcp
local nfsmountproto=udp
# follow the same logic as mount.nfs option parser.
# the rightmost option wins over previous ones, so don't break when
# we find something.
for o in $(echo ${OCF_RESKEY_options} | sed -e s/,/\ /g); do
if echo $o | grep -q "^proto=" ; then
nfsproto="$(echo $o | cut -d "=" -f 2)"
fi
if echo $o | grep -q "^mountproto=" ; then
nfsmountproto="$(echo $o | cut -d "=" -f 2)"
fi
case $o in
tcp) nfsproto=tcp;;
udp) nfsproto=udp;;
rdma) nfsproto=rdma;;
esac
done
ocf_log debug "Testing generic rpc access on server ${OCF_RESKEY_host} with protocol $nfsproto"
if ! rpcinfo -T $nfsproto ${OCF_RESKEY_host} > /dev/null 2>&1; then
ocf_log alert "RPC server on ${OCF_RESKEY_host} with $nfsproto is not responding"
return 1
fi
ocf_log debug "Testing nfs rcp access on server ${OCF_RESKEY_host} with protocol $nfsproto"
if ! rpcinfo -T $nfsproto ${OCF_RESKEY_host} nfs > /dev/null 2>&1; then
ocf_log alert "NFS server on ${OCF_RESKEY_host} with $nfsproto is not responding"
return 1
fi
if [ "$OCF_RESKEY_fstype" = nfs ]; then
ocf_log debug "Testing mountd rpc access on server ${OCF_RESKEY_host} with protocol $nfsmountproto"
if ! rpcinfo -T $nfsmountproto ${OCF_RESKEY_host} mountd; then
ocf_log alert "MOUNTD server on ${OCF_RESKEY_host} with $nfsmountproto is not responding"
return 1
fi
fi
return 0
}
do_pre_unmount() {
case $OCF_RESKEY_fstype in
nfs|nfs4)
if [ "$self_fence" != $YES ]; then
ocf_log debug "Skipping pre unmount checks: self_fence is disabled"
return 0
fi
is_mounted "$dev" "$mp"
case $? in
$NO)
ocf_log debug "Skipping pre unmount checks: device is not mounted"
return 0
;;
esac
ocf_log info "pre unmount: checking if nfs server ${OCF_RESKEY_host} is alive"
if ! do_nfs_rpc_check; then
ocf_log alert "NFS server not responding - REBOOTING"
sleep 2
reboot -fn
fi
;;
esac
return 0
}
do_force_unmount() {
case $OCF_RESKEY_fstype in
nfs|nfs4)
ocf_log warning "Calling 'umount -f $mp'"
umount -f "$OCF_RESKEY_mountpoint"
return $?
;;
*)
;;
esac
return 1 # Returning 1 lets stop_filesystem do add'l checks
}
populate_defaults()
{
if [ -z "$OCF_RESKEY_fstype" ]; then
export OCF_RESKEY_fstype=nfs
fi
case $OCF_RESKEY_fstype in
nfs|nfs4)
export OCF_RESKEY_device="$OCF_RESKEY_host:$OCF_RESKEY_export"
if [ -z "$OCF_RESKEY_options" ]; then
export OCF_RESKEY_options=sync,soft,noac
fi
;;
cifs)
export OCF_RESKEY_device="//$OCF_RESKEY_host/$OCF_RESKEY_export"
if [ -z "$OCF_RESKEY_options" ]; then
export OCF_RESKEY_options=guest
fi
;;
esac
}
#
# Main...
#
populate_defaults
main $*
|
ingted/resource-agents
|
rgmanager/src/resources/netfs.sh
|
Shell
|
gpl-2.0
| 11,818 |
#!/bin/bash
# Copyright (c) 2011-2014, Intel Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# It creates XML settings from a .pfw description
# It also instantiates a PFW with them loaded
# The generated pfw commands
scriptPFWFile="/tmp/scriptPFW"
# A tmp file; the pfw must have write privilege on it
ExportTmpFile="/data/routageDomains.xml"
set -e -u -o pipefail
if test $# -eq 0
then
DomainFile="${PFWtest_DomainFile}"
else
DomainFile="$@"
fi
function echoColor ()
{
if test -t 1 ;
then
# stdout is a tty => colors
/bin/echo -e "\033[32m$@\033[0m"
else
# stdout is not a tty => no color
/bin/echo -e "$@"
fi
}
function androidWithError ()
{
echo " \$ $PFWtest_prefixCommand $@"
local result
result=$( $PFWtest_prefixCommand "$*"' ; echo $?' | sed -e 's#[\r]##' );
echo "$(echo "$result" | sed '$d')" ;
return "$(echo "$result" | tail -n1 )";
}
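# Example usage (hypothetical command): run "ls /data" on the target and
# propagate its remote exit status to the host shell:
#   androidWithError ls /data || echo "remote ls failed"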
echoColor "Translate domains to pfw commands"
echoColor "Domains source file: $DomainFile"
m4 "$DomainFile" | $(dirname $0)/PFWScriptGenerator.py --output-kind pfw -o "$scriptPFWFile"
echoColor "List of generated domains :"
sed -ne 's/createDomain \(.*\)/ \1/p' "$scriptPFWFile"
echoColor "Make fs writable"
adb remount
echoColor "instanciate pseudo hal"
$PFWtest_test_scripts/instanciatePseudoHal.sh "$PFWtest_ParameterFrameworkConfigurationFile" "$PFWtest_CriterionFile"
echoColor "Create Domains"
$(dirname $0)/domainGenerator.sh "$scriptPFWFile" --keep-autoSync-disable
echoColor "Export domains and settings"
androidWithError remote-process $PFWtest_ParameterFramworkHost exportDomainsWithSettingsXML "$ExportTmpFile"
androidWithError cp "$ExportTmpFile" "$PFWtest_RemoteOutputFile"
echoColor "restart PseudoHal"
$PFWtest_test_scripts/instanciatePseudoHal.sh "$PFWtest_ParameterFrameworkConfigurationFile" "$PFWtest_CriterionFile"
echoColor "Synchronization with local file : $PFWtest_LocalOutputFile"
adb pull "$PFWtest_RemoteOutputFile" "$PFWtest_LocalOutputFile"
|
geekboxzone/mmallow_external_parameter-framework
|
tools/xmlGenerator/updateRoutageDomains.sh
|
Shell
|
bsd-3-clause
| 3,496 |
#!/bin/bash
autoreconf -fi;
rm -Rf autom4te.cache;
|
aagallag/nexmon
|
utilities/libnl/autogen.sh
|
Shell
|
gpl-3.0
| 52 |
#!/bin/bash
#
# Script to set up a GCE instance to run the webtry server.
# For full instructions see the README file.
sudo apt-get install schroot debootstrap monit squid3
sudo apt-get install g++ libfreetype6 libfreetype6-dev libpng12-0 libpng12-dev libglu1-mesa-dev mesa-common-dev freeglut3-dev libgif-dev libfontconfig libfontconfig-dev
echo "Adding the webtry user account"
sudo adduser webtry
sudo cp continue_install /home/webtry/continue_install
sudo chmod 766 /home/webtry/continue_install
sudo chown webtry:webtry /home/webtry/continue_install
sudo su webtry -c /home/webtry/continue_install
sudo mkdir -p /srv/chroot/webtry
sudo cp /home/webtry/skia/experimental/webtry/sys/webtry_schroot /etc/schroot/chroot.d/webtry
sudo mkdir /srv/chroot/webtry/etc
sudo mkdir /srv/chroot/webtry/bin
sudo cp /bin/sh /srv/chroot/webtry/bin/sh
# Copy all the dependent libraries into the schroot.
sudo cp --parents `ldd /home/webtry/skia/out/Debug/webtry | cut -d " " -f 3` /srv/chroot/webtry
sudo cp --parents `ldd /bin/sh | cut -d " " -f 3` /srv/chroot/webtry
sudo cp /home/webtry/skia/experimental/webtry/sys/webtry_init /etc/init.d/webtry
sudo cp /home/webtry/skia/experimental/webtry/sys/webtry_monit /etc/monit/conf.d/webtry
sudo cp /home/webtry/skia/experimental/webtry/sys/webtry_squid /etc/squid3/squid.conf
sudo chmod 744 /etc/init.d/webtry
# Confirm that monit is happy.
sudo monit -t
|
s20121035/rk3288_android5.1_repo
|
external/skia/experimental/webtry/setup/webtry_setup.sh
|
Shell
|
gpl-3.0
| 1,398 |
#!/bin/sh
#
# Check if the current architecture is missing any system calls compared
# to i386.
# i386 defines a number of legacy system calls that are i386-specific;
# they are listed below so they are ignored.
#
# Usage:
# syscallchk gcc gcc-options
#
ignore_list() {
cat << EOF
#include <asm/types.h>
#include <asm/unistd.h>
/* *at */
#define __IGNORE_open /* openat */
#define __IGNORE_link /* linkat */
#define __IGNORE_unlink /* unlinkat */
#define __IGNORE_mknod /* mknodat */
#define __IGNORE_chmod /* fchmodat */
#define __IGNORE_chown /* fchownat */
#define __IGNORE_mkdir /* mkdirat */
#define __IGNORE_rmdir /* unlinkat */
#define __IGNORE_lchown /* fchownat */
#define __IGNORE_access /* faccessat */
#define __IGNORE_rename /* renameat */
#define __IGNORE_readlink /* readlinkat */
#define __IGNORE_symlink /* symlinkat */
#define __IGNORE_utimes /* futimesat */
#if BITS_PER_LONG == 64
#define __IGNORE_stat /* fstatat */
#define __IGNORE_lstat /* fstatat */
#else
#define __IGNORE_stat64 /* fstatat64 */
#define __IGNORE_lstat64 /* fstatat64 */
#endif
/* CLOEXEC flag */
#define __IGNORE_pipe /* pipe2 */
#define __IGNORE_dup2 /* dup3 */
#define __IGNORE_epoll_create /* epoll_create1 */
#define __IGNORE_inotify_init /* inotify_init1 */
#define __IGNORE_eventfd /* eventfd2 */
#define __IGNORE_signalfd /* signalfd4 */
/* MMU */
#ifndef CONFIG_MMU
#define __IGNORE_madvise
#define __IGNORE_mbind
#define __IGNORE_mincore
#define __IGNORE_mlock
#define __IGNORE_mlockall
#define __IGNORE_munlock
#define __IGNORE_munlockall
#define __IGNORE_mprotect
#define __IGNORE_msync
#define __IGNORE_migrate_pages
#define __IGNORE_move_pages
#define __IGNORE_remap_file_pages
#define __IGNORE_get_mempolicy
#define __IGNORE_set_mempolicy
#define __IGNORE_swapoff
#define __IGNORE_swapon
#endif
/* System calls for 32-bit kernels only */
#if BITS_PER_LONG == 64
#define __IGNORE_sendfile64
#define __IGNORE_ftruncate64
#define __IGNORE_truncate64
#define __IGNORE_stat64
#define __IGNORE_lstat64
#define __IGNORE_fstat64
#define __IGNORE_fcntl64
#define __IGNORE_fadvise64_64
#define __IGNORE_fstatat64
#define __IGNORE_fstatfs64
#define __IGNORE_statfs64
#define __IGNORE_llseek
#define __IGNORE_mmap2
#else
#define __IGNORE_sendfile
#define __IGNORE_ftruncate
#define __IGNORE_truncate
#define __IGNORE_stat
#define __IGNORE_lstat
#define __IGNORE_fstat
#define __IGNORE_fcntl
#define __IGNORE_fadvise64
#define __IGNORE_newfstatat
#define __IGNORE_fstatfs
#define __IGNORE_statfs
#define __IGNORE_lseek
#define __IGNORE_mmap
#endif
/* i386-specific or historical system calls */
#define __IGNORE_break
#define __IGNORE_stty
#define __IGNORE_gtty
#define __IGNORE_ftime
#define __IGNORE_prof
#define __IGNORE_lock
#define __IGNORE_mpx
#define __IGNORE_ulimit
#define __IGNORE_profil
#define __IGNORE_ioperm
#define __IGNORE_iopl
#define __IGNORE_idle
#define __IGNORE_modify_ldt
#define __IGNORE_ugetrlimit
#define __IGNORE_vm86
#define __IGNORE_vm86old
#define __IGNORE_set_thread_area
#define __IGNORE_get_thread_area
#define __IGNORE_madvise1
#define __IGNORE_oldstat
#define __IGNORE_oldfstat
#define __IGNORE_oldlstat
#define __IGNORE_oldolduname
#define __IGNORE_olduname
#define __IGNORE_umount
#define __IGNORE_waitpid
#define __IGNORE_stime
#define __IGNORE_nice
#define __IGNORE_signal
#define __IGNORE_sigaction
#define __IGNORE_sgetmask
#define __IGNORE_sigsuspend
#define __IGNORE_sigpending
#define __IGNORE_ssetmask
#define __IGNORE_readdir
#define __IGNORE_socketcall
#define __IGNORE_ipc
#define __IGNORE_sigreturn
#define __IGNORE_sigprocmask
#define __IGNORE_bdflush
#define __IGNORE__llseek
#define __IGNORE__newselect
#define __IGNORE_create_module
#define __IGNORE_query_module
#define __IGNORE_get_kernel_syms
#define __IGNORE_sysfs
#define __IGNORE_uselib
#define __IGNORE__sysctl
/* ... including the "new" 32-bit uid syscalls */
#define __IGNORE_lchown32
#define __IGNORE_getuid32
#define __IGNORE_getgid32
#define __IGNORE_geteuid32
#define __IGNORE_getegid32
#define __IGNORE_setreuid32
#define __IGNORE_setregid32
#define __IGNORE_getgroups32
#define __IGNORE_setgroups32
#define __IGNORE_fchown32
#define __IGNORE_setresuid32
#define __IGNORE_getresuid32
#define __IGNORE_setresgid32
#define __IGNORE_getresgid32
#define __IGNORE_chown32
#define __IGNORE_setuid32
#define __IGNORE_setgid32
#define __IGNORE_setfsuid32
#define __IGNORE_setfsgid32
/* these can be expressed using other calls */
#define __IGNORE_alarm /* setitimer */
#define __IGNORE_creat /* open */
#define __IGNORE_fork /* clone */
#define __IGNORE_futimesat /* utimensat */
#define __IGNORE_getpgrp /* getpgid */
#define __IGNORE_getdents /* getdents64 */
#define __IGNORE_pause /* sigsuspend */
#define __IGNORE_poll /* ppoll */
#define __IGNORE_select /* pselect6 */
#define __IGNORE_epoll_wait /* epoll_pwait */
#define __IGNORE_time /* gettimeofday */
#define __IGNORE_uname /* newuname */
#define __IGNORE_ustat /* statfs */
#define __IGNORE_utime /* utimes */
#define __IGNORE_vfork /* clone */
/* sync_file_range had a stupid ABI. Allow sync_file_range2 instead */
#ifdef __NR_sync_file_range2
#define __IGNORE_sync_file_range
#endif
/* Unmerged syscalls for AFS, STREAMS, etc. */
#define __IGNORE_afs_syscall
#define __IGNORE_getpmsg
#define __IGNORE_putpmsg
#define __IGNORE_vserver
EOF
}
syscall_list() {
sed -n -e '/^\#define/ s/[^_]*__NR_\([^[:space:]]*\).*/\
\#if !defined \(__NR_\1\) \&\& !defined \(__IGNORE_\1\)\
\#warning syscall \1 not implemented\
\#endif/p' $1
}
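# For a syscall "foo" (hypothetical), syscall_list emits a stanza like:
#   #if !defined (__NR_foo) && !defined (__IGNORE_foo)
#   #warning syscall foo not implemented
#   #endif
# which the compiler run below turns into a warning when neither the
# syscall nor an __IGNORE_ override is defined.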
(ignore_list && syscall_list ${srctree}/arch/x86/include/asm/unistd_32.h) | \
$* -E -x c - > /dev/null
|
richardtrip/GT-P6200-kernel
|
scripts/checksyscalls.sh
|
Shell
|
gpl-2.0
| 5,624 |
#!/usr/bin/env bash
set -eu
source ../gen-tests-makefile.sh
echo "Generate FST for sim models"
find tb/* -name "tb*.v" | while read -r name; do
	test_name=$(basename -s .v "$name")
echo "Test $test_name"
verilog_name=${test_name:3}.v
iverilog -o tb/$test_name.out $name $verilog_name
./tb/$test_name.out -fst
done
run_tests --yosys-scripts --bash --yosys-args "-w 'Yosys has only limited support for tri-state logic at the moment.'"
|
YosysHQ/yosys
|
tests/sim/run-test.sh
|
Shell
|
isc
| 445 |
#!/bin/bash
OLD_FILENAME=a_b_c
OLD_MACRO=_A_B_C_H_
OLD_CLASS=ABC
OLD_HEADER=${OLD_FILENAME}.h
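# Naming convention assumed by this script, e.g. (illustrative):
#   filename foo_bar -> header foo_bar.h, guard _FOO_BAR_H_, class FooBar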
for f in `cat source.txt`
do
echo "copy..."
cp ${OLD_FILENAME}.h ${f}.h
cp ${OLD_FILENAME}.cpp ${f}.cpp
echo ${f}.h >> tmp.txt
echo ${f}.cpp >> tmp.txt
done
for f in `cat tmp.txt`
do
echo "replace..."
filename=${f%.*}
#echo $filename
macro=$(echo -n _${filename}_H_|tr '[a-z]' '[A-Z]')
#echo $macro
# Replace the header guard macro
sed -i "s/$OLD_MACRO/$macro/g" $f
# Replace the included header in the source file
sed -i "s/$OLD_HEADER/${filename}.h/g" $f
# Build the CamelCase class name from the snake_case filename
class=$(echo -n ${filename:0:1}|tr '[a-z]' '[A-Z]')
for ((i=1;i<${#filename};i++))
do
    ch=${filename:$i:1}
    if [ ${ch}x = "_"x ];then
        i=$((i+1))
        ch=$(echo -n ${filename:$i:1}|tr '[a-z]' '[A-Z]')
    fi
    class=${class}${ch}
done
#echo $class
# Replace the class name in the header/source file (once, after the full
# class name has been built)
sed -i "s/${OLD_CLASS}/${class}/g" $f
echo "done"
done
# Remove the work list only after all files have been processed
rm tmp.txt
|
guodongxiaren/practicalscripts
|
cpp_stub.sh
|
Shell
|
mit
| 1,002 |
#!/bin/bash
# #### USAGE
# cd /go-to-the-directory-where-you-to-create-your-QR-code/
# sh qr_code_generator_1.sh
#install the library qrencode to generate the QR code
brew install qrencode
#install the library zbar to analyse the QR code
brew install zbar
#generate the QR codes for the main navigation of the website
qrencode -o flaven_home.png "http://www.flaven.fr"
qrencode -o flaven_blog.png "http://www.flaven.fr/blog/"
qrencode -o flaven_resume.png "http://flaven.fr/bruno-flaven-resume-cv/"
qrencode -o flaven_clients.png "http://flaven.fr/clients-et-realisations/"
qrencode -o flaven_about_3wdoc.png "http://flaven.fr/a-propos-de-3wdoc/"
qrencode -o flaven_books.png "http://flaven.fr/livres/"
qrencode -o flaven_quotes.png "http://flaven.fr/les-citations/"
#analyse the newly generated QR codes
zbarimg flaven_home.png
zbarimg flaven_blog.png
zbarimg flaven_resume.png
zbarimg flaven_clients.png
zbarimg flaven_about_3wdoc.png
zbarimg flaven_books.png
zbarimg flaven_quotes.png
#create a QR code from a .vcf
cat forrest_gump_vcf.vcf | qrencode -o "forrest_gump_qr_code.png"
zbarimg forrest_gump_qr_code.png
echo "\033[1;33m ### forrest_gump_qr_code.png is created ### \033[0m"
#get rid of the margin -m option
cat forrest_gump_vcf.vcf | qrencode -o "forrest_gump_qr_code_nomargin.png" -m 0
echo "\033[1;33m ### forrest_gump_qr_code_nomargin.png is created ### \033[0m"
#change the dots size with -s option
cat forrest_gump_vcf.vcf | qrencode -o "forrest_gump_qr_code_dots.png" -s 4
echo "\033[1;33m ### forrest_gump_qr_code_dots.png is created ### \033[0m"
|
bflaven/BlogArticlesExamples
|
generating-qr-code-testing-universal-links-deeplinks/qr_code_generator.sh
|
Shell
|
mit
| 1,587 |
#!/usr/bin/env bash
plugin_names=(
"mileszs/ack.vim.git"
"ctrlpvim/ctrlp.vim.git"
"fatih/molokai.git"
"scrooloose/nerdcommenter.git"
"scrooloose/nerdtree.git"
"joshdick/onedark.vim"
"godlygeek/tabular.git"
"SirVer/ultisnips.git"
"tpope/vim-abolish.git"
"bling/vim-airline.git"
"vim-airline/vim-airline-themes.git"
"altercation/vim-colors-solarized.git"
"tpope/vim-fugitive.git"
"fatih/vim-go.git"
"dgryski/vim-godef"
"tfnico/vim-gradle"
"pangloss/vim-javascript.git"
"tpope/vim-markdown.git"
"Harenome/vim-mipssyntax.git"
"rakr/vim-one.git"
"jpo/vim-railscasts-theme.git"
"derekwyatt/vim-scala.git"
"lervag/vimtex.git"
"majutsushi/tagbar.git"
"FelikZ/ctrlp-py-matcher.git"
)
for name in "${plugin_names[@]}"
do
echo "Git cloning $name..."
git clone "https://github.com/$name"
done
git clone "https://github.com/dracula/vim.git dracula-theme"
git clone "[email protected]:yaojingguo/vim-snippets.git"
git clone "[email protected]:yaojingguo/ir_black.git"
|
yaojingguo/vimfiles
|
bundle.sh
|
Shell
|
mit
| 1,005 |
#!/bin/sh
make test
rm -f valgrind-*.out
for f in src/libjsapi/build/Debug/GNU-Linux-x86/tests/TestFiles/*; do
valgrind --log-file=valgrind-%p.out $f --gtest_repeat=10
done
cat valgrind-*.out
|
RipcordSoftware/libjsapi
|
grind.sh
|
Shell
|
mit
| 196 |
#!/bin/bash -x
tests=($(seq 1 3))
sizes=(562 1000 1779 3162 5623 10000);
strategies=("full" "mid-quarter-zero" "custom")
echo "-> Time store query compressed... ($1)"
for size in "${sizes[@]}"
do
ub=$((size - 1))
for test_idx in "${tests[@]}"
do
for strategy in "${strategies[@]}"
do
echo "query all store ${size}-${test_idx}-${strategy} $1"
bs_cmd="$3 -t $1 -q -l $2/sample.${size}.${test_idx}.bed -s $2/sample.${size}.${test_idx}.${strategy}.$1.bs -i 0-$ub"
{ time -p $bs_cmd > /dev/null; } 2> $2/sample.${size}.${test_idx}.${strategy}.$1.bs.query_all_time
done
done
done
|
alexpreynolds/byte-store
|
test/time_store_query_all_compressed.sh
|
Shell
|
mit
| 655 |
#!/bin/sh
CLOSURE_LIB="../closure-library"
CLOSURE_COMPILER="../closure-compiler"
python "${CLOSURE_LIB}/closure/bin/build/closurebuilder.py" \
--namespace embedly.exports \
--root . \
--root "${CLOSURE_LIB}" \
-o compiled \
-c "${CLOSURE_COMPILER}/compiler.jar" \
-f --compilation_level=ADVANCED_OPTIMIZATIONS \
--output_file=embedly.min.js
|
embedly/embedly-closure
|
build.sh
|
Shell
|
mit
| 358 |
#!/bin/sh
echo "Installing dependencies..."
apt-get install wget user-mode-linux uml-utilities bridge-utils debootstrap realpath
echo "Setting up the tap0 network device..."
echo "
auto tap0
iface tap0 inet static
address 10.10.10.1
netmask 255.255.255.0
tunctl_user uml-net
" >> /etc/network/interfaces
echo "Adding the switched device to /etc/defaults/uml-utilities..."
echo 'UML_SWITCH_OPTIONS="-tap tap0"' >> /etc/defaults/uml-utilities
echo "Stopping UML daemon..."
/etc/init.d/uml-utilities stop
echo "Bringing up tap0 adapter..."
ifup tap0
echo "Starting UML daemon..."
/etc/init.d/uml-utilities start
echo "Done."
|
AVGP/usermode-linux-containers
|
bootstrap.sh
|
Shell
|
mit
| 674 |
# By referring to Figure 5, identify the prompt, command, options, arguments, and cursor in each line of Figure 6.
[~] prompt
cd command
ruby argument
cursor
# Most modern terminal programs have the ability to create multiple tabs (Figure 7), which are useful for organizing a set of related terminal windows.5 By examining the menu items for your terminal program (Figure 8), figure out how to create a new tab. Extra credit: Learn the keyboard shortcut for creating a new tab. (Learning keyboard shortcuts for your system is an excellent habit to cultivate.)
Cmd-T (⌘T)
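# Worked example (hypothetical command line "[~]$ ls -l Desktop"):
# "[~]$" is the prompt, "ls" the command, "-l" an option, "Desktop" an
# argument, and the cursor sits at the end of the line after "Desktop".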
|
pporche87/Unix-and-Bash
|
command-line/exercises1-1.sh
|
Shell
|
mit
| 569 |
#!/bin/bash
# PHP
apt-get install -y php5-fpm
apt-get install -y php5-cli
apt-get install -y php5-mysql
apt-get install -y php5-curl
apt-get install -y php5-gd
apt-get install -y php5-mcrypt
# Install mcrypt (Needed for Ubuntu)
php5enmod mcrypt
cat /etc/php5/fpm/php.ini | sed -e "s/.*;cgi.fix_pathinfo=1.*/cgi.fix_pathinfo=0/" > /etc/php5/fpm/php.ini.1
cat /etc/php5/fpm/php.ini.1 | sed -e "s/.*post_max_size.*/post_max_size = 8M/" > /etc/php5/fpm/php.ini.2
cat /etc/php5/fpm/php.ini.2 | sed -e "s/.*upload_max_filesize.*/upload_max_filesize = 8M/" > /etc/php5/fpm/php.ini.3
cat /etc/php5/fpm/php.ini.3 | sed -e "s/.*max_file_uploads.*/max_file_uploads = 5/" > /etc/php5/fpm/php.ini.4
cat /etc/php5/fpm/php.ini.4 | sed -e "s/.*expose_php.*/expose_php = off/" > /etc/php5/fpm/php.ini.new
mv /etc/php5/fpm/php.ini.new /etc/php5/fpm/php.ini
# Remove intermediate files (keep them when debugging)
rm /etc/php5/fpm/php.ini.1
rm /etc/php5/fpm/php.ini.2
rm /etc/php5/fpm/php.ini.3
rm /etc/php5/fpm/php.ini.4
# Reload PHP
service php5-fpm restart
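# Note: the cat|sed chains above could be collapsed into one in-place edit,
# e.g. (untested sketch):
#   sed -i -e 's/.*;cgi.fix_pathinfo=1.*/cgi.fix_pathinfo=0/' \
#          -e 's/.*post_max_size.*/post_max_size = 8M/' \
#          -e 's/.*upload_max_filesize.*/upload_max_filesize = 8M/' /etc/php5/fpm/php.ini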
|
Timekiller11/server_installer
|
modules/php.sh
|
Shell
|
mit
| 1,016 |
#!/bin/bash
mkdir app/components/$1
mkdir app/components/$1/tests
touch app/components/$1/$1.js
touch app/components/$1/$1.scss
touch app/components/$1/tests/$1.test.js
echo "import React from 'react';
require('./$1.scss');
const $1 = () => {
return (
<div className='$1' />
);
};
$1.defaultProps = {
};
$1.propTypes = {
};
export default $1;" >> app/components/$1/$1.js
echo "@import '../../style/colors';
.$1 {
}" >> app/components/$1/$1.scss
echo "import jasmineEnzyme from 'jasmine-enzyme';
import { shallow } from 'enzyme';
import React from 'react';
import $1 from '../$1';
describe('<$1 />', () => {
beforeEach(() => {
jasmineEnzyme();
});
it('exists', () => {
const wrapper = shallow(<$1 />);
expect(wrapper.find('div')).toBePresent();
});
});" >> app/components/$1/tests/$1.test.js
|
sunnymis/Swoleciety
|
create-component.sh
|
Shell
|
mit
| 792 |
#!/bin/bash
# This program blinks pin 9-41 (GPIO_20)
DIR=/sys/class/gpio
# If it isn't already, get gpio20 to appear in DIR
if [ ! -e ${DIR}/gpio20 ]
then
echo 20 > ${DIR}/export
fi
# Set pin as output
echo high > ${DIR}/gpio20/direction
# Run infinite loop to blink led
while :
do
# turn led on and wait for half a second
echo 1 > ${DIR}/gpio20/value
sleep .5
# turn led off and wait for half a second
echo 0 > ${DIR}/gpio20/value
sleep .5
done
|
EliHar/mr_robot
|
tools/lab/lab_5/blink.sh
|
Shell
|
mit
| 481 |
#! /bin/bash
# convert any video files in current directory to mp3
# exit on error
set -e
# extended globbing
shopt -s extglob
shopt -s nullglob
videos=*(*.flv|*.mp4|*.avi|*.mkv)
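# *(pat1|pat2|...) is an extglob pattern matching the listed alternatives; at
# the loop below $videos expands to e.g. (illustrative) "talk.mp4 clip.flv",
# or to nothing (thanks to nullglob) when no video files are present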
for file in $videos; do
    outputname="${file%.*}.mp3"
    # test ffmpeg directly: with `set -e`, checking $? afterwards would
    # abort the script before the cleanup branch could run
    if ffmpeg -i "$file" -vn -acodec libmp3lame -ar 44100 -ab 128k -ac 2 -f mp3 "$outputname"; then
        rm -f -- "$file"
    else
        rm -f -- "$outputname"
    fi
done
|
ajaybhatia/archlinux-dotfiles
|
Scripts/any2mp3.sh
|
Shell
|
mit
| 438 |
#!/usr/bin/env bash
# Written by Wu Jianxiao and CBIG under MIT license: https://github.com/ThomasYeoLab/CBIG/blob/master/LICENSE.md
# This function runs RF-ANTs vol2surf mapping creation and projection using 1 subject
RF_DIR=$CBIG_CODE_DIR/stable_projects/registration/Wu2017_RegistrationFusion
RF_SURF2VOL_DIR=$RF_DIR/registration_fusion/scripts_surf2vol
SCRIPT_DIR=$(dirname "$(readlink -f "$0")")
DATA_DIR=$CBIG_CODE_DIR/data/example_data/RegistrationFusion_example_data
SUB_DIR=$CBIG_CODE_DIR/data/example_data/CoRR_HNU/subj01_FS
###########################################
# Main commands
###########################################
main(){
# set up for input subject
sublist=$output_dir/temp_sublist.csv
echo "subj01_sess1_FS" > $sublist
#if [ ! -d $SUB_DIR/fsaverage ]; then ln -s $FREESURFER_HOME/subjects/fsaverage $SUB_DIR; fi
# Step 1: create fsaverage index files in each subject's surface space
cmd="$RF_SURF2VOL_DIR/CBIG_RF_step1_make_xyzIndex_fsaverage.sh -n 1 -o $output_dir/index_fsaverage -l $sublist -g $SUB_DIR"
echo $cmd
eval $cmd
# Step 2: Project index files to MNI152
cmd="$RF_SURF2VOL_DIR/CBIG_RF_step2B_RF_ANTs_fsaverage2vol_proj.sh -p Colin27_orig -s SPM_Colin27_FS4.5.0 -n 1 -i $output_dir/index_fsaverage -w $DATA_DIR/CoRR_HNU/subj01 -o $output_dir -a $ANTs_dir -l $sublist -g $SUB_DIR"
echo $cmd
eval $cmd
# Step 3: Generate average mapping
cmd="$RF_SURF2VOL_DIR/CBIG_RF_step3_compute_fsaverage2vol_avgMapping.sh -s SPM_Colin27_FS4.5.0 -i $output_dir/index_SPM_Colin27_FS4.5.0 -n 1 -o $output_dir -c 0 -l $sublist"
echo $cmd
eval $cmd
# Project a probabilistic map to fsaverage using the average mapping
input=$DATA_DIR/surface_parcel.mat
echo $input > $output_dir/temp.csv
cmd="$RF_DIR/bin/scripts_final_proj/CBIG_RF_projectfsaverage2Vol_batch.sh -l $output_dir/temp.csv -n 1 -d $output_dir/mapping -o $output_dir/projected_fsaverage2vol -s SPM_Colin27_FS4.5.0 -m $RF_DIR/bin/liberal_cortex_masks_FS5.3/SPM_Colin27_FS4.5.0_cortex_estimate.nii.gz"
echo $cmd
eval $cmd
# Remove temporary files and directories
rm $output_dir/temp.csv
}
##################################################################
# Function usage
##################################################################
# Usage
usage() { echo "
Usage: $0 -o output_dir
This script generates an RF-ANTs fsaverage-to-Colin27 mapping using a single subject and use it to project a surface parcellation.
The projected map should be compared to example_results/projected_surface_parcel.nii.gz.
REQUIRED ARGUMENTS:
-o <output_dir> absolute path to output directory
OPTIONAL ARGUMENTS:
-a <ANTs_dir> directory where ANTs is installed
[ default: $CBIG_ANTS_DIR ]
-h display help message
OUTPUTS:
$0 will create 5 folders.
1) index_fsaverage folder: 6 files will be generated, corresponding to the x/y/z index files in the subject's surface space.
The file names will be:
lh.xIndex_fsaverage_to_subj01_sess1.index
rh.xIndex_fsaverage_to_subj01_sess1.index
lh.yIndex_fsaverage_to_subj01_sess1.index
rh.yIndex_fsaverage_to_subj01_sess1.index
lh.zIndex_fsaverage_to_subj01_sess1.index
rh.zIndex_fsaverage_to_subj01_sess1.index
2) index_T1 folder: 6 files will be generated, corresponding to the x/y/z index files projected to the subject's T1 space, from left and right hemispheres of the subject's surface respectively.
The file names will be:
lh.xIndex_fsaverage_to_subj01_sess1_T1.nii.gz
rh.xIndex_fsaverage_to_subj01_sess1_T1.nii.gz
lh.yIndex_fsaverage_to_subj01_sess1_T1.nii.gz
rh.yIndex_fsaverage_to_subj01_sess1_T1.nii.gz
lh.zIndex_fsaverage_to_subj01_sess1_T1.nii.gz
rh.zIndex_fsaverage_to_subj01_sess1_T1.nii.gz
3) index_SPM_Colin27_FS4.5.0: 6 files will be generated, corresponding to the x/y/z index files from left and right hemispheres, registered to the volumetric atlas space.
The file names will be:
lh.xIndex_fsaverage_to_subj01_sess1_to_SPM_Colin27_FS4.5.0_RF_ANTs.nii.gz
rh.xIndex_fsaverage_to_subj01_sess1_to_SPM_Colin27_FS4.5.0_RF_ANTs.nii.gz
lh.yIndex_fsaverage_to_subj01_sess1_to_SPM_Colin27_FS4.5.0_RF_ANTs.nii.gz
rh.yIndex_fsaverage_to_subj01_sess1_to_SPM_Colin27_FS4.5.0_RF_ANTs.nii.gz
lh.zIndex_fsaverage_to_subj01_sess1_to_SPM_Colin27_FS4.5.0_RF_ANTs.nii.gz
rh.zIndex_fsaverage_to_subj01_sess1_to_SPM_Colin27_FS4.5.0_RF_ANTs.nii.gz
4) mapping folder: corresponding to the average mapping from the fsaverage surface to Colin27 space and the count map in Colin27 space.
The file names will be:
1Sub_fsaverage_to_SPM_Colin27_FS4.5.0_RF_ANTs_avgMapping.prop.mat
1Sub_fsaverage_to_SPM_Colin27_FS4.5.0_RF_ANTs.count.mat
5) projected_fsaverage2vol folder: 2 files will be generated, corresponding to the projected data in the volumetric atlas space and the projected data in segmentation form (with left hemisphere values starting from 0 and right hemisphere values starting from 1000).
The file names will be:
prob_map_central_sulc.1Sub_fsaverage_to_SPM_Colin27_FS4.5_RF_ANTs.nii.gz
seg.prob_map_central_sulc.1Sub_fsaverage_to_SPM_Colin27_FS4.5_RF_ANTs.nii.gz
EXAMPLE:
$0 -o ~/unit_test_results
" 1>&2; exit 1; }
# Display help message if no argument is supplied (usage itself exits)
if [ $# -eq 0 ]; then
    usage
fi
##################################################################
# Assign input variables
##################################################################
# Default parameter
ANTs_dir=$CBIG_ANTS_DIR
# Assign parameter
while getopts "o:h" opt; do
case $opt in
o) output_dir=${OPTARG} ;;
h) usage; exit ;;
*) usage; 1>&2; exit 1 ;;
esac
done
##################################################################
# Check parameter
##################################################################
if [ -z "$output_dir" ]; then
    echo "Output directory not defined." 1>&2
    exit 1
fi
##################################################################
# Disable multi-threading
##################################################################
ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS=1
export ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS
##################################################################
# Set up output directory
##################################################################
if [ ! -d "$output_dir" ]; then
echo "Output directory does not exist. Making directory now..."
mkdir -p $output_dir
fi
###########################################
# Implementation
###########################################
main
|
ThomasYeoLab/CBIG
|
stable_projects/registration/Wu2017_RegistrationFusion/examples/CBIG_RF_example_surf2vol.sh
|
Shell
|
mit
| 6,511 |
alias reload!='. ~/.zshrc'
#alias prun='pipenv run'
alias be='bundle exec'
alias dc="docker-compose"
alias knife="be knife"
alias wx="curl wttr.in/Portland"
#alias ansible="prun ansible"
|
jescholl/dotfiles
|
zsh/aliases.zsh
|
Shell
|
mit
| 189 |
#!/usr/bin/env bash
spider_list=(
youdianying
flkong
fuliba
fulidang
wuxianfuli
)
for spider in ${spider_list[@]}; do
scrapy crawl ${spider}
done
|
moonlet/fuli
|
src/fuli_spiders/crawl_all.sh
|
Shell
|
mit
| 172 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for CESA-2017:0190
#
# Security announcement date: 2017-01-26 20:24:55 UTC
# Script generation date: 2017-01-28 21:15:27 UTC
#
# Operating System: CentOS 6
# Architecture: x86_64
#
# Vulnerable packages fix on version:
# - firefox.i686:45.7.0-1.el6.centos
# - firefox.x86_64:45.7.0-1.el6.centos
#
# Last versions recommended by security team:
# - firefox.i686:45.7.0-1.el6.centos
# - firefox.x86_64:45.7.0-1.el6.centos
#
# CVE List:
# - CVE-2017-5373
# - CVE-2017-5375
# - CVE-2017-5376
# - CVE-2017-5378
# - CVE-2017-5380
# - CVE-2017-5383
# - CVE-2017-5386
# - CVE-2017-5390
# - CVE-2017-5396
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo yum install firefox.i686-45.7.0 -y
sudo yum install firefox.x86_64-45.7.0 -y
|
Cyberwatch/cbw-security-fixes
|
CentOS_6/x86_64/2017/CESA-2017:0190.sh
|
Shell
|
mit
| 893 |
#!/bin/bash
# script to test the Arlo camera switch and lights
# L. Shustek, 11 Aug 2015
# L. Shustek, 17 Aug 2015; add green LED
# GPIO9 is input: "up" momentary toggle switch leg
# (writing "high" to the direction file would reconfigure the pin as an
# output driving high, so the pin is left as a plain input; sysfs cannot
# enable the internal pull-ups, which are assumed to be provided in hardware)
echo 9 | sudo tee /sys/class/gpio/export > /dev/null
echo "in" | sudo tee /sys/class/gpio/gpio9/direction > /dev/null
#GPIO10 is input: "down" momentary toggle switch leg
echo 10 | sudo tee /sys/class/gpio/export > /dev/null
echo "in" | sudo tee /sys/class/gpio/gpio10/direction > /dev/null
#GPIO11 is output: a red LED through a 680 ohm resistor to ground
echo 11 | sudo tee /sys/class/gpio/export > /dev/null
echo "out" | sudo tee /sys/class/gpio/gpio11/direction > /dev/null
#GPIO22 is output: a green LED through a 1K ohm resistor to ground
echo 22 | sudo tee /sys/class/gpio/export > /dev/null
echo "out" | sudo tee /sys/class/gpio/gpio22/direction > /dev/null
echo push the switch up or down
while true; do
read val < /sys/class/gpio/gpio9/value
if (( val == 0 )); then
echo switch up
echo 1 > /sys/class/gpio/gpio11/value
echo 0 > /sys/class/gpio/gpio22/value
sleep 1
fi
read val < /sys/class/gpio/gpio10/value
if (( val == 0 )); then
echo switch down
echo 0 > /sys/class/gpio/gpio11/value
echo 1 > /sys/class/gpio/gpio22/value
sleep 1
fi
done
|
LenShustek/ArloCamera
|
testswitch.bash
|
Shell
|
mit
| 1,424 |
#!/bin/bash
# ulimit -n 8192
set -e
FIRST_START_DONE="/etc/docker-openldap-first-start-done"
[ ! -e /var/lib/openldap/data ] && mkdir -p /var/lib/openldap/data
# container first start
if [ ! -e "$FIRST_START_DONE" ]; then
if [[ -z "$SLAPD_PASSWORD" ]]; then
echo -n >&2 "Error: Container not configured and SLAPD_PASSWORD not set. "
echo >&2 "Did you forget to add -e SLAPD_PASSWORD=... ?"
exit 1
fi
if [[ -z "$SLAPD_DOMAIN" ]]; then
echo -n >&2 "Error: Container not configured and SLAPD_DOMAIN not set. "
echo >&2 "Did you forget to add -e SLAPD_DOMAIN=... ?"
exit 1
fi
if [ ! -d /etc/openldap/slapd.d ]; then
mkdir /etc/openldap/slapd.d
fi
chown -R ldap:ldap /etc/openldap/slapd.d
if [ -n "$SLAPD_PASSWORD" ]; then
password_hash=`slappasswd -s "${SLAPD_PASSWORD}"`
sed_safe_password_hash=${password_hash//\//\\\/}
sed -i "s|rootpw.*|rootpw ${sed_safe_password_hash}|g" /etc/openldap/slapd.conf
fi
SLAPD_ORGANIZATION="${SLAPD_ORGANIZATION:-${SLAPD_DOMAIN}}"
dc_string=""
IFS="."; declare -a dc_parts=($SLAPD_DOMAIN)
odc=""
for dc_part in "${dc_parts[@]}"; do
[ -z "$dc_string" ] && odc="$dc_part"
dc_string="$dc_string,dc=$dc_part"
done
base_string="${dc_string:1}"
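# e.g. (illustrative) SLAPD_DOMAIN=example.net yields
# base_string="dc=example,dc=net" and odc="example"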
echo "BASE: ${base_string}"
echo "dc: $odc"
sed -i "s|dc=example,dc=net|$base_string|g" /etc/openldap/slapd.conf
sed -i "s|dc=example,dc=net|$base_string|g" /etc/openldap/modules/base.ldif
sed -i "s|dc: example|dc: $odc|g" /etc/openldap/modules/base.ldif
sed -i "s|o: Example|o: $SLAPD_ORGANIZATION|g" /etc/openldap/modules/base.ldif
sed -i "s/^#BASE.*/BASE ${base_string}/g" /etc/openldap/ldap.conf
if [ -f /etc/conf.d/slapd ]; then
sed -i "s|#OPTS=.*|OPTS=\"-F /etc/openldap/slapd.d -h 'ldap:// ldapi://%2fvar%2frun%2fopenldap%2fslapd.sock'\"|g" /etc/conf.d/slapd
fi
chown -R ldap:ldap /var/lib/openldap/data
chmod 700 /var/lib/openldap/data
if [ -z "$(ls -A /var/lib/openldap/data)" ]; then
echo "data directory is empty, init..."
cp /etc/openldap/DB_CONFIG.example /var/lib/openldap/data/DB_CONFIG
slapd -u ldap -g ldap >/dev/null 2>&1
ldapadd -x -D "cn=admin,${base_string}" -w "${SLAPD_PASSWORD}" -f /etc/openldap/modules/base.ldif
killall slapd
fi
touch $FIRST_START_DONE
else
slapd_configs_in_env=`env | grep 'SLAPD_'`
if [ -n "${slapd_configs_in_env:+x}" ]; then
echo "Info: Container already configured, therefore ignoring SLAPD_xxx environment variables"
fi
fi
echo "exec $@"
exec "$@"
|
wealthworks/docker-openldap
|
entrypoint.sh
|
Shell
|
mit
| 2,614 |
#!/bin/bash
VERSION=$( grep "version = \"" config.xml|sed 's/[^0-9.]*//g' )
MAJOR_RELEASE=$( echo $VERSION|cut -d '.' -f 1 )
MINOR_RELEASE=$( echo $VERSION|cut -d '.' -f 2 )
PATCH_RELEASE=$( echo $VERSION|cut -d '.' -f 3 )
if [ "$1" = "major" ];then
MAJOR_RELEASE=$(($MAJOR_RELEASE + 1))
MINOR_RELEASE=0
PATCH_RELEASE=0
fi
if [ "$1" = "minor" ];then
MINOR_RELEASE=$(($MINOR_RELEASE + 1))
PATCH_RELEASE=0
fi
if [ "$1" = "patch" ];then
PATCH_RELEASE=$(($PATCH_RELEASE + 1))
fi
echo $VERSION
echo $MAJOR_RELEASE.$MINOR_RELEASE.$PATCH_RELEASE
sed -i -- "s/$VERSION/$MAJOR_RELEASE.$MINOR_RELEASE.$PATCH_RELEASE/" ./config.xml
sed -i -- "s/$VERSION/$MAJOR_RELEASE.$MINOR_RELEASE.$PATCH_RELEASE/" ./src/views/Version.js
|
isaaguilar/pumpkin-basket
|
version-er.sh
|
Shell
|
mit
| 731 |
alias gps='ps -ef | grep -i'
alias gh='history | grep -i'
alias gf='find . \
-path "*/.git/*" -prune -o \
-path "*/.svn/*" -prune -o \
-path "*/.hg/*" -prune -o \
-path "*/.silp_backup/*" -prune -o \
-path "*/.silp_test/*" -prune -o \
-print | grep -i'
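# Usage examples (illustrative):
#   gps nginx   # running processes matching "nginx"
#   gh ssh      # history entries containing "ssh"
#   gf TODO     # file paths under . matching "TODO", skipping VCS dirs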
|
yjpark/dotfiles
|
bash/aliases/grep.bash
|
Shell
|
mit
| 331 |
go get github.com/nsf/gocode
go get golang.org/x/tools/cmd/goimports
#go get code.google.com/p/rog-go/exp/cmd/godef
go get github.com/rogpeppe/godef
#go get golang.org/x/tools/cmd/oracle
go get golang.org/x/tools/cmd/gorename
go get github.com/kisielk/errcheck
go get github.com/jstemmer/gotags
|
rbastic/glowing-tyrion
|
misc-cfg/setup-vim-go.sh
|
Shell
|
mit
| 295 |
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for RHSA-2016:2946
#
# Security announcement date: 2016-12-14 10:13:23 UTC
# Script generation date: 2017-01-25 21:24:17 UTC
#
# Operating System: Red Hat 6
# Architecture: x86_64
#
# Vulnerable packages fix on version:
# - firefox.i686:45.6.0-1.el6_8
# - firefox-debuginfo.i686:45.6.0-1.el6_8
# - firefox.x86_64:45.6.0-1.el6_8
# - firefox-debuginfo.x86_64:45.6.0-1.el6_8
#
# Last versions recommended by security team:
# - firefox.i686:45.7.0-1.el6_8
# - firefox-debuginfo.i686:45.7.0-1.el6_8
# - firefox.x86_64:45.7.0-1.el6_8
# - firefox-debuginfo.x86_64:45.7.0-1.el6_8
#
# CVE List:
# - CVE-2016-9893
# - CVE-2016-9895
# - CVE-2016-9897
# - CVE-2016-9898
# - CVE-2016-9899
# - CVE-2016-9900
# - CVE-2016-9901
# - CVE-2016-9902
# - CVE-2016-9904
# - CVE-2016-9905
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
sudo yum install firefox.i686-45.7.0 -y
sudo yum install firefox-debuginfo.i686-45.7.0 -y
sudo yum install firefox.x86_64-45.7.0 -y
sudo yum install firefox-debuginfo.x86_64-45.7.0 -y
|
Cyberwatch/cbw-security-fixes
|
Red_Hat_6/x86_64/2016/RHSA-2016:2946.sh
|
Shell
|
mit
| 1,178 |
#!/bin/bash
#
# If you change these files in the Xcode UI, it will remove the symlinks and
# duplicate them. In that case you should copy them back to this repo, and
# re-run this script.
#
# http://www.openradar.me/42206958
#
set -e
set -o pipefail
set -u
if [ ! -d xcode ]; then
echo "Must be run from root of dotfiles"
exit 1
fi
colors="$HOME/Library/Developer/Xcode/UserData/FontAndColorThemes"
rm -f "$colors/panic.xccolortheme"
rm -f "$colors/gruvbox-dark.xccolortheme"
rm -f "$colors/gruvbox-light.xccolortheme"
mkdir -p "$colors"
ln -s "$DOTFILES/xcode/panic.xccolortheme" "$colors"
ln -s "$DOTFILES/xcode/gruvbox-dark.xccolortheme" "$colors"
ln -s "$DOTFILES/xcode/gruvbox-light.xccolortheme" "$colors"
|
thedavidharris/dotfiles
|
xcode/link.sh
|
Shell
|
mit
| 719 |
#!/bin/bash -
MOCHA="npm run testonly -- "
# ensure the user has started the server
echo "checking for server connection"
TEST_DIR=$(dirname $(dirname "$0"))
SEED_DIR=$(dirname $TEST_DIR)/seeds
LOG_FILE=$(dirname $TEST_DIR)/output-app/server/logs/all-logs-readable.log
$MOCHA "$TEST_DIR/index.js" > /dev/null 2> /dev/null
if [[ $? -ne 0 ]]; then
echo "FAILED: Ensure you have started the server and it is running on port 3000"
exit 1
fi
echo "seeding database"
mongoimport --drop --host 127.0.0.1:3002 --db database --collection user "$SEED_DIR/User.json"
if [[ $? -ne 0 ]]; then
echo "FAILED: Ensure that mongo is also running on 3002"
exit 1
fi
mongoimport --drop --host 127.0.0.1:3002 --db database --collection tweet "$SEED_DIR/Tweet.json"
echo "running tests"
# Test all...
$MOCHA "$TEST_DIR"
# Test only specific test files...
# $MOCHA "$TEST_DIR/test-1-roles.js"
# $MOCHA "$TEST_DIR/test-2-queries-with-user-role-admin.js"
# $MOCHA "$TEST_DIR/test-3-mutations-with-user-role-admin.js"
# $MOCHA "$TEST_DIR/test-4-mutations-with-unknown-user.js"
# $MOCHA "$TEST_DIR/test-5-mutations-with-user-role-user.js"
# $MOCHA "$TEST_DIR/test-6-mutations-with-user-role-editor.js"
echo "Please consider the log file for debugging $LOG_FILE"
echo ""
|
tobkle/create-graphql-server
|
test/output-app-end-to-end/scripts/run-end-to-end-tests.sh
|
Shell
|
mit
| 1,263 |
#!/bin/bash -ev
#
# Installation Script
# Written by: Tommy Lincoln <[email protected]>
# Github: https://github.com/pajamapants3000
# Legal: See LICENSE in parent directory
#
#
# Dependencies
#**************
# Begin Required
#cmake-3.3.1
#fltk-1.3.3
#gnutls-3.4.4.1
#libgcrypt-1.6.3
#libjpeg-turbo-1.4.1
#pixman-0.32.6
#xorg_applications
# End Required
# Begin Recommended
#imagemagick-6.9.1-0
#linux_pam-1.2.1
# End Recommended
# Begin Optional
# End Optional
# Begin Kernel
# End Kernel
#
source ${HOME}/.blfs_profile
# Installation
#**************
# Check for previous installation:
PROCEED="yes"
REINSTALL=0
grep "tigervnc-" /list-$CHRISTENED"-"$SURNAME > /dev/null && ((\!$?)) &&\
REINSTALL=1 && echo "Previous installation detected, proceed?" && read PROCEED
[ $PROCEED = "yes" ] || [ $PROCEED = "y" ] || exit 0
# Download:
wget http://anduin.linuxfromscratch.org/sources/BLFS/conglomeration/tigervnc/tigervnc-1.6.0.tar.gz
# md5sum:
#echo "b11cc4c4d5249b9b8e355ee6f47ec4fe tigervnc-1.5.0.tar.gz" | md5sum -c ;\
# ( exit ${PIPESTATUS[0]} )
#
# Required file download
wget http://ftp.x.org/pub/individual/xserver/xorg-server-1.18.0.tar.bz2
#
# Required patch download
wget http://www.linuxfromscratch.org/patches/blfs/svn/tigervnc-1.6.0-xorg118-1.patch
wget http://www.linuxfromscratch.org/patches/blfs/svn/tigervnc-1.6.0-gethomedir-1.patch
#
tar -xvf tigervnc-1.6.0.tar.gz
cd tigervnc-1.6.0
patch -Np1 -i ../tigervnc-1.6.0-xorg118-1.patch
patch -Np1 -i ../tigervnc-1.6.0-gethomedir-1.patch
mkdir -vp build
cd build
cmake -G "Unix Makefiles" -DCMAKE_INSTALL_PREFIX=/usr -DCMAKE_BUILD_TYPE=Release -Wno-dev ..
make -j3
#
cp -vR ../unix/xserver unix/
tar -xf ../../xorg-server-1.18.0.tar.bz2 -C unix/xserver --strip-components=1
pushd unix/xserver
patch -Np1 -i ../../../unix/xserver117.patch
autoreconf -fi
./configure $XORG_CONFIG \
--disable-xwayland --disable-dri --disable-dmx \
--disable-xorg --disable-xnest --disable-xvfb \
--disable-xwin --disable-xephyr --disable-kdrive \
--disable-devel-docs --disable-config-hal --disable-config-udev \
--disable-unit-tests --disable-selective-werror \
--disable-static --enable-dri3 \
--without-dtrace --enable-dri2 --enable-glx \
--enable-glx-tls --with-pic
make -j3 TIGERVNC_SRCDIR=`pwd`/../../../
popd
#
as_root make install
pushd unix/xserver/hw/vnc
as_root make install
popd
[ -e /usr/bin/Xvnc ] || as_root ln -svf $XORG_PREFIX/bin/Xvnc /usr/bin/Xvnc
#
as_root cp -v ${BLFSDIR}/files/usr/share/applications/vncviewer.desktop /usr/share/applications/
as_root install -vm644 ../media/icons/tigervnc_24.png /usr/share/pixmaps
as_root ln -sfv tigervnc_24.png /usr/share/pixmaps/tigervnc.png
install -Dm755 ${BLFSDIR}/files/home/profile/.vnc/xstartup $HOME/.vnc/xstartup
#
cd ..
as_root rm -rf tigervnc-1.6.0
#
# Add to installed list for this computer:
echo "tigervnc-1.6.0" >> /list-$CHRISTENED"-"$SURNAME
#
###################################################
#
|
pajamapants3000/BLFS_scripts_etc
|
scripts/tigervnc-1.5.0.sh
|
Shell
|
mit
| 3,117 |
#!/bin/sh
set -e
sudo apt -y update
sudo apt -y upgrade
sudo apt -y install docker.io
sudo mv /tmp/driftapp.service /etc/systemd/system/
sudo mkdir /etc/driftapp
sudo mv /tmp/docker-compose.yml /etc/driftapp/
sudo curl -L "https://github.com/docker/compose/releases/download/1.25.5/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
sudo chmod +x /usr/local/bin/docker-compose
sudo systemctl enable driftapp
|
dgnorth/drift-base
|
aws/packer.sh
|
Shell
|
mit
| 435 |
#!/bin/sh
set -e
echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
SWIFT_STDLIB_PATH="${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}"
install_framework()
{
if [ -r "${BUILT_PRODUCTS_DIR}/$1" ]; then
local source="${BUILT_PRODUCTS_DIR}/$1"
elif [ -r "${BUILT_PRODUCTS_DIR}/$(basename "$1")" ]; then
local source="${BUILT_PRODUCTS_DIR}/$(basename "$1")"
elif [ -r "$1" ]; then
local source="$1"
fi
local destination="${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
if [ -L "${source}" ]; then
echo "Symlinked..."
source="$(readlink "${source}")"
fi
# use filter instead of exclude so missing patterns don't throw errors
echo "rsync -av --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${destination}\""
rsync -av --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${destination}"
local basename
basename="$(basename -s .framework "$1")"
binary="${destination}/${basename}.framework/${basename}"
if ! [ -r "$binary" ]; then
binary="${destination}/${basename}"
fi
# Strip invalid architectures so "fat" simulator / device frameworks work on device
if [[ "$(file "$binary")" == *"dynamically linked shared library"* ]]; then
strip_invalid_archs "$binary"
fi
# Resign the code if required by the build settings to avoid unstable apps
code_sign_if_enabled "${destination}/$(basename "$1")"
# Embed linked Swift runtime libraries. No longer necessary as of Xcode 7.
if [ "${XCODE_VERSION_MAJOR}" -lt 7 ]; then
local swift_runtime_libs
swift_runtime_libs=$(xcrun otool -LX "$binary" | grep --color=never @rpath/libswift | sed -E s/@rpath\\/\(.+dylib\).*/\\1/g | uniq -u && exit ${PIPESTATUS[0]})
for lib in $swift_runtime_libs; do
echo "rsync -auv \"${SWIFT_STDLIB_PATH}/${lib}\" \"${destination}\""
rsync -auv "${SWIFT_STDLIB_PATH}/${lib}" "${destination}"
code_sign_if_enabled "${destination}/${lib}"
done
fi
}
# Signs a framework with the provided identity
code_sign_if_enabled() {
if [ -n "${EXPANDED_CODE_SIGN_IDENTITY}" -a "${CODE_SIGNING_REQUIRED}" != "NO" -a "${CODE_SIGNING_ALLOWED}" != "NO" ]; then
# Use the current code_sign_identity
echo "Code Signing $1 with Identity ${EXPANDED_CODE_SIGN_IDENTITY_NAME}"
echo "/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} --preserve-metadata=identifier,entitlements \"$1\""
/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} --preserve-metadata=identifier,entitlements "$1"
fi
}
# Strip invalid architectures
strip_invalid_archs() {
binary="$1"
# Get architectures for current file
archs="$(lipo -info "$binary" | rev | cut -d ':' -f1 | rev)"
stripped=""
for arch in $archs; do
if ! [[ "${VALID_ARCHS}" == *"$arch"* ]]; then
# Strip non-valid architectures in-place
lipo -remove "$arch" -output "$binary" "$binary" || exit 1
stripped="$stripped $arch"
fi
done
if [[ "$stripped" ]]; then
echo "Stripped $binary of architectures:$stripped"
fi
}
if [[ "$CONFIGURATION" == "Debug" ]]; then
install_framework "Pods-ECParticleBackgroundView_Example/ECParticleBackgroundView.framework"
fi
if [[ "$CONFIGURATION" == "Release" ]]; then
install_framework "Pods-ECParticleBackgroundView_Example/ECParticleBackgroundView.framework"
fi
|
skyhacker2/ECParticleBackgroundView
|
Example/Pods/Target Support Files/Pods-ECParticleBackgroundView_Example/Pods-ECParticleBackgroundView_Example-frameworks.sh
|
Shell
|
mit
| 3,616 |
#!/bin/bash
#
echo -e "\n"
echo -e "*********************************************"
echo -e "********** WARP 2D ***********"
echo -e "*********************************************"
echo -e "\n"
if [ $# -lt 3 ] ; then
echo This script will allow you to register two 2D images like this
echo -e "\n"
echo $0 source.tif source_MRI.png source_Atlas.png
echo -e "\n"
echo Please enter at least these three arguments: source.tif, source_MRI.png and source_Atlas.png
echo -e "\n"
exit
fi
# Input arguments
sec=$1
mri=$2
atl=$3
### General parameters
DISPLAY_OUTPUT=1
WEIGHT_LANDMARKS=0
### Parameters
work_width=1024
atlas_pixel_resolution=0.0390625 # mm
image_density=$(echo "scale=7;10/$atlas_pixel_resolution"|bc) # pixels per cm
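# e.g. with the 0.0390625 mm resolution above: 10/0.0390625 = 256 pixels per cm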
### Separate filename and extension for section
sec_ext="${sec##*.}"
sec_fn="${sec%.*}"
### Separate filename and extension for mri
mri_ext="${mri##*.}"
mri_fn="${mri%.*}"
### Separate filename and extension for atlas
atl_ext="${atl##*.}"
atl_fn="${atl%.*}"
echo -e "\n*********************************************"
echo -e "Input section filename: "${sec_fn}
echo -e "Input MRI filename: "${mri_fn}
echo -e "Input Atlas filename: "${atl_fn}
echo -e "Atlas pixel resolution (mm): "${atlas_pixel_resolution}
echo -e "Pixel density (pixels per cm): "${image_density}
echo -e "*********************************************\n"
### Change resolution in tiff file
sec_meta=${sec_fn}_meta.${sec_ext}
#
sec_meta_ext="${sec_meta##*.}"
sec_meta_fn="${sec_meta%.*}"
#
#echo -e "\nMetadata of" ${sec_fn}":"
#identify -verbose ${sec} | grep -E '(Resolution|Units|Print size|Geometry)'
echo -e "\nStriping physical space metadata of" ${sec_fn}
convert ${sec} -strip ${sec_meta}
#echo -e "\nMetadata of" ${sec_meta} "after stripping the physical space metadata:"
#identify -verbose ${sec_meta} | grep -E '(Resolution|Units|Print size|Geometry)'
#
#
#
######
### Resize the section
#
sec_dwn=${sec_fn}_resize.${sec_ext}
#
sec_dwn_ext="${sec_dwn##*.}"
sec_dwn_fn="${sec_dwn%.*}"
#
echo -e "\nResizing the original section to a width of ${work_width} pixels"
convert ${sec_meta} -resize ${work_width} -interpolate bilinear -gravity NorthWest ${sec_dwn}
echo -e "\nMetadata after resizing"
identify -verbose ${sec_dwn} | grep -E '(Resolution|Units|Print size|Geometry)'
######
# remove extra cranial stuff
mri_msk=${mri_fn}_msk.${mri_ext}
echo -e "\nRemoving extra cranial objects in the MRI"
remove_outside_brain.sh ${mri} ${atl} ${mri_msk} ${DISPLAY_OUTPUT}
echo -e "\nRemoving extra cranial objects in the MRI -- done"
#######
### Resize (i.e upsample) the MRI to the same size as the experimental data
mri_res=${mri_fn}_resize.${mri_ext}
#
mri_res_ext="${mri_res##*.}"
mri_res_fn="${mri_res%.*}"
#
sec_width=$(convert ${sec_dwn} -ping -format "%w" info:)
sec_height=$(convert ${sec_dwn} -ping -format "%h" info:)
echo -e "\nResizing MRI file:" ${mri_fn} "to" ${sec_width}"x"${sec_height}
convert ${mri_msk} -resize ${sec_width}"x"${sec_height}! -interpolate bilinear -gravity NorthWest ${mri_res}
######
### Add spatial information
echo -e "\nAdding physical space information to the resized file"
convert ${sec_dwn} -units PixelsPerCentimeter ${sec_dwn}
convert ${sec_dwn} -density ${image_density}x${image_density} ${sec_dwn}
echo -e "\nMetadata of" ${sec_dwn} "after adding the physical space information:"
identify -verbose ${sec_dwn} | grep -E '(Resolution|Units|Print size|Geometry)'
######
### Tiff to Nifti, the resolution is preserved by ImageMath
echo -e "\nConverting to Nifti"
sec_dwn_nii=${sec_dwn_fn}.nii.gz
ImageMath 2 ${sec_dwn_nii} Byte ${sec_dwn}
echo -e "\nSection nifti file" ${sec_dwn_nii} "info:"
c3d ${sec_dwn_nii} -info-full | grep -E '(Bounding Box|Voxel Spacing|Image Dimensions)'
######
### PNG to Nifti, we have to get the transform from somewhere else
mri_res_nii=${mri_res_fn}.nii.gz
ImageMath 2 ${mri_res_nii} Byte ${mri_res}
# Copy header
c3d ${sec_dwn_nii} ${mri_res_nii} -copy-transform -o ${mri_res_nii}
# Change to short
c3d ${mri_res_nii} -type ushort -o ${mri_res_nii}
echo -e "\nMRI nifti file" ${mri_res_nii} "info:"
c3d ${mri_res_nii} -info-full | grep -E '(Bounding Box|Voxel Spacing|Image Dimensions)'
### Draw landmarks manually
mri_res_lm_nii=${mri_res_fn}_landmarks.nii.gz
sec_dwn_lm_nii=${sec_dwn_fn}_landmarks.nii.gz
#if [ ! -f "${mri_res_lm}" ]
#then
# echo -e "\n"
# echo -e "**********************************************************"
# echo -e "* Draw landmarks for" ${mri_res_nii} "using ITKSnap "
# echo -e "**********************************************************"
# echo -e "\n"
# exit 3
#fi
#if [ ! -f "${sec_dwn_lm}" ]
#then
# echo -e "\n"
# echo -e "**********************************************************"
# echo -e "* Draw landmarks for" ${sec_dwn_nii} "using ITKSnap "
# echo -e "**********************************************************"
# echo -e "\n"
# exit 3
#fi
### Registration
echo -e "\nRegistration"
ants_trans=${sec_fn}_transf
time runElastic.sh ${ants_trans} ${sec_dwn_nii} ${mri_res_nii} ${sec_dwn_lm_nii} ${mri_res_lm_nii} ${WEIGHT_LANDMARKS}
echo -e "\nRegistration done with success"
### This part is only for validation of the registration process; it applies the transformation to the Atlas, both low res and high res
echo -e "**********************************************************"
echo -e "************ Working with the downsampled atlas **********"
echo -e "**********************************************************"
######
### Resize (i.e upsample) the Atlas to the same size as the resized experimental data
atl_res=${atl_fn}_resize.${atl_ext}
#
atl_res_ext="${atl_res##*.}"
atl_res_fn="${atl_res%.*}"
#
echo -e "Resizing Atlas file:" ${atl_fn} "to" ${sec_width}"x"${sec_height}
convert ${atl} -resize ${sec_width}"x"${sec_height}! -interpolate nearest-neighbor -gravity NorthWest ${atl_res}
######
### PNG to Nifti, we have to get the transform from somewhere else
# split channels
convert ${atl_res} -separate ${atl_res_fn}_splitRGB%d.${atl_res_ext}
atl_res_nii_0=${atl_res_fn}_splitRGB0.nii.gz
atl_res_nii_1=${atl_res_fn}_splitRGB1.nii.gz
atl_res_nii_2=${atl_res_fn}_splitRGB2.nii.gz
ImageMath 2 ${atl_res_nii_0} Byte ${atl_res_fn}_splitRGB0.${atl_res_ext}
ImageMath 2 ${atl_res_nii_1} Byte ${atl_res_fn}_splitRGB1.${atl_res_ext}
ImageMath 2 ${atl_res_nii_2} Byte ${atl_res_fn}_splitRGB2.${atl_res_ext}
# Copy header
c3d ${sec_dwn_nii} ${atl_res_nii_0} -copy-transform -o ${atl_res_nii_0}
c3d ${sec_dwn_nii} ${atl_res_nii_1} -copy-transform -o ${atl_res_nii_1}
c3d ${sec_dwn_nii} ${atl_res_nii_2} -copy-transform -o ${atl_res_nii_2}
# Change to short
c3d ${atl_res_nii_0} -type ushort -o ${atl_res_nii_0}
c3d ${atl_res_nii_1} -type ushort -o ${atl_res_nii_1}
c3d ${atl_res_nii_2} -type ushort -o ${atl_res_nii_2}
echo -e "\nAtlas nifti file" ${atl_res_nii_0} "info:"
c3d ${atl_res_nii_0} -info-full | grep -E '(Bounding Box|Voxel Spacing|Image Dimensions)'
### Apply transformation to the downsampled Atlas
echo -e "\nApply transformation to atlas"
comp_transf=${ants_trans}Composite.h5
#mri_res_warp_fn=${mri_res_fn}_warp
atl_res_warp_fn=${atl_res_fn}_warp
atl_res_warp_fn_0=${atl_res_fn}_splitRGB0_warp
atl_res_warp_fn_1=${atl_res_fn}_splitRGB1_warp
atl_res_warp_fn_2=${atl_res_fn}_splitRGB2_warp
# apply transforms to red green and blue
echo -e "\nApply transformation" ${comp_transf} "to" ${atl_res_nii_0}
time runApplyTransform.sh ${comp_transf} ${sec_dwn_nii} ${atl_res_nii_0} ${atl_res_warp_fn_0}
echo -e "Apply transformation" ${comp_transf} "to" ${atl_res_nii_1}
time runApplyTransform.sh ${comp_transf} ${sec_dwn_nii} ${atl_res_nii_1} ${atl_res_warp_fn_1}
echo -e "Apply transformation" ${comp_transf} "to" ${atl_res_nii_2}
time runApplyTransform.sh ${comp_transf} ${sec_dwn_nii} ${atl_res_nii_2} ${atl_res_warp_fn_2}
# regroup channels
echo -e "\nRegrouping the RGB channels into a warped low res Atlas"
convert ${atl_res_warp_fn_0}.png ${atl_res_warp_fn_1}.png ${atl_res_warp_fn_2}.png -set colorspace RGB -combine -set colorspace sRGB ${atl_res_warp_fn}.png
### Blend with target
composite -blend 50 -gravity South ${atl_res_warp_fn}.png ${sec_dwn} ${sec_dwn_fn}_warp_blend.png
composite -blend 50 -gravity South ${atl_res} ${sec_dwn} ${sec_dwn_fn}_blend.png
### Display
if [ ${DISPLAY_OUTPUT} -eq 1 ]
then
display ${sec_dwn_fn}_blend.png &
display ${sec_dwn_fn}_warp_blend.png &
fi
### Clean
rm *split*.*
echo -e "**********************************************************"
echo -e "******** Working with the high resolution atlas **********"
echo -e "**********************************************************"
### Resize (i.e upsample) the atlas to the same size as the experimental data
atl_ori=${atl_fn}_original.${atl_ext}
#
atl_ori_ext="${atl_ori##*.}"
atl_ori_fn="${atl_ori%.*}"
#
ori_width=$(convert ${sec} -ping -format "%w" info:)
ori_height=$(convert ${sec} -ping -format "%h" info:)
echo -e "Resizing Atlas file:" ${atl_fn} "to" ${ori_width}"x"${ori_height}
convert ${atl} -resize ${ori_width}"x"${ori_height}! -interpolate nearest-neighbor ${atl_ori}
######
### Add spatial information
echo -e "\nAdding physical space information to the original file"
image_density_high_res=$(echo "scale=7;${image_density}*${ori_width}/${work_width}"|bc)
echo -e "\nPixel density high res (pixels per cm):" ${image_density_high_res}
convert ${sec_meta} -units PixelsPerCentimeter ${sec_meta}
convert ${sec_meta} -density ${image_density_high_res}x${image_density_high_res} ${sec_meta}
echo -e "\nConverting to Nifti the high res section"
sec_meta_nii=${sec_meta_fn}.nii.gz
ImageMath 2 ${sec_meta_nii} Byte ${sec_meta}
echo -e "\nHigh res section nifti file" ${sec_meta_nii} "info:"
c3d ${sec_meta_nii} -info-full | grep -E '(Bounding Box|Voxel Spacing|Image Dimensions)'
######
### PNG to Nifti, we have to get the transform from somewhere else
echo -e "\nSplitting the high res Atlas into RGB channels "
# split channels
convert ${atl_ori} -separate ${atl_ori_fn}_splitRGB%d
echo -e "\nConverting the high res Atlas to Nifti "
atl_ori_nii_0=${atl_ori_fn}_splitRGB0.nii.gz
atl_ori_nii_1=${atl_ori_fn}_splitRGB1.nii.gz
atl_ori_nii_2=${atl_ori_fn}_splitRGB2.nii.gz
ImageMath 2 ${atl_ori_nii_0} Byte ${atl_ori_fn}_splitRGB0.${atl_ori_ext}
ImageMath 2 ${atl_ori_nii_1} Byte ${atl_ori_fn}_splitRGB1.${atl_ori_ext}
ImageMath 2 ${atl_ori_nii_2} Byte ${atl_ori_fn}_splitRGB2.${atl_ori_ext}
# Copy header
c3d ${sec_meta_nii} ${atl_ori_nii_0} -copy-transform -o ${atl_ori_nii_0}
c3d ${sec_meta_nii} ${atl_ori_nii_1} -copy-transform -o ${atl_ori_nii_1}
c3d ${sec_meta_nii} ${atl_ori_nii_2} -copy-transform -o ${atl_ori_nii_2}
# Change to short
c3d ${atl_ori_nii_0} -type ushort -o ${atl_ori_nii_0}
c3d ${atl_ori_nii_1} -type ushort -o ${atl_ori_nii_1}
c3d ${atl_ori_nii_2} -type ushort -o ${atl_ori_nii_2}
echo -e "\nHigh res Atlas nifti file" ${atl_ori_nii_0} "info:"
c3d ${atl_ori_nii_0} -info-full | grep -E '(Bounding Box|Voxel Spacing|Image Dimensions)'
### Apply transformation to the high res Atlas
echo -e "\nApply transformation to high res"
comp_transf=${ants_trans}Composite.h5
atl_ori_warp_fn=${atl_ori_fn}_warp
atl_ori_warp_fn_0=${atl_ori_fn}_splitRGB0_warp
atl_ori_warp_fn_1=${atl_ori_fn}_splitRGB1_warp
atl_ori_warp_fn_2=${atl_ori_fn}_splitRGB2_warp
# apply transforms to red green and blue
runApplyTransform.sh ${comp_transf} ${sec_meta_nii} ${atl_ori_nii_0} ${atl_ori_warp_fn_0}
runApplyTransform.sh ${comp_transf} ${sec_meta_nii} ${atl_ori_nii_1} ${atl_ori_warp_fn_1}
runApplyTransform.sh ${comp_transf} ${sec_meta_nii} ${atl_ori_nii_2} ${atl_ori_warp_fn_2}
# regroup channels
echo -e "\nRegrouping the RGB channels into a warped high res Atlas"
convert ${atl_ori_warp_fn_0}.png ${atl_ori_warp_fn_1}.png ${atl_ori_warp_fn_2}.png -set colorspace RGB -combine -set colorspace sRGB ${atl_ori_warp_fn}.png
### Blend with target
composite -blend 50 -gravity South ${atl_ori_warp_fn}.png ${sec} ${sec}_warp_blend.png
composite -blend 50 -gravity South ${atl_ori} ${sec} ${sec}_blend.png
### Display
#if [ ${DISPLAY_OUTPUT} -eq 1 ]
#then
#display ${sec}_blend.png &
#display ${sec}_warp_blend.png &
#fi
### Resize (i.e upsample) the MRI to the same size as the experimental data
#mri_ori=${mri_fn}_original.${mri_ext}
#
#mri_ori_ext="${mri_ori##*.}"
#mri_ori_fn="${mri_ori%.*}"
#
#ori_width=$(convert ${sec} -ping -format "%w" info:)
#ori_height=$(convert ${sec} -ping -format "%h" info:)
#echo -e "Resizing MRI file:" ${mri_fn} "to" ${ori_width}"x"${ori_height}
#convert ${mri} -resize ${ori_width}"x"${ori_height}! -interpolate bilinear ${mri_ori}
######
### tiff to Nifti high res
#echo -e "\nConverting to Nifti the high res section"
#sec_meta_nii=${sec_meta_fn}.nii
#ImageMath 2 ${sec_meta_nii} Byte ${sec_meta}
#echo -e "\nHigh res section nifti file" ${sec_meta_nii} "info:"
#c3d ${sec_meta_nii} -info-full | grep -E '(Bounding Box|Voxel Spacing|Image Dimensions)'
######
### PNG to Nifti, we have to get the transform from somewhere else
#echo -e "\nConverting to Nifti the high res MRI"
#mri_ori_nii=${mri_ori_fn}.nii
#ImageMath 2 ${mri_ori_nii} Byte ${mri_ori}
# Copy header
#c3d ${sec_meta_nii} ${mri_ori_nii} -copy-transform -o ${mri_ori_nii}
# Change to short
#c3d ${mri_ori_nii} -type ushort -o ${mri_ori_nii}
#echo -e "\nHigh res MRI nifti file" ${mri_ori_nii} "info:"
#c3d ${mri_ori_nii} -info-full | grep -E '(Bounding Box|Voxel Spacing|Image Dimensions)'
### Apply transformation to the high res MRI
#echo -e "\nApply transformation to high res"
#aff_transf=${ants_trans}0GenericAffine.mat
#nl_transf=${ants_trans}1Warp.nii.gz
#mri_ori_warp_fn=${mri_ori_fn}_warp
#atl_res_warp_fn=${atl_res_fn}_warp
#runApplyTransform.sh ${comp_transf} ${sec_meta_nii} ${mri_ori_nii} ${mri_ori_warp_fn} ${DISPLAY_OUTPUT}
|
ChrCoello/warp
|
src/warp_2D_highres.sh
|
Shell
|
mit
| 13,747 |
#!/bin/sh
source "$(dirname $0)/../../scripts/common"
brew_install nmap
|
mediweb/dotfiles
|
utils/nmap/install.sh
|
Shell
|
mit
| 73 |
#!/usr/bin/expect
set procname "ssh_login"
set host [lindex $argv 0]
set usr [lindex $argv 1]
set passwd [lindex $argv 2]
set serverhost "192.168.250.23"
spawn ssh -o PubkeyAuthentication=no $usr@$host
expect {
-timeout 30
-re "yes" {send "yes\n";exp_continue}
-re "password" {send "$passwd\n"}
-re "#' { send "echo 'welcome'\n"}
}
expect "root@*#"
send "apt-get install --reinstall salt-minion -y\n"
expect "root@*#"
send "sed -i 's/^#master: salt/master: $serverhost/g' /etc/salt/minion \n"
expect "root@*#"
send "service salt-minion restart \n"
|
fishcried/linux-profile
|
scripts/install-salt-minion.sh
|
Shell
|
mit
| 560 |
#!/bin/sh
# IrssiBot start / stop script for Unix platforms
# for release, comment this
JAR_DIR=/usr/local/java/tools
# for release, uncomment this
#JAR_DIR=jars
case "$1" in
start)
echo -n "starting IrssiBot.. "
java -classpath "$JAR_DIR/xerces.jar:$JAR_DIR/mysql.jar:." irssibot.core.Core > bot.log 2>&1 &
echo "[done]"
;;
stop)
echo -n "stopping IrssiBot.. "
kill `ps axuwww | grep "irssibot\.core\.Core" | grep -v grep | awk '{print $2}'`
echo "[done]"
;;
*)
echo "usage: $0 {start|stop}"
exit 1
esac
|
foomango/antex
|
chapter3_example_irssibot/irssibot/bin/bot.sh
|
Shell
|
mit
| 551 |
#! /bin/bash -
#########################
#
# Name: scalar-mesan.bash
#
# Purpose: Convert rotated scalar fields (MESAN) from GRIB to unrotated netCDF
#
# Usage: ./scalar-mesan.bash -i <input-grib-file>
#
# Revision history: 2016-03-22 -- Script created, Martin Evaldsson, Rossby Centre
#
# Contact persons: [email protected]
#
########################
program=$0
function usage {
echo "Usage: ./scalar-mesan.bash -i <input-grib-file>
-i input GRIB file (must be a scalar field)
Convert rotated scalar fields (MESAN) from GRIB to unrotated netCDF.
" 1>&2
}
function log {
echo "[ $(date -u '+%Y-%m-%d %H:%M') ]: " $*
}
info()
{
log "*II* $*"
}
warning()
{
log "*WW* $*" 1>&2
}
error()
{
log "*EE* $*" 1>&2
exit 1
}
function usage_and_exit {
exit_code=${1:-0}
usage
exit $exit_code
}
# Show usage and exit when no arguments are given
if [ $# -eq 0 ]; then
usage_and_exit 0
fi
while getopts "hi:" opt; do
case $opt in
h)
usage_and_exit 0
;;
i)
input_grib_file=$OPTARG
;;
?)
echo "Invalid option: -$OPTARG" >&2
usage_and_exit 1
;;
esac
done
# Remove any previous files
rm -f ${input_grib_file%*.grb}"-curvilinear.nc"
rm -f ${input_grib_file%*.grb}".nc"
grid_description_file=/tmp/grid_description.txt
# Convert to curvilinear netCDF
cdo -f nc setgridtype,curvilinear ${input_grib_file} ${input_grib_file%*.grb}"-curvilinear.nc"
echo "gridtype : lonlat
xsize : 1286
ysize : 1222
xinc : 0.084
yinc : 0.049
xfirst : -44.2
yfirst : 22.1" > ${grid_description_file}
# Reuse existing weight file
set -- $(md5sum ${grid_description_file})
md5grid=$1
weightfile=${input_grib_file%*.grb}"-weight-"${md5grid}".nc"
if [ ! -f ${weightfile} ]
then
cdo genbil,${grid_description_file} ${input_grib_file%*.grb}"-curvilinear.nc" ${weightfile}
fi
cdo remap,${grid_description_file},${weightfile} ${input_grib_file%*.grb}"-curvilinear.nc" ${input_grib_file%*.grb}".nc"
rm -f ${grid_description_file}
|
marev711/scripts
|
process-obsdata/scalar-mesan.bash
|
Shell
|
mit
| 2,034 |
#!/bin/bash
OUT=pipeline_150_ui.tar
if [[ -f "${OUT}" ]]; then
    rm "$OUT"
fi
if [[ -f "${OUT}.gz" ]]; then
    rm "${OUT}.gz"
fi
# (compression level is passed to pigz explicitly below)
tar --exclude="*.pyc" --dereference --hard-dereference --check-links -cvf $OUT ../ui/db ../ui/lib ../ui/techs ../ui/setup.json
tar --exclude="*.pyc" --exclude='../ui/db' --exclude='../ui/lib' --exclude='../ui/techs' --exclude="setup.json" -rvf $OUT ../ui
pigz -9 $OUT
|
sauloal/pycluster
|
pack.sh
|
Shell
|
mit
| 434 |
# Generated by Powerlevel10k configuration wizard on 2020-04-17 at 23:52 PDT.
# Based on romkatv/powerlevel10k/config/p10k-lean.zsh, checksum 50246.
# Wizard options: nerdfont-complete + powerline, large icons, unicode, lean, time,
# 2 lines, dotted, left frame, light-ornaments, compact, many icons, concise,
# transient_prompt, instant_prompt=verbose.
# Type `p10k configure` to generate another config.
#
# Config for Powerlevel10k with lean prompt style. Type `p10k configure` to generate
# your own config based on it.
#
# Tip: Looking for a nice color? Here's a one-liner to print colormap.
#
# for i in {0..255}; do print -Pn "%K{$i} %k%F{$i}${(l:3::0:)i}%f " ${${(M)$((i%6)):#3}:+$'\n'}; done
# Temporarily change options.
'builtin' 'local' '-a' 'p10k_config_opts'
[[ ! -o 'aliases' ]] || p10k_config_opts+=('aliases')
[[ ! -o 'sh_glob' ]] || p10k_config_opts+=('sh_glob')
[[ ! -o 'no_brace_expand' ]] || p10k_config_opts+=('no_brace_expand')
'builtin' 'setopt' 'no_aliases' 'no_sh_glob' 'brace_expand'
() {
emulate -L zsh -o extended_glob
# Unset all configuration options. This allows you to apply configuration changes without
# restarting zsh. Edit ~/.p10k.zsh and type `source ~/.p10k.zsh`.
unset -m 'POWERLEVEL9K_*'
# Zsh >= 5.1 is required.
autoload -Uz is-at-least && is-at-least 5.1 || return
# The list of segments shown on the left. Fill it with the most important segments.
typeset -g POWERLEVEL9K_LEFT_PROMPT_ELEMENTS=(
# =========================[ Line #1 ]=========================
os_icon # os identifier
dir # current directory
vcs # git status
# =========================[ Line #2 ]=========================
newline # \n
prompt_char # prompt symbol
)
# The list of segments shown on the right. Fill it with less important segments.
# Right prompt on the last prompt line (where you are typing your commands) gets
# automatically hidden when the input line reaches it. Right prompt above the
# last prompt line gets hidden if it would overlap with left prompt.
typeset -g POWERLEVEL9K_RIGHT_PROMPT_ELEMENTS=(
# =========================[ Line #1 ]=========================
status # exit code of the last command
command_execution_time # duration of the last command
background_jobs # presence of background jobs
direnv # direnv status (https://direnv.net/)
asdf # asdf version manager (https://github.com/asdf-vm/asdf)
virtualenv # python virtual environment (https://docs.python.org/3/library/venv.html)
anaconda # conda environment (https://conda.io/)
pyenv # python environment (https://github.com/pyenv/pyenv)
goenv # go environment (https://github.com/syndbg/goenv)
nodenv # node.js version from nodenv (https://github.com/nodenv/nodenv)
nvm # node.js version from nvm (https://github.com/nvm-sh/nvm)
nodeenv # node.js environment (https://github.com/ekalinin/nodeenv)
# node_version # node.js version
# go_version # go version (https://golang.org)
# rust_version # rustc version (https://www.rust-lang.org)
# dotnet_version # .NET version (https://dotnet.microsoft.com)
# php_version # php version (https://www.php.net/)
# laravel_version # laravel php framework version (https://laravel.com/)
# java_version # java version (https://www.java.com/)
# package # name@version from package.json (https://docs.npmjs.com/files/package.json)
rbenv # ruby version from rbenv (https://github.com/rbenv/rbenv)
rvm # ruby version from rvm (https://rvm.io)
fvm # flutter version management (https://github.com/leoafarias/fvm)
luaenv # lua version from luaenv (https://github.com/cehoffman/luaenv)
jenv # java version from jenv (https://github.com/jenv/jenv)
plenv # perl version from plenv (https://github.com/tokuhirom/plenv)
phpenv # php version from phpenv (https://github.com/phpenv/phpenv)
haskell_stack # haskell version from stack (https://haskellstack.org/)
kubecontext # current kubernetes context (https://kubernetes.io/)
terraform # terraform workspace (https://www.terraform.io)
aws # aws profile (https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-profiles.html)
aws_eb_env # aws elastic beanstalk environment (https://aws.amazon.com/elasticbeanstalk/)
azure # azure account name (https://docs.microsoft.com/en-us/cli/azure)
gcloud # google cloud cli account and project (https://cloud.google.com/)
google_app_cred # google application credentials (https://cloud.google.com/docs/authentication/production)
context # user@hostname
nordvpn # nordvpn connection status, linux only (https://nordvpn.com/)
ranger # ranger shell (https://github.com/ranger/ranger)
nnn # nnn shell (https://github.com/jarun/nnn)
vim_shell # vim shell indicator (:sh)
midnight_commander # midnight commander shell (https://midnight-commander.org/)
nix_shell # nix shell (https://nixos.org/nixos/nix-pills/developing-with-nix-shell.html)
# vpn_ip # virtual private network indicator
# load # CPU load
# disk_usage # disk usage
# ram # free RAM
# swap # used swap
todo # todo items (https://github.com/todotxt/todo.txt-cli)
timewarrior # timewarrior tracking status (https://timewarrior.net/)
taskwarrior # taskwarrior task count (https://taskwarrior.org/)
time # current time
# =========================[ Line #2 ]=========================
newline
# ip # ip address and bandwidth usage for a specified network interface
# public_ip # public IP address
# proxy # system-wide http/https/ftp proxy
# battery # internal battery
# wifi # wifi speed
# example # example user-defined segment (see prompt_example function below)
)
# Defines character set used by powerlevel10k. It's best to let `p10k configure` set it for you.
typeset -g POWERLEVEL9K_MODE=nerdfont-complete
# When set to `moderate`, some icons will have an extra space after them. This is meant to avoid
# icon overlap when using non-monospace fonts. When set to `none`, spaces are not added.
typeset -g POWERLEVEL9K_ICON_PADDING=moderate
# Basic style options that define the overall look of your prompt. You probably don't want to
# change them.
typeset -g POWERLEVEL9K_BACKGROUND= # transparent background
typeset -g POWERLEVEL9K_{LEFT,RIGHT}_{LEFT,RIGHT}_WHITESPACE= # no surrounding whitespace
typeset -g POWERLEVEL9K_{LEFT,RIGHT}_SUBSEGMENT_SEPARATOR=' ' # separate segments with a space
typeset -g POWERLEVEL9K_{LEFT,RIGHT}_SEGMENT_SEPARATOR= # no end-of-line symbol
# When set to true, icons appear before content on both sides of the prompt. When set
# to false, icons go after content. If empty or not set, icons go before content in the left
# prompt and after content in the right prompt.
#
# You can also override it for a specific segment:
#
# POWERLEVEL9K_STATUS_ICON_BEFORE_CONTENT=false
#
# Or for a specific segment in specific state:
#
# POWERLEVEL9K_DIR_NOT_WRITABLE_ICON_BEFORE_CONTENT=false
typeset -g POWERLEVEL9K_ICON_BEFORE_CONTENT=true
# Add an empty line before each prompt.
typeset -g POWERLEVEL9K_PROMPT_ADD_NEWLINE=false
# Connect left prompt lines with these symbols.
typeset -g POWERLEVEL9K_MULTILINE_FIRST_PROMPT_PREFIX='%242F╭─'
typeset -g POWERLEVEL9K_MULTILINE_NEWLINE_PROMPT_PREFIX='%242F├─'
typeset -g POWERLEVEL9K_MULTILINE_LAST_PROMPT_PREFIX='%242F╰─'
# Connect right prompt lines with these symbols.
typeset -g POWERLEVEL9K_MULTILINE_FIRST_PROMPT_SUFFIX=
typeset -g POWERLEVEL9K_MULTILINE_NEWLINE_PROMPT_SUFFIX=
typeset -g POWERLEVEL9K_MULTILINE_LAST_PROMPT_SUFFIX=
# The left end of left prompt.
typeset -g POWERLEVEL9K_LEFT_PROMPT_FIRST_SEGMENT_START_SYMBOL=' '
# The right end of right prompt.
typeset -g POWERLEVEL9K_RIGHT_PROMPT_LAST_SEGMENT_END_SYMBOL=
# Ruler, a.k.a. the horizontal line before each prompt. If you set it to true, you'll
# probably want to set POWERLEVEL9K_PROMPT_ADD_NEWLINE=false above and
# POWERLEVEL9K_MULTILINE_FIRST_PROMPT_GAP_CHAR=' ' below.
typeset -g POWERLEVEL9K_SHOW_RULER=false
typeset -g POWERLEVEL9K_RULER_CHAR='─' # reasonable alternative: '·'
typeset -g POWERLEVEL9K_RULER_FOREGROUND=242
# Filler between left and right prompt on the first prompt line. You can set it to '·' or '─'
# to make it easier to see the alignment between left and right prompt and to separate prompt
# from command output. It serves the same purpose as ruler (see above) without increasing
# the number of prompt lines. You'll probably want to set POWERLEVEL9K_SHOW_RULER=false
# if using this. You might also like POWERLEVEL9K_PROMPT_ADD_NEWLINE=false for more compact
# prompt.
typeset -g POWERLEVEL9K_MULTILINE_FIRST_PROMPT_GAP_CHAR='·'
if [[ $POWERLEVEL9K_MULTILINE_FIRST_PROMPT_GAP_CHAR != ' ' ]]; then
# The color of the filler.
typeset -g POWERLEVEL9K_MULTILINE_FIRST_PROMPT_GAP_FOREGROUND=242
# Add a space between the end of left prompt and the filler.
typeset -g POWERLEVEL9K_LEFT_PROMPT_LAST_SEGMENT_END_SYMBOL=' '
# Add a space between the filler and the start of right prompt.
typeset -g POWERLEVEL9K_RIGHT_PROMPT_FIRST_SEGMENT_START_SYMBOL=' '
# Start filler from the edge of the screen if there are no left segments on the first line.
typeset -g POWERLEVEL9K_EMPTY_LINE_LEFT_PROMPT_FIRST_SEGMENT_END_SYMBOL='%{%}'
# End filler on the edge of the screen if there are no right segments on the first line.
typeset -g POWERLEVEL9K_EMPTY_LINE_RIGHT_PROMPT_FIRST_SEGMENT_START_SYMBOL='%{%}'
fi
#################################[ os_icon: os identifier ]##################################
# OS identifier color.
typeset -g POWERLEVEL9K_OS_ICON_FOREGROUND=
# Make the icon bold.
typeset -g POWERLEVEL9K_OS_ICON_CONTENT_EXPANSION='%B${P9K_CONTENT}'
################################[ prompt_char: prompt symbol ]################################
# Green prompt symbol if the last command succeeded.
typeset -g POWERLEVEL9K_PROMPT_CHAR_OK_{VIINS,VICMD,VIVIS,VIOWR}_FOREGROUND=76
# Red prompt symbol if the last command failed.
typeset -g POWERLEVEL9K_PROMPT_CHAR_ERROR_{VIINS,VICMD,VIVIS,VIOWR}_FOREGROUND=196
# Default prompt symbol.
typeset -g POWERLEVEL9K_PROMPT_CHAR_{OK,ERROR}_VIINS_CONTENT_EXPANSION='❯'
# Prompt symbol in command vi mode.
typeset -g POWERLEVEL9K_PROMPT_CHAR_{OK,ERROR}_VICMD_CONTENT_EXPANSION='❮'
# Prompt symbol in visual vi mode.
typeset -g POWERLEVEL9K_PROMPT_CHAR_{OK,ERROR}_VIVIS_CONTENT_EXPANSION='Ⅴ'
# Prompt symbol in overwrite vi mode.
typeset -g POWERLEVEL9K_PROMPT_CHAR_{OK,ERROR}_VIOWR_CONTENT_EXPANSION='▶'
typeset -g POWERLEVEL9K_PROMPT_CHAR_OVERWRITE_STATE=true
# No line terminator if prompt_char is the last segment.
typeset -g POWERLEVEL9K_PROMPT_CHAR_LEFT_PROMPT_LAST_SEGMENT_END_SYMBOL=''
# No line introducer if prompt_char is the first segment.
typeset -g POWERLEVEL9K_PROMPT_CHAR_LEFT_PROMPT_FIRST_SEGMENT_START_SYMBOL=
##################################[ dir: current directory ]##################################
# Default current directory color.
typeset -g POWERLEVEL9K_DIR_FOREGROUND=31
# If directory is too long, shorten some of its segments to the shortest possible unique
# prefix. The shortened directory can be tab-completed to the original.
typeset -g POWERLEVEL9K_SHORTEN_STRATEGY=truncate_to_unique
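# Other shortening strategies exist as well (a hedged sketch; names taken from upstream
# Powerlevel9k/10k documentation, not defaults of this file):
#
#   typeset -g POWERLEVEL9K_SHORTEN_STRATEGY=truncate_to_last    # keep only the last segment
#   typeset -g POWERLEVEL9K_SHORTEN_STRATEGY=truncate_from_right # drop characters from the right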
# Replace removed segment suffixes with this symbol.
typeset -g POWERLEVEL9K_SHORTEN_DELIMITER=
# Color of the shortened directory segments.
typeset -g POWERLEVEL9K_DIR_SHORTENED_FOREGROUND=103
# Color of the anchor directory segments. Anchor segments are never shortened. The first
# segment is always an anchor.
typeset -g POWERLEVEL9K_DIR_ANCHOR_FOREGROUND=39
# Display anchor directory segments in bold.
typeset -g POWERLEVEL9K_DIR_ANCHOR_BOLD=true
# Don't shorten directories that contain any of these files. They are anchors.
local anchor_files=(
.bzr
.citc
.git
.hg
.node-version
.python-version
.go-version
.ruby-version
.lua-version
.java-version
.perl-version
.php-version
.tool-version
.shorten_folder_marker
.svn
.terraform
CVS
Cargo.toml
composer.json
go.mod
package.json
stack.yaml
)
typeset -g POWERLEVEL9K_SHORTEN_FOLDER_MARKER="(${(j:|:)anchor_files})"
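# For reference, the (j:|:) expansion flag above joins the array elements with '|', so the
# resulting pattern looks like (.bzr|.citc|.git|...|stack.yaml).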
# If set to true, remove everything before the last (deepest) subdirectory that contains files
# matching $POWERLEVEL9K_SHORTEN_FOLDER_MARKER. For example, when the current directory is
# /foo/bar/git_repo/baz, prompt will display git_repo/baz. This assumes that /foo/bar/git_repo
# contains a marker (.git) and other directories don't.
typeset -g POWERLEVEL9K_DIR_TRUNCATE_BEFORE_MARKER=false
# Don't shorten this many last directory segments. They are anchors.
typeset -g POWERLEVEL9K_SHORTEN_DIR_LENGTH=1
# Shorten directory if it's longer than this even if there is space for it. The value can
# be either absolute (e.g., '80') or a percentage of terminal width (e.g., '50%'). If empty,
# directory will be shortened only when prompt doesn't fit or when other parameters demand it
# (see POWERLEVEL9K_DIR_MIN_COMMAND_COLUMNS and POWERLEVEL9K_DIR_MIN_COMMAND_COLUMNS_PCT below).
# If set to `0`, directory will always be shortened to its minimum length.
typeset -g POWERLEVEL9K_DIR_MAX_LENGTH=80
# When `dir` segment is on the last prompt line, try to shorten it enough to leave at least this
# many columns for typing commands.
typeset -g POWERLEVEL9K_DIR_MIN_COMMAND_COLUMNS=40
# When `dir` segment is on the last prompt line, try to shorten it enough to leave at least
# COLUMNS * POWERLEVEL9K_DIR_MIN_COMMAND_COLUMNS_PCT * 0.01 columns for typing commands.
typeset -g POWERLEVEL9K_DIR_MIN_COMMAND_COLUMNS_PCT=50
# If set to true, embed a hyperlink into the directory. Useful for quickly
# opening a directory in the file manager simply by clicking the link.
# Can also be handy when the directory is shortened, as it allows you to see
# the full directory that was used in previous commands.
typeset -g POWERLEVEL9K_DIR_HYPERLINK=false
# Enable special styling for non-writable directories.
typeset -g POWERLEVEL9K_DIR_SHOW_WRITABLE=true
# Show this icon when the current directory is not writable. POWERLEVEL9K_DIR_SHOW_WRITABLE
# above must be set to true for this parameter to have effect.
# typeset -g POWERLEVEL9K_DIR_NOT_WRITABLE_VISUAL_IDENTIFIER_EXPANSION='⭐'
# Custom prefix.
# typeset -g POWERLEVEL9K_DIR_PREFIX='%fin '
# POWERLEVEL9K_DIR_CLASSES allows you to specify custom icons for different directories.
# It must be an array with 3 * N elements. Each triplet consists of:
#
# 1. A pattern against which the current directory is matched. Matching is done with
# extended_glob option enabled.
# 2. Directory class for the purpose of styling.
# 3. Icon.
#
# Triplets are tried in order. The first triplet whose pattern matches $PWD wins. If there
# are no matches, the directory will have no icon.
#
# Example:
#
# typeset -g POWERLEVEL9K_DIR_CLASSES=(
# '~/work(|/*)' WORK '(╯°□°)╯︵ ┻━┻'
# '~(|/*)' HOME '⌂'
# '*' DEFAULT '')
#
# With these settings, the current directory in the prompt may look like this:
#
# (╯°□°)╯︵ ┻━┻ ~/work/projects/important/urgent
#
# Or like this:
#
# ⌂ ~/best/powerlevel10k
#
# You can also set different colors for directories of different classes. Remember to override
# FOREGROUND, SHORTENED_FOREGROUND and ANCHOR_FOREGROUND for every directory class that you wish
# to have its own color.
#
# typeset -g POWERLEVEL9K_DIR_WORK_FOREGROUND=31
# typeset -g POWERLEVEL9K_DIR_WORK_SHORTENED_FOREGROUND=103
# typeset -g POWERLEVEL9K_DIR_WORK_ANCHOR_FOREGROUND=39
#
# typeset -g POWERLEVEL9K_DIR_CLASSES=()
#####################################[ vcs: git status ]######################################
# Branch icon. Set this parameter to '\uF126 ' for the popular Powerline branch icon.
typeset -g POWERLEVEL9K_VCS_BRANCH_ICON='\uF126 '
# Untracked files icon. It's really a question mark, your font isn't broken.
# Change the value of this parameter to show a different icon.
typeset -g POWERLEVEL9K_VCS_UNTRACKED_ICON='?'
# Formatter for Git status.
#
# Example output: master ⇣42⇡42 *42 merge ~42 +42 !42 ?42.
#
# You can edit the function to customize how Git status looks.
#
# VCS_STATUS_* parameters are set by gitstatus plugin. See reference:
# https://github.com/romkatv/gitstatus/blob/master/gitstatus.plugin.zsh.
function my_git_formatter() {
emulate -L zsh
if [[ -n $P9K_CONTENT ]]; then
# If P9K_CONTENT is not empty, use it. It's either "loading" or from vcs_info (not from
# gitstatus plugin). VCS_STATUS_* parameters are not available in this case.
typeset -g my_git_format=$P9K_CONTENT
return
fi
if (( $1 )); then
# Styling for up-to-date Git status.
local meta='%f' # default foreground
local clean='%76F' # green foreground
local modified='%178F' # yellow foreground
local untracked='%39F' # blue foreground
local conflicted='%196F' # red foreground
else
# Styling for incomplete and stale Git status.
local meta='%244F' # grey foreground
local clean='%244F' # grey foreground
local modified='%244F' # grey foreground
local untracked='%244F' # grey foreground
local conflicted='%244F' # grey foreground
fi
local res
local where # branch or tag
if [[ -n $VCS_STATUS_LOCAL_BRANCH ]]; then
res+="${clean}${(g::)POWERLEVEL9K_VCS_BRANCH_ICON}"
where=${(V)VCS_STATUS_LOCAL_BRANCH}
elif [[ -n $VCS_STATUS_TAG ]]; then
res+="${meta}#"
where=${(V)VCS_STATUS_TAG}
fi
# If local branch name or tag is at most 32 characters long, show it in full.
# Otherwise show the first 12 characters, an ellipsis, and the last 12.
(( $#where > 32 )) && where[13,-13]="…"
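# A worked illustration of the subscript assignment above (branch name is hypothetical):
#   where='feature/some-extremely-long-branch-name-here'   # 44 characters
#   where[13,-13]='…'                                      # -> 'feature/some…ch-name-here'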
res+="${clean}${where//\%/%%}" # escape %
# Display the current Git commit if there is no branch or tag.
# Tip: To always display the current Git commit, remove `[[ -z $where ]] &&` from the next line.
[[ -z $where ]] && res+="${meta}@${clean}${VCS_STATUS_COMMIT[1,8]}"
# Show tracking branch name if it differs from local branch.
if [[ -n ${VCS_STATUS_REMOTE_BRANCH:#$VCS_STATUS_LOCAL_BRANCH} ]]; then
res+="${meta}:${clean}${(V)VCS_STATUS_REMOTE_BRANCH//\%/%%}" # escape %
fi
# ⇣42 if behind the remote.
(( VCS_STATUS_COMMITS_BEHIND )) && res+=" ${clean}⇣${VCS_STATUS_COMMITS_BEHIND}"
# ⇡42 if ahead of the remote; no leading space if also behind the remote: ⇣42⇡42.
(( VCS_STATUS_COMMITS_AHEAD && !VCS_STATUS_COMMITS_BEHIND )) && res+=" "
(( VCS_STATUS_COMMITS_AHEAD )) && res+="${clean}⇡${VCS_STATUS_COMMITS_AHEAD}"
# ⇠42 if behind the push remote.
(( VCS_STATUS_PUSH_COMMITS_BEHIND )) && res+=" ${clean}⇠${VCS_STATUS_PUSH_COMMITS_BEHIND}"
(( VCS_STATUS_PUSH_COMMITS_AHEAD && !VCS_STATUS_PUSH_COMMITS_BEHIND )) && res+=" "
# ⇢42 if ahead of the push remote; no leading space if also behind: ⇠42⇢42.
(( VCS_STATUS_PUSH_COMMITS_AHEAD )) && res+="${clean}⇢${VCS_STATUS_PUSH_COMMITS_AHEAD}"
# *42 if have stashes.
(( VCS_STATUS_STASHES )) && res+=" ${clean}*${VCS_STATUS_STASHES}"
# 'merge' if the repo is in an unusual state.
[[ -n $VCS_STATUS_ACTION ]] && res+=" ${conflicted}${VCS_STATUS_ACTION}"
# ~42 if have merge conflicts.
(( VCS_STATUS_NUM_CONFLICTED )) && res+=" ${conflicted}~${VCS_STATUS_NUM_CONFLICTED}"
# +42 if have staged changes.
(( VCS_STATUS_NUM_STAGED )) && res+=" ${modified}+${VCS_STATUS_NUM_STAGED}"
# !42 if have unstaged changes.
(( VCS_STATUS_NUM_UNSTAGED )) && res+=" ${modified}!${VCS_STATUS_NUM_UNSTAGED}"
# ?42 if have untracked files. It's really a question mark, your font isn't broken.
# See POWERLEVEL9K_VCS_UNTRACKED_ICON above if you want to use a different icon.
# Remove the next line if you don't want to see untracked files at all.
(( VCS_STATUS_NUM_UNTRACKED )) && res+=" ${untracked}${(g::)POWERLEVEL9K_VCS_UNTRACKED_ICON}${VCS_STATUS_NUM_UNTRACKED}"
# "─" if the number of unstaged files is unknown. This can happen due to
# POWERLEVEL9K_VCS_MAX_INDEX_SIZE_DIRTY (see below) being set to a non-negative number lower
# than the number of files in the Git index, or due to bash.showDirtyState being set to false
# in the repository config. The number of staged and untracked files may also be unknown
# in this case.
(( VCS_STATUS_HAS_UNSTAGED == -1 )) && res+=" ${modified}─"
typeset -g my_git_format=$res
}
functions -M my_git_formatter 2>/dev/null
# Don't count the number of unstaged, untracked and conflicted files in Git repositories with
# more than this many files in the index. Negative value means infinity.
#
# If you are working in Git repositories with tens of millions of files and seeing performance
# sagging, try setting POWERLEVEL9K_VCS_MAX_INDEX_SIZE_DIRTY to a number lower than the output
# of `git ls-files | wc -l`. Alternatively, add `bash.showDirtyState = false` to the repository's
# config: `git config bash.showDirtyState false`.
typeset -g POWERLEVEL9K_VCS_MAX_INDEX_SIZE_DIRTY=-1
# Don't show Git status in prompt for repositories whose workdir matches this pattern.
# For example, if set to '~', the Git repository at $HOME/.git will be ignored.
# Multiple patterns can be combined with '|': '~|~/some/dir'.
typeset -g POWERLEVEL9K_VCS_DISABLED_WORKDIR_PATTERN='~'
# Disable the default Git status formatting.
typeset -g POWERLEVEL9K_VCS_DISABLE_GITSTATUS_FORMATTING=true
# Install our own Git status formatter.
typeset -g POWERLEVEL9K_VCS_CONTENT_EXPANSION='${$((my_git_formatter(1)))+${my_git_format}}'
typeset -g POWERLEVEL9K_VCS_LOADING_CONTENT_EXPANSION='${$((my_git_formatter(0)))+${my_git_format}}'
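# Note on the two expansions above (a brief, hedged explanation): the $((...)) arithmetic call
# invokes my_git_formatter as a zsh math function (registered with `functions -M` above) purely
# for its side effect of setting $my_git_format; the surrounding ${...+...} parameter expansion
# then substitutes that value.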
# Enable counters for staged, unstaged, etc.
typeset -g POWERLEVEL9K_VCS_{STAGED,UNSTAGED,UNTRACKED,CONFLICTED,COMMITS_AHEAD,COMMITS_BEHIND}_MAX_NUM=-1
# Icon color.
typeset -g POWERLEVEL9K_VCS_VISUAL_IDENTIFIER_COLOR=76
typeset -g POWERLEVEL9K_VCS_LOADING_VISUAL_IDENTIFIER_COLOR=244
# Custom icon.
# typeset -g POWERLEVEL9K_VCS_VISUAL_IDENTIFIER_EXPANSION='⭐'
# Custom prefix.
# typeset -g POWERLEVEL9K_VCS_PREFIX='%fon '
# Show status of repositories of these types. You can add svn and/or hg if you are
# using them. If you do, your prompt may become slow even when your current directory
# isn't in an svn or hg repository.
typeset -g POWERLEVEL9K_VCS_BACKENDS=(git)
# These settings are used for repositories other than Git or when gitstatusd fails and
# Powerlevel10k has to fall back to using vcs_info.
typeset -g POWERLEVEL9K_VCS_CLEAN_FOREGROUND=76
typeset -g POWERLEVEL9K_VCS_UNTRACKED_FOREGROUND=76
typeset -g POWERLEVEL9K_VCS_MODIFIED_FOREGROUND=178
##########################[ status: exit code of the last command ]###########################
# Enable OK_PIPE, ERROR_PIPE and ERROR_SIGNAL status states to allow us to enable, disable and
# style them independently from the regular OK and ERROR state.
typeset -g POWERLEVEL9K_STATUS_EXTENDED_STATES=true
# Status on success. No content, just an icon. No need to show it if prompt_char is enabled as
# it will signify success by turning green.
typeset -g POWERLEVEL9K_STATUS_OK=false
typeset -g POWERLEVEL9K_STATUS_OK_FOREGROUND=70
typeset -g POWERLEVEL9K_STATUS_OK_VISUAL_IDENTIFIER_EXPANSION='✔'
# Status when some part of a pipe command fails but the overall exit status is zero. It may look
# like this: 1|0.
typeset -g POWERLEVEL9K_STATUS_OK_PIPE=true
typeset -g POWERLEVEL9K_STATUS_OK_PIPE_FOREGROUND=70
typeset -g POWERLEVEL9K_STATUS_OK_PIPE_VISUAL_IDENTIFIER_EXPANSION='✔'
# Status when it's just an error code (e.g., '1'). No need to show it if prompt_char is enabled as
# it will signify error by turning red.
typeset -g POWERLEVEL9K_STATUS_ERROR=false
typeset -g POWERLEVEL9K_STATUS_ERROR_FOREGROUND=160
typeset -g POWERLEVEL9K_STATUS_ERROR_VISUAL_IDENTIFIER_EXPANSION='✘'
# Status when the last command was terminated by a signal.
typeset -g POWERLEVEL9K_STATUS_ERROR_SIGNAL=true
typeset -g POWERLEVEL9K_STATUS_ERROR_SIGNAL_FOREGROUND=160
# Use terse signal names: "INT" instead of "SIGINT(2)".
typeset -g POWERLEVEL9K_STATUS_VERBOSE_SIGNAME=false
typeset -g POWERLEVEL9K_STATUS_ERROR_SIGNAL_VISUAL_IDENTIFIER_EXPANSION='✘'
# Status when some part of a pipe command fails and the overall exit status is also non-zero.
# It may look like this: 0|1.
typeset -g POWERLEVEL9K_STATUS_ERROR_PIPE=true
typeset -g POWERLEVEL9K_STATUS_ERROR_PIPE_FOREGROUND=160
typeset -g POWERLEVEL9K_STATUS_ERROR_PIPE_VISUAL_IDENTIFIER_EXPANSION='✘'
###################[ command_execution_time: duration of the last command ]###################
# Show duration of the last command if takes longer than this many seconds.
typeset -g POWERLEVEL9K_COMMAND_EXECUTION_TIME_THRESHOLD=3
# Show this many fractional digits. Zero means round to seconds.
typeset -g POWERLEVEL9K_COMMAND_EXECUTION_TIME_PRECISION=0
# Execution time color.
typeset -g POWERLEVEL9K_COMMAND_EXECUTION_TIME_FOREGROUND=101
# Duration format: 1d 2h 3m 4s.
typeset -g POWERLEVEL9K_COMMAND_EXECUTION_TIME_FORMAT='d h m s'
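# For example, with this format a command that ran for 3723 seconds is displayed as "1h 2m 3s".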
# Custom icon.
# typeset -g POWERLEVEL9K_COMMAND_EXECUTION_TIME_VISUAL_IDENTIFIER_EXPANSION='⭐'
# Custom prefix.
# typeset -g POWERLEVEL9K_COMMAND_EXECUTION_TIME_PREFIX='%ftook '
#######################[ background_jobs: presence of background jobs ]#######################
# Don't show the number of background jobs.
typeset -g POWERLEVEL9K_BACKGROUND_JOBS_VERBOSE=false
# Background jobs color.
typeset -g POWERLEVEL9K_BACKGROUND_JOBS_FOREGROUND=70
# Custom icon.
# typeset -g POWERLEVEL9K_BACKGROUND_JOBS_VISUAL_IDENTIFIER_EXPANSION='⭐'
#######################[ direnv: direnv status (https://direnv.net/) ]########################
# Direnv color.
typeset -g POWERLEVEL9K_DIRENV_FOREGROUND=178
# Custom icon.
# typeset -g POWERLEVEL9K_DIRENV_VISUAL_IDENTIFIER_EXPANSION='⭐'
###############[ asdf: asdf version manager (https://github.com/asdf-vm/asdf) ]###############
# Default asdf color. Only used to display tools for which there is no color override (see below).
typeset -g POWERLEVEL9K_ASDF_FOREGROUND=66
# There are four parameters that can be used to hide asdf tools. Each parameter describes
# conditions under which a tool gets hidden. Parameters can hide tools but not unhide them. If at
# least one parameter decides to hide a tool, that tool gets hidden. If no parameter decides to
# hide a tool, it gets shown.
#
# Special note on the difference between POWERLEVEL9K_ASDF_SOURCES and
# POWERLEVEL9K_ASDF_PROMPT_ALWAYS_SHOW. Consider the effect of the following commands:
#
# asdf local python 3.8.1
# asdf global python 3.8.1
#
# After running both commands the current python version is 3.8.1 and its source is "local" as
# it takes precedence over "global". If POWERLEVEL9K_ASDF_PROMPT_ALWAYS_SHOW is set to false,
# it'll hide python version in this case because 3.8.1 is the same as the global version.
# POWERLEVEL9K_ASDF_SOURCES will hide python version only if the value of this parameter doesn't
# contain "local".
# Hide tool versions that don't come from one of these sources.
#
# Available sources:
#
# - shell `asdf current` says "set by ASDF_${TOOL}_VERSION environment variable"
# - local `asdf current` says "set by /some/not/home/directory/file"
# - global `asdf current` says "set by /home/username/file"
#
# Note: If this parameter is set to (shell local global), it won't hide tools.
# Tip: Override this parameter for ${TOOL} with POWERLEVEL9K_ASDF_${TOOL}_SOURCES.
typeset -g POWERLEVEL9K_ASDF_SOURCES=(shell local global)
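# For example (a hedged sketch; the tool name is illustrative), to show the python version only
# when it is set locally:
#
#   typeset -g POWERLEVEL9K_ASDF_PYTHON_SOURCES=(local)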
# If set to false, hide tool versions that are the same as global.
#
# Note: The name of this parameter doesn't reflect its meaning at all.
# Note: If this parameter is set to true, it won't hide tools.
# Tip: Override this parameter for ${TOOL} with POWERLEVEL9K_ASDF_${TOOL}_PROMPT_ALWAYS_SHOW.
typeset -g POWERLEVEL9K_ASDF_PROMPT_ALWAYS_SHOW=false
# If set to false, hide tool versions that are equal to "system".
#
# Note: If this parameter is set to true, it won't hide tools.
# Tip: Override this parameter for ${TOOL} with POWERLEVEL9K_ASDF_${TOOL}_SHOW_SYSTEM.
typeset -g POWERLEVEL9K_ASDF_SHOW_SYSTEM=true
# If set to a non-empty value, hide tools unless there is a file matching the specified file
# pattern in the current directory, or its parent directory, or its grandparent directory, and so on.
#
# Note: If this parameter is set to an empty value, it won't hide tools.
# Note: SHOW_ON_UPGLOB isn't specific to asdf. It works with all prompt segments.
# Tip: Override this parameter for ${TOOL} with POWERLEVEL9K_ASDF_${TOOL}_SHOW_ON_UPGLOB.
#
# Example: Hide nodejs version when there is no package.json and no *.js files in the current
# directory, in `..`, in `../..` and so on.
#
# typeset -g POWERLEVEL9K_ASDF_NODEJS_SHOW_ON_UPGLOB='*.js|package.json'
typeset -g POWERLEVEL9K_ASDF_SHOW_ON_UPGLOB=
# Ruby version from asdf.
typeset -g POWERLEVEL9K_ASDF_RUBY_FOREGROUND=168
# typeset -g POWERLEVEL9K_ASDF_RUBY_VISUAL_IDENTIFIER_EXPANSION='⭐'
# typeset -g POWERLEVEL9K_ASDF_RUBY_SHOW_ON_UPGLOB='*.foo|*.bar'
# Python version from asdf.
typeset -g POWERLEVEL9K_ASDF_PYTHON_FOREGROUND=37
# typeset -g POWERLEVEL9K_ASDF_PYTHON_VISUAL_IDENTIFIER_EXPANSION='⭐'
# typeset -g POWERLEVEL9K_ASDF_PYTHON_SHOW_ON_UPGLOB='*.foo|*.bar'
# Go version from asdf.
typeset -g POWERLEVEL9K_ASDF_GOLANG_FOREGROUND=37
# typeset -g POWERLEVEL9K_ASDF_GOLANG_VISUAL_IDENTIFIER_EXPANSION='⭐'
# typeset -g POWERLEVEL9K_ASDF_GOLANG_SHOW_ON_UPGLOB='*.foo|*.bar'
# Node.js version from asdf.
typeset -g POWERLEVEL9K_ASDF_NODEJS_FOREGROUND=70
# typeset -g POWERLEVEL9K_ASDF_NODEJS_VISUAL_IDENTIFIER_EXPANSION='⭐'
# typeset -g POWERLEVEL9K_ASDF_NODEJS_SHOW_ON_UPGLOB='*.foo|*.bar'
# Rust version from asdf.
typeset -g POWERLEVEL9K_ASDF_RUST_FOREGROUND=37
# typeset -g POWERLEVEL9K_ASDF_RUST_VISUAL_IDENTIFIER_EXPANSION='⭐'
# typeset -g POWERLEVEL9K_ASDF_RUST_SHOW_ON_UPGLOB='*.foo|*.bar'
# .NET Core version from asdf.
typeset -g POWERLEVEL9K_ASDF_DOTNET_CORE_FOREGROUND=134
# typeset -g POWERLEVEL9K_ASDF_DOTNET_CORE_VISUAL_IDENTIFIER_EXPANSION='⭐'
# typeset -g POWERLEVEL9K_ASDF_DOTNET_CORE_SHOW_ON_UPGLOB='*.foo|*.bar'
# Flutter version from asdf.
typeset -g POWERLEVEL9K_ASDF_FLUTTER_FOREGROUND=38
# typeset -g POWERLEVEL9K_ASDF_FLUTTER_VISUAL_IDENTIFIER_EXPANSION='⭐'
# typeset -g POWERLEVEL9K_ASDF_FLUTTER_SHOW_ON_UPGLOB='*.foo|*.bar'
# Lua version from asdf.
typeset -g POWERLEVEL9K_ASDF_LUA_FOREGROUND=32
# typeset -g POWERLEVEL9K_ASDF_LUA_VISUAL_IDENTIFIER_EXPANSION='⭐'
# typeset -g POWERLEVEL9K_ASDF_LUA_SHOW_ON_UPGLOB='*.foo|*.bar'
# Java version from asdf.
typeset -g POWERLEVEL9K_ASDF_JAVA_FOREGROUND=32
# typeset -g POWERLEVEL9K_ASDF_JAVA_VISUAL_IDENTIFIER_EXPANSION='⭐'
# typeset -g POWERLEVEL9K_ASDF_JAVA_SHOW_ON_UPGLOB='*.foo|*.bar'
# Perl version from asdf.
typeset -g POWERLEVEL9K_ASDF_PERL_FOREGROUND=67
# typeset -g POWERLEVEL9K_ASDF_PERL_VISUAL_IDENTIFIER_EXPANSION='⭐'
# typeset -g POWERLEVEL9K_ASDF_PERL_SHOW_ON_UPGLOB='*.foo|*.bar'
# Erlang version from asdf.
typeset -g POWERLEVEL9K_ASDF_ERLANG_FOREGROUND=125
# typeset -g POWERLEVEL9K_ASDF_ERLANG_VISUAL_IDENTIFIER_EXPANSION='⭐'
# typeset -g POWERLEVEL9K_ASDF_ERLANG_SHOW_ON_UPGLOB='*.foo|*.bar'
# Elixir version from asdf.
typeset -g POWERLEVEL9K_ASDF_ELIXIR_FOREGROUND=129
# typeset -g POWERLEVEL9K_ASDF_ELIXIR_VISUAL_IDENTIFIER_EXPANSION='⭐'
# typeset -g POWERLEVEL9K_ASDF_ELIXIR_SHOW_ON_UPGLOB='*.foo|*.bar'
# Postgres version from asdf.
typeset -g POWERLEVEL9K_ASDF_POSTGRES_FOREGROUND=31
# typeset -g POWERLEVEL9K_ASDF_POSTGRES_VISUAL_IDENTIFIER_EXPANSION='⭐'
# typeset -g POWERLEVEL9K_ASDF_POSTGRES_SHOW_ON_UPGLOB='*.foo|*.bar'
# PHP version from asdf.
typeset -g POWERLEVEL9K_ASDF_PHP_FOREGROUND=99
# typeset -g POWERLEVEL9K_ASDF_PHP_VISUAL_IDENTIFIER_EXPANSION='⭐'
# typeset -g POWERLEVEL9K_ASDF_PHP_SHOW_ON_UPGLOB='*.foo|*.bar'
# Haskell version from asdf.
typeset -g POWERLEVEL9K_ASDF_HASKELL_FOREGROUND=172
# typeset -g POWERLEVEL9K_ASDF_HASKELL_VISUAL_IDENTIFIER_EXPANSION='⭐'
# typeset -g POWERLEVEL9K_ASDF_HASKELL_SHOW_ON_UPGLOB='*.foo|*.bar'
##########[ nordvpn: nordvpn connection status, linux only (https://nordvpn.com/) ]###########
# NordVPN connection indicator color.
typeset -g POWERLEVEL9K_NORDVPN_FOREGROUND=39
# Hide NordVPN connection indicator when not connected.
typeset -g POWERLEVEL9K_NORDVPN_{DISCONNECTED,CONNECTING,DISCONNECTING}_CONTENT_EXPANSION=
typeset -g POWERLEVEL9K_NORDVPN_{DISCONNECTED,CONNECTING,DISCONNECTING}_VISUAL_IDENTIFIER_EXPANSION=
# Custom icon.
# typeset -g POWERLEVEL9K_NORDVPN_VISUAL_IDENTIFIER_EXPANSION='⭐'
#################[ ranger: ranger shell (https://github.com/ranger/ranger) ]##################
# Ranger shell color.
typeset -g POWERLEVEL9K_RANGER_FOREGROUND=178
# Custom icon.
# typeset -g POWERLEVEL9K_RANGER_VISUAL_IDENTIFIER_EXPANSION='⭐'
######################[ nnn: nnn shell (https://github.com/jarun/nnn) ]#######################
# Nnn shell color.
typeset -g POWERLEVEL9K_NNN_FOREGROUND=72
# Custom icon.
# typeset -g POWERLEVEL9K_NNN_VISUAL_IDENTIFIER_EXPANSION='⭐'
###########################[ vim_shell: vim shell indicator (:sh) ]###########################
# Vim shell indicator color.
typeset -g POWERLEVEL9K_VIM_SHELL_FOREGROUND=34
# Custom icon.
# typeset -g POWERLEVEL9K_VIM_SHELL_VISUAL_IDENTIFIER_EXPANSION='⭐'
######[ midnight_commander: midnight commander shell (https://midnight-commander.org/) ]######
# Midnight Commander shell color.
typeset -g POWERLEVEL9K_MIDNIGHT_COMMANDER_FOREGROUND=178
# Custom icon.
# typeset -g POWERLEVEL9K_MIDNIGHT_COMMANDER_VISUAL_IDENTIFIER_EXPANSION='⭐'
#[ nix_shell: nix shell (https://nixos.org/nixos/nix-pills/developing-with-nix-shell.html) ]##
# Nix shell color.
typeset -g POWERLEVEL9K_NIX_SHELL_FOREGROUND=74
# Tip: If you want to see just the icon without "pure" and "impure", uncomment the next line.
# typeset -g POWERLEVEL9K_NIX_SHELL_CONTENT_EXPANSION=
# Custom icon.
# typeset -g POWERLEVEL9K_NIX_SHELL_VISUAL_IDENTIFIER_EXPANSION='⭐'
##################################[ disk_usage: disk usage ]##################################
# Colors for different levels of disk usage.
typeset -g POWERLEVEL9K_DISK_USAGE_NORMAL_FOREGROUND=35
typeset -g POWERLEVEL9K_DISK_USAGE_WARNING_FOREGROUND=220
typeset -g POWERLEVEL9K_DISK_USAGE_CRITICAL_FOREGROUND=160
# Thresholds for different levels of disk usage (percentage points).
typeset -g POWERLEVEL9K_DISK_USAGE_WARNING_LEVEL=90
typeset -g POWERLEVEL9K_DISK_USAGE_CRITICAL_LEVEL=95
# If set to true, hide disk usage when below $POWERLEVEL9K_DISK_USAGE_WARNING_LEVEL percent.
typeset -g POWERLEVEL9K_DISK_USAGE_ONLY_WARNING=false
# Custom icon.
# typeset -g POWERLEVEL9K_DISK_USAGE_VISUAL_IDENTIFIER_EXPANSION='⭐'
######################################[ ram: free RAM ]#######################################
# RAM color.
typeset -g POWERLEVEL9K_RAM_FOREGROUND=66
# Custom icon.
# typeset -g POWERLEVEL9K_RAM_VISUAL_IDENTIFIER_EXPANSION='⭐'
#####################################[ swap: used swap ]######################################
# Swap color.
typeset -g POWERLEVEL9K_SWAP_FOREGROUND=96
# Custom icon.
# typeset -g POWERLEVEL9K_SWAP_VISUAL_IDENTIFIER_EXPANSION='⭐'
######################################[ load: CPU load ]######################################
# Show average CPU load over this many most recent minutes. Valid values are 1, 5 and 15.
typeset -g POWERLEVEL9K_LOAD_WHICH=5
# Load color when load is under 50%.
typeset -g POWERLEVEL9K_LOAD_NORMAL_FOREGROUND=66
# Load color when load is between 50% and 70%.
typeset -g POWERLEVEL9K_LOAD_WARNING_FOREGROUND=178
# Load color when load is over 70%.
typeset -g POWERLEVEL9K_LOAD_CRITICAL_FOREGROUND=166
# Custom icon.
# typeset -g POWERLEVEL9K_LOAD_VISUAL_IDENTIFIER_EXPANSION='⭐'
################[ todo: todo items (https://github.com/todotxt/todo.txt-cli) ]################
# Todo color.
typeset -g POWERLEVEL9K_TODO_FOREGROUND=110
# Hide todo when the total number of tasks is zero.
typeset -g POWERLEVEL9K_TODO_HIDE_ZERO_TOTAL=true
# Hide todo when the number of tasks after filtering is zero.
typeset -g POWERLEVEL9K_TODO_HIDE_ZERO_FILTERED=false
# Todo format. The following parameters are available within the expansion.
#
# - P9K_TODO_TOTAL_TASK_COUNT The total number of tasks.
# - P9K_TODO_FILTERED_TASK_COUNT The number of tasks after filtering.
#
# These variables correspond to the last line of the output of `todo.sh -p ls`:
#
# TODO: 24 of 42 tasks shown
#
# Here 24 is P9K_TODO_FILTERED_TASK_COUNT and 42 is P9K_TODO_TOTAL_TASK_COUNT.
#
# typeset -g POWERLEVEL9K_TODO_CONTENT_EXPANSION='$P9K_TODO_FILTERED_TASK_COUNT'
# Custom icon.
# typeset -g POWERLEVEL9K_TODO_VISUAL_IDENTIFIER_EXPANSION='⭐'
###########[ timewarrior: timewarrior tracking status (https://timewarrior.net/) ]############
# Timewarrior color.
typeset -g POWERLEVEL9K_TIMEWARRIOR_FOREGROUND=110
# If the tracked task is longer than 24 characters, truncate and append "…".
# Tip: To always display tasks without truncation, delete the following parameter.
# Tip: To hide task names and display just the icon when time tracking is enabled, set the
# value of the following parameter to "".
typeset -g POWERLEVEL9K_TIMEWARRIOR_CONTENT_EXPANSION='${P9K_CONTENT:0:24}${${P9K_CONTENT:24}:+…}'
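# A worked example of the truncation above (task name is illustrative):
#
#   'write quarterly report for finance'  ->  'write quarterly report f…'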
# Custom icon.
# typeset -g POWERLEVEL9K_TIMEWARRIOR_VISUAL_IDENTIFIER_EXPANSION='⭐'
##############[ taskwarrior: taskwarrior task count (https://taskwarrior.org/) ]##############
# Taskwarrior color.
typeset -g POWERLEVEL9K_TASKWARRIOR_FOREGROUND=74
# Taskwarrior segment format. The following parameters are available within the expansion.
#
# - P9K_TASKWARRIOR_PENDING_COUNT The number of pending tasks: `task +PENDING count`.
# - P9K_TASKWARRIOR_OVERDUE_COUNT The number of overdue tasks: `task +OVERDUE count`.
#
# Zero values are represented as empty parameters.
#
# The default format:
#
# '${P9K_TASKWARRIOR_OVERDUE_COUNT:+"!$P9K_TASKWARRIOR_OVERDUE_COUNT/"}$P9K_TASKWARRIOR_PENDING_COUNT'
#
# typeset -g POWERLEVEL9K_TASKWARRIOR_CONTENT_EXPANSION='$P9K_TASKWARRIOR_PENDING_COUNT'
# Custom icon.
# typeset -g POWERLEVEL9K_TASKWARRIOR_VISUAL_IDENTIFIER_EXPANSION='⭐'
##################################[ context: user@hostname ]##################################
# Context color when running with privileges.
typeset -g POWERLEVEL9K_CONTEXT_ROOT_FOREGROUND=178
# Context color in SSH without privileges.
typeset -g POWERLEVEL9K_CONTEXT_{REMOTE,REMOTE_SUDO}_FOREGROUND=180
# Default context color (no privileges, no SSH).
typeset -g POWERLEVEL9K_CONTEXT_FOREGROUND=180
# Context format when running with privileges: bold user@hostname.
typeset -g POWERLEVEL9K_CONTEXT_ROOT_TEMPLATE='%B%n@%m'
# Context format when in SSH without privileges: user@hostname.
typeset -g POWERLEVEL9K_CONTEXT_{REMOTE,REMOTE_SUDO}_TEMPLATE='%n@%m'
# Default context format (no privileges, no SSH): user@hostname.
typeset -g POWERLEVEL9K_CONTEXT_TEMPLATE='%n@%m'
# Don't show context unless running with privileges or in SSH.
# Tip: Remove the next line to always show context.
typeset -g POWERLEVEL9K_CONTEXT_{DEFAULT,SUDO}_{CONTENT,VISUAL_IDENTIFIER}_EXPANSION=
# Custom icon.
# typeset -g POWERLEVEL9K_CONTEXT_VISUAL_IDENTIFIER_EXPANSION='⭐'
# Custom prefix.
# typeset -g POWERLEVEL9K_CONTEXT_PREFIX='%fwith '
###[ virtualenv: python virtual environment (https://docs.python.org/3/library/venv.html) ]###
# Python virtual environment color.
typeset -g POWERLEVEL9K_VIRTUALENV_FOREGROUND=37
# Don't show Python version next to the virtual environment name.
typeset -g POWERLEVEL9K_VIRTUALENV_SHOW_PYTHON_VERSION=false
# Separate environment name from Python version only with a space.
typeset -g POWERLEVEL9K_VIRTUALENV_{LEFT,RIGHT}_DELIMITER=
# Custom icon.
# typeset -g POWERLEVEL9K_VIRTUALENV_VISUAL_IDENTIFIER_EXPANSION='⭐'
#####################[ anaconda: conda environment (https://conda.io/) ]######################
# Anaconda environment color.
typeset -g POWERLEVEL9K_ANACONDA_FOREGROUND=37
# Don't show Python version next to the anaconda environment name.
typeset -g POWERLEVEL9K_ANACONDA_SHOW_PYTHON_VERSION=false
# Separate environment name from Python version only with a space.
typeset -g POWERLEVEL9K_ANACONDA_{LEFT,RIGHT}_DELIMITER=
# Custom icon.
# typeset -g POWERLEVEL9K_ANACONDA_VISUAL_IDENTIFIER_EXPANSION='⭐'
################[ pyenv: python environment (https://github.com/pyenv/pyenv) ]################
# Pyenv color.
typeset -g POWERLEVEL9K_PYENV_FOREGROUND=37
# Hide python version if it doesn't come from one of these sources.
typeset -g POWERLEVEL9K_PYENV_SOURCES=(shell local global)
# If set to false, hide python version if it's the same as global:
# $(pyenv version-name) == $(pyenv global).
typeset -g POWERLEVEL9K_PYENV_PROMPT_ALWAYS_SHOW=false
# If set to false, hide python version if it's equal to "system".
typeset -g POWERLEVEL9K_PYENV_SHOW_SYSTEM=true
# Custom icon.
# typeset -g POWERLEVEL9K_PYENV_VISUAL_IDENTIFIER_EXPANSION='⭐'
################[ goenv: go environment (https://github.com/syndbg/goenv) ]################
# Goenv color.
typeset -g POWERLEVEL9K_GOENV_FOREGROUND=37
# Hide go version if it doesn't come from one of these sources.
typeset -g POWERLEVEL9K_GOENV_SOURCES=(shell local global)
# If set to false, hide go version if it's the same as global:
# $(goenv version-name) == $(goenv global).
typeset -g POWERLEVEL9K_GOENV_PROMPT_ALWAYS_SHOW=false
# If set to false, hide go version if it's equal to "system".
typeset -g POWERLEVEL9K_GOENV_SHOW_SYSTEM=true
# Custom icon.
# typeset -g POWERLEVEL9K_GOENV_VISUAL_IDENTIFIER_EXPANSION='⭐'
##########[ nodenv: node.js version from nodenv (https://github.com/nodenv/nodenv) ]##########
# Nodenv color.
typeset -g POWERLEVEL9K_NODENV_FOREGROUND=70
# Hide node version if it doesn't come from one of these sources.
typeset -g POWERLEVEL9K_NODENV_SOURCES=(shell local global)
# If set to false, hide node version if it's the same as global:
# $(nodenv version-name) == $(nodenv global).
typeset -g POWERLEVEL9K_NODENV_PROMPT_ALWAYS_SHOW=false
# If set to false, hide node version if it's equal to "system".
typeset -g POWERLEVEL9K_NODENV_SHOW_SYSTEM=true
# Custom icon.
# typeset -g POWERLEVEL9K_NODENV_VISUAL_IDENTIFIER_EXPANSION='⭐'
##############[ nvm: node.js version from nvm (https://github.com/nvm-sh/nvm) ]###############
# Nvm color.
typeset -g POWERLEVEL9K_NVM_FOREGROUND=70
# Custom icon.
# typeset -g POWERLEVEL9K_NVM_VISUAL_IDENTIFIER_EXPANSION='⭐'
############[ nodeenv: node.js environment (https://github.com/ekalinin/nodeenv) ]############
# Nodeenv color.
typeset -g POWERLEVEL9K_NODEENV_FOREGROUND=70
# Don't show Node version next to the environment name.
typeset -g POWERLEVEL9K_NODEENV_SHOW_NODE_VERSION=false
# Separate environment name from Node version only with a space.
typeset -g POWERLEVEL9K_NODEENV_{LEFT,RIGHT}_DELIMITER=
# Custom icon.
# typeset -g POWERLEVEL9K_NODEENV_VISUAL_IDENTIFIER_EXPANSION='⭐'
##############################[ node_version: node.js version ]###############################
# Node version color.
typeset -g POWERLEVEL9K_NODE_VERSION_FOREGROUND=70
# Show node version only when in a directory tree containing package.json.
typeset -g POWERLEVEL9K_NODE_VERSION_PROJECT_ONLY=true
# Custom icon.
# typeset -g POWERLEVEL9K_NODE_VERSION_VISUAL_IDENTIFIER_EXPANSION='⭐'
#######################[ go_version: go version (https://golang.org) ]########################
# Go version color.
typeset -g POWERLEVEL9K_GO_VERSION_FOREGROUND=37
# Show go version only when in a go project subdirectory.
typeset -g POWERLEVEL9K_GO_VERSION_PROJECT_ONLY=true
# Custom icon.
# typeset -g POWERLEVEL9K_GO_VERSION_VISUAL_IDENTIFIER_EXPANSION='⭐'
#################[ rust_version: rustc version (https://www.rust-lang.org) ]##################
# Rust version color.
typeset -g POWERLEVEL9K_RUST_VERSION_FOREGROUND=37
# Show rust version only when in a rust project subdirectory.
typeset -g POWERLEVEL9K_RUST_VERSION_PROJECT_ONLY=true
# Custom icon.
# typeset -g POWERLEVEL9K_RUST_VERSION_VISUAL_IDENTIFIER_EXPANSION='⭐'
###############[ dotnet_version: .NET version (https://dotnet.microsoft.com) ]################
# .NET version color.
typeset -g POWERLEVEL9K_DOTNET_VERSION_FOREGROUND=134
# Show .NET version only when in a .NET project subdirectory.
typeset -g POWERLEVEL9K_DOTNET_VERSION_PROJECT_ONLY=true
# Custom icon.
# typeset -g POWERLEVEL9K_DOTNET_VERSION_VISUAL_IDENTIFIER_EXPANSION='⭐'
#####################[ php_version: php version (https://www.php.net/) ]######################
# PHP version color.
typeset -g POWERLEVEL9K_PHP_VERSION_FOREGROUND=99
# Show PHP version only when in a PHP project subdirectory.
typeset -g POWERLEVEL9K_PHP_VERSION_PROJECT_ONLY=true
# Custom icon.
# typeset -g POWERLEVEL9K_PHP_VERSION_VISUAL_IDENTIFIER_EXPANSION='⭐'
##########[ laravel_version: laravel php framework version (https://laravel.com/) ]###########
# Laravel version color.
typeset -g POWERLEVEL9K_LARAVEL_VERSION_FOREGROUND=161
# Custom icon.
# typeset -g POWERLEVEL9K_LARAVEL_VERSION_VISUAL_IDENTIFIER_EXPANSION='⭐'
####################[ java_version: java version (https://www.java.com/) ]####################
# Java version color.
typeset -g POWERLEVEL9K_JAVA_VERSION_FOREGROUND=32
# Show java version only when in a java project subdirectory.
typeset -g POWERLEVEL9K_JAVA_VERSION_PROJECT_ONLY=true
# Show brief version.
typeset -g POWERLEVEL9K_JAVA_VERSION_FULL=false
# Custom icon.
# typeset -g POWERLEVEL9K_JAVA_VERSION_VISUAL_IDENTIFIER_EXPANSION='⭐'
###[ package: name@version from package.json (https://docs.npmjs.com/files/package.json) ]####
# Package color.
typeset -g POWERLEVEL9K_PACKAGE_FOREGROUND=117
# Package format. The following parameters are available within the expansion.
#
# - P9K_PACKAGE_NAME The value of `name` field in package.json.
# - P9K_PACKAGE_VERSION The value of `version` field in package.json.
#
# typeset -g POWERLEVEL9K_PACKAGE_CONTENT_EXPANSION='${P9K_PACKAGE_NAME//\%/%%}@${P9K_PACKAGE_VERSION//\%/%%}'
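# For example, a package.json with name "my-app" and version "1.2.3" renders as my-app@1.2.3
# (values are illustrative).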
# Custom icon.
# typeset -g POWERLEVEL9K_PACKAGE_VISUAL_IDENTIFIER_EXPANSION='⭐'
#############[ rbenv: ruby version from rbenv (https://github.com/rbenv/rbenv) ]##############
# Rbenv color.
typeset -g POWERLEVEL9K_RBENV_FOREGROUND=168
# Hide ruby version if it doesn't come from one of these sources.
typeset -g POWERLEVEL9K_RBENV_SOURCES=(shell local global)
# If set to false, hide ruby version if it's the same as global:
# $(rbenv version-name) == $(rbenv global).
typeset -g POWERLEVEL9K_RBENV_PROMPT_ALWAYS_SHOW=false
# If set to false, hide ruby version if it's equal to "system".
typeset -g POWERLEVEL9K_RBENV_SHOW_SYSTEM=true
# Custom icon.
# typeset -g POWERLEVEL9K_RBENV_VISUAL_IDENTIFIER_EXPANSION='⭐'
#######################[ rvm: ruby version from rvm (https://rvm.io) ]########################
# Rvm color.
typeset -g POWERLEVEL9K_RVM_FOREGROUND=168
# Don't show @gemset at the end.
typeset -g POWERLEVEL9K_RVM_SHOW_GEMSET=false
# Don't show ruby- at the front.
typeset -g POWERLEVEL9K_RVM_SHOW_PREFIX=false
# Custom icon.
# typeset -g POWERLEVEL9K_RVM_VISUAL_IDENTIFIER_EXPANSION='⭐'
###########[ fvm: flutter version management (https://github.com/leoafarias/fvm) ]############
# Fvm color.
typeset -g POWERLEVEL9K_FVM_FOREGROUND=38
# Custom icon.
# typeset -g POWERLEVEL9K_FVM_VISUAL_IDENTIFIER_EXPANSION='⭐'
##########[ luaenv: lua version from luaenv (https://github.com/cehoffman/luaenv) ]###########
# Lua color.
typeset -g POWERLEVEL9K_LUAENV_FOREGROUND=32
# Hide lua version if it doesn't come from one of these sources.
typeset -g POWERLEVEL9K_LUAENV_SOURCES=(shell local global)
# If set to false, hide lua version if it's the same as global:
# $(luaenv version-name) == $(luaenv global).
typeset -g POWERLEVEL9K_LUAENV_PROMPT_ALWAYS_SHOW=false
# If set to false, hide lua version if it's equal to "system".
typeset -g POWERLEVEL9K_LUAENV_SHOW_SYSTEM=true
# Custom icon.
# typeset -g POWERLEVEL9K_LUAENV_VISUAL_IDENTIFIER_EXPANSION='⭐'
###############[ jenv: java version from jenv (https://github.com/jenv/jenv) ]################
# Java color.
typeset -g POWERLEVEL9K_JENV_FOREGROUND=32
# Hide java version if it doesn't come from one of these sources.
typeset -g POWERLEVEL9K_JENV_SOURCES=(shell local global)
# If set to false, hide java version if it's the same as global:
# $(jenv version-name) == $(jenv global).
typeset -g POWERLEVEL9K_JENV_PROMPT_ALWAYS_SHOW=false
# If set to false, hide java version if it's equal to "system".
typeset -g POWERLEVEL9K_JENV_SHOW_SYSTEM=true
# Custom icon.
# typeset -g POWERLEVEL9K_JENV_VISUAL_IDENTIFIER_EXPANSION='⭐'
###########[ plenv: perl version from plenv (https://github.com/tokuhirom/plenv) ]############
# Perl color.
typeset -g POWERLEVEL9K_PLENV_FOREGROUND=67
# Hide perl version if it doesn't come from one of these sources.
typeset -g POWERLEVEL9K_PLENV_SOURCES=(shell local global)
# If set to false, hide perl version if it's the same as global:
# $(plenv version-name) == $(plenv global).
typeset -g POWERLEVEL9K_PLENV_PROMPT_ALWAYS_SHOW=false
# If set to false, hide perl version if it's equal to "system".
typeset -g POWERLEVEL9K_PLENV_SHOW_SYSTEM=true
# Custom icon.
# typeset -g POWERLEVEL9K_PLENV_VISUAL_IDENTIFIER_EXPANSION='⭐'
############[ phpenv: php version from phpenv (https://github.com/phpenv/phpenv) ]############
# PHP color.
typeset -g POWERLEVEL9K_PHPENV_FOREGROUND=99
# Hide php version if it doesn't come from one of these sources.
typeset -g POWERLEVEL9K_PHPENV_SOURCES=(shell local global)
# If set to false, hide php version if it's the same as global:
# $(phpenv version-name) == $(phpenv global).
typeset -g POWERLEVEL9K_PHPENV_PROMPT_ALWAYS_SHOW=false
# If set to false, hide php version if it's equal to "system".
typeset -g POWERLEVEL9K_PHPENV_SHOW_SYSTEM=true
# Custom icon.
# typeset -g POWERLEVEL9K_PHPENV_VISUAL_IDENTIFIER_EXPANSION='⭐'
##########[ haskell_stack: haskell version from stack (https://haskellstack.org/) ]###########
# Haskell color.
typeset -g POWERLEVEL9K_HASKELL_STACK_FOREGROUND=172
# Hide haskell version if it doesn't come from one of these sources.
#
# shell: version is set by STACK_YAML
# local: version is set by stack.yaml up the directory tree
# global: version is set by the implicit global project (~/.stack/global-project/stack.yaml)
typeset -g POWERLEVEL9K_HASKELL_STACK_SOURCES=(shell local)
# If set to false, hide haskell version if it's the same as in the implicit global project.
typeset -g POWERLEVEL9K_HASKELL_STACK_ALWAYS_SHOW=true
# Custom icon.
# typeset -g POWERLEVEL9K_HASKELL_STACK_VISUAL_IDENTIFIER_EXPANSION='⭐'
#############[ kubecontext: current kubernetes context (https://kubernetes.io/) ]#############
# Show kubecontext only when the command you are typing invokes one of these tools.
# Tip: Remove the next line to always show kubecontext.
typeset -g POWERLEVEL9K_KUBECONTEXT_SHOW_ON_COMMAND='kubectl|helm|kubens|kubectx|oc|istioctl|kogito'
# Kubernetes context classes for the purpose of using different colors, icons and expansions with
# different contexts.
#
# POWERLEVEL9K_KUBECONTEXT_CLASSES is an array with even number of elements. The first element
# in each pair defines a pattern against which the current kubernetes context gets matched.
# More specifically, it's P9K_CONTENT prior to the application of context expansion (see below)
# that gets matched. If you unset all POWERLEVEL9K_KUBECONTEXT_*CONTENT_EXPANSION parameters,
# you'll see this value in your prompt. The second element of each pair in
# POWERLEVEL9K_KUBECONTEXT_CLASSES defines the context class. Patterns are tried in order. The
# first match wins.
#
# For example, given these settings:
#
# typeset -g POWERLEVEL9K_KUBECONTEXT_CLASSES=(
# '*prod*' PROD
# '*test*' TEST
# '*' DEFAULT)
#
# If your current kubernetes context is "deathray-testing/default", its class is TEST
# because "deathray-testing/default" doesn't match the pattern '*prod*' but does match '*test*'.
#
# You can define different colors, icons and content expansions for different classes:
#
# typeset -g POWERLEVEL9K_KUBECONTEXT_TEST_FOREGROUND=28
# typeset -g POWERLEVEL9K_KUBECONTEXT_TEST_VISUAL_IDENTIFIER_EXPANSION='⭐'
# typeset -g POWERLEVEL9K_KUBECONTEXT_TEST_CONTENT_EXPANSION='> ${P9K_CONTENT} <'
typeset -g POWERLEVEL9K_KUBECONTEXT_CLASSES=(
# '*prod*' PROD # These values are examples that are unlikely
# '*test*' TEST # to match your needs. Customize them as needed.
'*' DEFAULT)
typeset -g POWERLEVEL9K_KUBECONTEXT_DEFAULT_FOREGROUND=134
# typeset -g POWERLEVEL9K_KUBECONTEXT_DEFAULT_VISUAL_IDENTIFIER_EXPANSION='⭐'
# Use POWERLEVEL9K_KUBECONTEXT_CONTENT_EXPANSION to specify the content displayed by kubecontext
# segment. Parameter expansions are very flexible and fast, too. See reference:
# http://zsh.sourceforge.net/Doc/Release/Expansion.html#Parameter-Expansion.
#
# Within the expansion the following parameters are always available:
#
# - P9K_CONTENT The content that would've been displayed if there was no content
# expansion defined.
# - P9K_KUBECONTEXT_NAME The current context's name. Corresponds to column NAME in the
# output of `kubectl config get-contexts`.
# - P9K_KUBECONTEXT_CLUSTER The current context's cluster. Corresponds to column CLUSTER in the
# output of `kubectl config get-contexts`.
# - P9K_KUBECONTEXT_NAMESPACE The current context's namespace. Corresponds to column NAMESPACE
# in the output of `kubectl config get-contexts`. If there is no
# namespace, the parameter is set to "default".
# - P9K_KUBECONTEXT_USER The current context's user. Corresponds to column AUTHINFO in the
# output of `kubectl config get-contexts`.
#
# If the context points to Google Kubernetes Engine (GKE) or Elastic Kubernetes Service (EKS),
# the following extra parameters are available:
#
# - P9K_KUBECONTEXT_CLOUD_NAME Either "gke" or "eks".
# - P9K_KUBECONTEXT_CLOUD_ACCOUNT Account/project ID.
# - P9K_KUBECONTEXT_CLOUD_ZONE Availability zone.
# - P9K_KUBECONTEXT_CLOUD_CLUSTER Cluster.
#
# P9K_KUBECONTEXT_CLOUD_* parameters are derived from P9K_KUBECONTEXT_CLUSTER. For example,
# if P9K_KUBECONTEXT_CLUSTER is "gke_my-account_us-east1-a_my-cluster-01":
#
# - P9K_KUBECONTEXT_CLOUD_NAME=gke
# - P9K_KUBECONTEXT_CLOUD_ACCOUNT=my-account
# - P9K_KUBECONTEXT_CLOUD_ZONE=us-east1-a
# - P9K_KUBECONTEXT_CLOUD_CLUSTER=my-cluster-01
#
# If P9K_KUBECONTEXT_CLUSTER is "arn:aws:eks:us-east-1:123456789012:cluster/my-cluster-01":
#
# - P9K_KUBECONTEXT_CLOUD_NAME=eks
# - P9K_KUBECONTEXT_CLOUD_ACCOUNT=123456789012
# - P9K_KUBECONTEXT_CLOUD_ZONE=us-east-1
# - P9K_KUBECONTEXT_CLOUD_CLUSTER=my-cluster-01
typeset -g POWERLEVEL9K_KUBECONTEXT_DEFAULT_CONTENT_EXPANSION=
# Show P9K_KUBECONTEXT_CLOUD_CLUSTER if it's not empty; otherwise fall back to P9K_KUBECONTEXT_NAME.
POWERLEVEL9K_KUBECONTEXT_DEFAULT_CONTENT_EXPANSION+='${P9K_KUBECONTEXT_CLOUD_CLUSTER:-${P9K_KUBECONTEXT_NAME}}'
# Append the current context's namespace if it's not "default".
POWERLEVEL9K_KUBECONTEXT_DEFAULT_CONTENT_EXPANSION+='${${:-/$P9K_KUBECONTEXT_NAMESPACE}:#/default}'
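# With the two lines above, the GKE context from the earlier example
# ("gke_my-account_us-east1-a_my-cluster-01") renders as "my-cluster-01/kube-system" when the
# namespace is "kube-system", and as just "my-cluster-01" when the namespace is "default".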
# Custom prefix.
# typeset -g POWERLEVEL9K_KUBECONTEXT_PREFIX='%fat '
################[ terraform: terraform workspace (https://www.terraform.io) ]#################
# POWERLEVEL9K_TERRAFORM_CLASSES is an array with even number of elements. The first element
# in each pair defines a pattern against which the current terraform workspace gets matched.
# More specifically, it's P9K_CONTENT prior to the application of context expansion (see below)
# that gets matched. If you unset all POWERLEVEL9K_TERRAFORM_*CONTENT_EXPANSION parameters,
# you'll see this value in your prompt. The second element of each pair in
# POWERLEVEL9K_TERRAFORM_CLASSES defines the workspace class. Patterns are tried in order. The
# first match wins.
#
# For example, given these settings:
#
# typeset -g POWERLEVEL9K_TERRAFORM_CLASSES=(
# '*prod*' PROD
# '*test*' TEST
# '*' DEFAULT)
#
# If your current terraform workspace is "project_test", its class is TEST because "project_test"
# doesn't match the pattern '*prod*' but does match '*test*'.
#
# You can define different colors, icons and content expansions for different classes:
#
# typeset -g POWERLEVEL9K_TERRAFORM_TEST_FOREGROUND=28
# typeset -g POWERLEVEL9K_TERRAFORM_TEST_VISUAL_IDENTIFIER_EXPANSION='⭐'
# typeset -g POWERLEVEL9K_TERRAFORM_TEST_CONTENT_EXPANSION='> ${P9K_CONTENT} <'
typeset -g POWERLEVEL9K_TERRAFORM_CLASSES=(
# '*prod*' PROD # These values are examples that are unlikely
# '*test*' TEST # to match your needs. Customize them as needed.
'*' DEFAULT)
typeset -g POWERLEVEL9K_TERRAFORM_DEFAULT_FOREGROUND=38
# typeset -g POWERLEVEL9K_TERRAFORM_DEFAULT_VISUAL_IDENTIFIER_EXPANSION='⭐'
#[ aws: aws profile (https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-profiles.html) ]#
# Show aws only when the command you are typing invokes one of these tools.
# Tip: Remove the next line to always show aws.
typeset -g POWERLEVEL9K_AWS_SHOW_ON_COMMAND='aws|awless|terraform|pulumi'
# POWERLEVEL9K_AWS_CLASSES is an array with even number of elements. The first element
# in each pair defines a pattern against which the current AWS profile gets matched.
# More specifically, it's P9K_CONTENT prior to the application of context expansion (see below)
# that gets matched. If you unset all POWERLEVEL9K_AWS_*CONTENT_EXPANSION parameters,
# you'll see this value in your prompt. The second element of each pair in
# POWERLEVEL9K_AWS_CLASSES defines the profile class. Patterns are tried in order. The
# first match wins.
#
# For example, given these settings:
#
# typeset -g POWERLEVEL9K_AWS_CLASSES=(
# '*prod*' PROD
# '*test*' TEST
# '*' DEFAULT)
#
# If your current AWS profile is "company_test", its class is TEST
# because "company_test" doesn't match the pattern '*prod*' but does match '*test*'.
#
# You can define different colors, icons and content expansions for different classes:
#
# typeset -g POWERLEVEL9K_AWS_TEST_FOREGROUND=28
# typeset -g POWERLEVEL9K_AWS_TEST_VISUAL_IDENTIFIER_EXPANSION='⭐'
# typeset -g POWERLEVEL9K_AWS_TEST_CONTENT_EXPANSION='> ${P9K_CONTENT} <'
typeset -g POWERLEVEL9K_AWS_CLASSES=(
# '*prod*' PROD # These values are examples that are unlikely
# '*test*' TEST # to match your needs. Customize them as needed.
'*' DEFAULT)
typeset -g POWERLEVEL9K_AWS_DEFAULT_FOREGROUND=208
# typeset -g POWERLEVEL9K_AWS_DEFAULT_VISUAL_IDENTIFIER_EXPANSION='⭐'
#[ aws_eb_env: aws elastic beanstalk environment (https://aws.amazon.com/elasticbeanstalk/) ]#
# AWS Elastic Beanstalk environment color.
typeset -g POWERLEVEL9K_AWS_EB_ENV_FOREGROUND=70
# Custom icon.
# typeset -g POWERLEVEL9K_AWS_EB_ENV_VISUAL_IDENTIFIER_EXPANSION='⭐'
##########[ azure: azure account name (https://docs.microsoft.com/en-us/cli/azure) ]##########
# Show azure only when the command you are typing invokes one of these tools.
# Tip: Remove the next line to always show azure.
typeset -g POWERLEVEL9K_AZURE_SHOW_ON_COMMAND='az|terraform|pulumi'
# Azure account name color.
typeset -g POWERLEVEL9K_AZURE_FOREGROUND=32
# Custom icon.
# typeset -g POWERLEVEL9K_AZURE_VISUAL_IDENTIFIER_EXPANSION='⭐'
##########[ gcloud: google cloud account and project (https://cloud.google.com/) ]###########
# Show gcloud only when the command you are typing invokes one of these tools.
# Tip: Remove the next line to always show gcloud.
typeset -g POWERLEVEL9K_GCLOUD_SHOW_ON_COMMAND='gcloud|gcs'
# Google cloud color.
typeset -g POWERLEVEL9K_GCLOUD_FOREGROUND=32
# Google cloud format. Change the value of POWERLEVEL9K_GCLOUD_CONTENT_EXPANSION if the default
# is too verbose or not informative enough.
#
# P9K_GCLOUD_ACCOUNT: the output of `gcloud config get-value account`
# P9K_GCLOUD_PROJECT: the output of `gcloud config get-value project`
# ${VARIABLE//\%/%%}: ${VARIABLE} with all occurrences of '%' replaced with '%%'.
#
typeset -g POWERLEVEL9K_GCLOUD_CONTENT_EXPANSION='${P9K_GCLOUD_PROJECT//\%/%%}'
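# A hedged alternative that shows both account and project (illustrative, not the default):
#
#   typeset -g POWERLEVEL9K_GCLOUD_CONTENT_EXPANSION='${P9K_GCLOUD_ACCOUNT//\%/%%}:${P9K_GCLOUD_PROJECT//\%/%%}'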
# Custom icon.
# typeset -g POWERLEVEL9K_GCLOUD_VISUAL_IDENTIFIER_EXPANSION='⭐'
#[ google_app_cred: google application credentials (https://cloud.google.com/docs/authentication/production) ]#
# Show google_app_cred only when the command you are typing invokes one of these tools.
# Tip: Remove the next line to always show google_app_cred.
typeset -g POWERLEVEL9K_GOOGLE_APP_CRED_SHOW_ON_COMMAND='terraform|pulumi'
# Google application credentials classes for the purpose of using different colors, icons and
# expansions with different credentials.
#
# POWERLEVEL9K_GOOGLE_APP_CRED_CLASSES is an array with even number of elements. The first
# element in each pair defines a pattern against which the current kubernetes context gets
# matched. More specifically, it's P9K_CONTENT prior to the application of context expansion
# (see below) that gets matched. If you unset all POWERLEVEL9K_GOOGLE_APP_CRED_*CONTENT_EXPANSION
# parameters, you'll see this value in your prompt. The second element of each pair in
# POWERLEVEL9K_GOOGLE_APP_CRED_CLASSES defines the context class. Patterns are tried in order.
# The first match wins.
#
# For example, given these settings:
#
# typeset -g POWERLEVEL9K_GOOGLE_APP_CRED_CLASSES=(
# '*:*prod*:*' PROD
# '*:*test*:*' TEST
# '*' DEFAULT)
#
# If your current Google application credentials are "service_account:deathray-testing:[email protected]",
# their class is TEST because they don't match the pattern '*:*prod*:*' but do match '*:*test*:*'.
#
# You can define different colors, icons and content expansions for different classes:
#
# typeset -g POWERLEVEL9K_GOOGLE_APP_CRED_TEST_FOREGROUND=28
# typeset -g POWERLEVEL9K_GOOGLE_APP_CRED_TEST_VISUAL_IDENTIFIER_EXPANSION='⭐'
# typeset -g POWERLEVEL9K_GOOGLE_APP_CRED_TEST_CONTENT_EXPANSION='$P9K_GOOGLE_APP_CRED_PROJECT_ID'
typeset -g POWERLEVEL9K_GOOGLE_APP_CRED_CLASSES=(
# '*:*prod*:*' PROD # These values are examples that are unlikely
# '*:*test*:*' TEST # to match your needs. Customize them as needed.
'*' DEFAULT)
typeset -g POWERLEVEL9K_GOOGLE_APP_CRED_DEFAULT_FOREGROUND=32
# typeset -g POWERLEVEL9K_GOOGLE_APP_CRED_DEFAULT_VISUAL_IDENTIFIER_EXPANSION='⭐'
# Use POWERLEVEL9K_GOOGLE_APP_CRED_CONTENT_EXPANSION to specify the content displayed by
# google_app_cred segment. Parameter expansions are very flexible and fast, too. See reference:
# http://zsh.sourceforge.net/Doc/Release/Expansion.html#Parameter-Expansion.
#
# You can use the following parameters in the expansion. Each of them corresponds to one of the
# fields in the JSON file pointed to by GOOGLE_APPLICATION_CREDENTIALS.
#
# Parameter | JSON key file field
# ---------------------------------+---------------
# P9K_GOOGLE_APP_CRED_TYPE | type
# P9K_GOOGLE_APP_CRED_PROJECT_ID | project_id
# P9K_GOOGLE_APP_CRED_CLIENT_EMAIL | client_email
#
# Note: ${VARIABLE//\%/%%} expands to ${VARIABLE} with all occurrences of '%' replaced by '%%'.
typeset -g POWERLEVEL9K_GOOGLE_APP_CRED_DEFAULT_CONTENT_EXPANSION='${P9K_GOOGLE_APP_CRED_PROJECT_ID//\%/%%}'
###############################[ public_ip: public IP address ]###############################
# Public IP color.
typeset -g POWERLEVEL9K_PUBLIC_IP_FOREGROUND=94
# Custom icon.
# typeset -g POWERLEVEL9K_PUBLIC_IP_VISUAL_IDENTIFIER_EXPANSION='⭐'
########################[ vpn_ip: virtual private network indicator ]#########################
# VPN IP color.
typeset -g POWERLEVEL9K_VPN_IP_FOREGROUND=81
# When on VPN, show just an icon without the IP address.
# Tip: To display the private IP address when on VPN, remove the next line.
typeset -g POWERLEVEL9K_VPN_IP_CONTENT_EXPANSION=
# Regular expression for the VPN network interface. Run `ifconfig` or `ip -4 a show` while on VPN
# to see the name of the interface.
typeset -g POWERLEVEL9K_VPN_IP_INTERFACE='(wg|(.*tun))[0-9]*'
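# For example, the pattern above matches interface names such as wg0, tun0 and utun3, but not
# eth0.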
# If set to true, show one segment per matching network interface. If set to false, show only
# one segment corresponding to the first matching network interface.
# Tip: If you set it to true, you'll probably want to unset POWERLEVEL9K_VPN_IP_CONTENT_EXPANSION.
typeset -g POWERLEVEL9K_VPN_IP_SHOW_ALL=false
# Custom icon.
# typeset -g POWERLEVEL9K_VPN_IP_VISUAL_IDENTIFIER_EXPANSION='⭐'
###########[ ip: ip address and bandwidth usage for a specified network interface ]###########
# IP color.
typeset -g POWERLEVEL9K_IP_FOREGROUND=38
# The following parameters are accessible within the expansion:
#
# Parameter | Meaning
# ----------------------+---------------
# P9K_IP_IP | IP address
# P9K_IP_INTERFACE | network interface
# P9K_IP_RX_BYTES | total number of bytes received
# P9K_IP_TX_BYTES | total number of bytes sent
# P9K_IP_RX_RATE | receive rate (since last prompt)
# P9K_IP_TX_RATE | send rate (since last prompt)
typeset -g POWERLEVEL9K_IP_CONTENT_EXPANSION='$P9K_IP_IP${P9K_IP_RX_RATE:+ %70F⇣$P9K_IP_RX_RATE}${P9K_IP_TX_RATE:+ %215F⇡$P9K_IP_TX_RATE}'
# Show information for the first network interface whose name matches this regular expression.
# Run `ifconfig` or `ip -4 a show` to see the names of all network interfaces.
typeset -g POWERLEVEL9K_IP_INTERFACE='e.*'
# Custom icon.
# typeset -g POWERLEVEL9K_IP_VISUAL_IDENTIFIER_EXPANSION='⭐'
#########################[ proxy: system-wide http/https/ftp proxy ]##########################
# Proxy color.
typeset -g POWERLEVEL9K_PROXY_FOREGROUND=68
# Custom icon.
# typeset -g POWERLEVEL9K_PROXY_VISUAL_IDENTIFIER_EXPANSION='⭐'
################################[ battery: internal battery ]#################################
# Show battery in red when it's below this level and not connected to power supply.
typeset -g POWERLEVEL9K_BATTERY_LOW_THRESHOLD=20
typeset -g POWERLEVEL9K_BATTERY_LOW_FOREGROUND=160
# Show battery in green when it's charging or fully charged.
typeset -g POWERLEVEL9K_BATTERY_{CHARGING,CHARGED}_FOREGROUND=70
# Show battery in yellow when it's discharging.
typeset -g POWERLEVEL9K_BATTERY_DISCONNECTED_FOREGROUND=178
# Battery pictograms going from low to high level of charge.
typeset -g POWERLEVEL9K_BATTERY_STAGES='\uf58d\uf579\uf57a\uf57b\uf57c\uf57d\uf57e\uf57f\uf580\uf581\uf578'
# Don't show the remaining time to charge/discharge.
typeset -g POWERLEVEL9K_BATTERY_VERBOSE=false
#####################################[ wifi: wifi speed ]#####################################
# WiFi color.
typeset -g POWERLEVEL9K_WIFI_FOREGROUND=68
# Custom icon.
# typeset -g POWERLEVEL9K_WIFI_VISUAL_IDENTIFIER_EXPANSION='⭐'
# Use different colors and icons depending on signal strength ($P9K_WIFI_BARS).
#
# # Wifi colors and icons for different signal strength levels (low to high).
# typeset -g my_wifi_fg=(68 68 68 68 68) # <-- change these values
# typeset -g my_wifi_icon=('WiFi' 'WiFi' 'WiFi' 'WiFi' 'WiFi') # <-- change these values
#
# typeset -g POWERLEVEL9K_WIFI_CONTENT_EXPANSION='%F{${my_wifi_fg[P9K_WIFI_BARS+1]}}$P9K_WIFI_LAST_TX_RATE Mbps'
# typeset -g POWERLEVEL9K_WIFI_VISUAL_IDENTIFIER_EXPANSION='%F{${my_wifi_fg[P9K_WIFI_BARS+1]}}${my_wifi_icon[P9K_WIFI_BARS+1]}'
#
# The following parameters are accessible within the expansions:
#
# Parameter | Meaning
# ----------------------+---------------
# P9K_WIFI_SSID | service set identifier, a.k.a. network name
# P9K_WIFI_LINK_AUTH | authentication protocol such as "wpa2-psk" or "none"
# P9K_WIFI_LAST_TX_RATE | wireless transmit rate in megabits per second
# P9K_WIFI_RSSI | signal strength in dBm, from -120 to 0
# P9K_WIFI_NOISE | noise in dBm, from -120 to 0
# P9K_WIFI_BARS | signal strength in bars, from 0 to 4 (derived from P9K_WIFI_RSSI and P9K_WIFI_NOISE)
#
# All parameters except P9K_WIFI_BARS are extracted from the output of the following command:
#
# /System/Library/PrivateFrameworks/Apple80211.framework/Versions/Current/Resources/airport -I
####################################[ time: current time ]####################################
# Current time color.
typeset -g POWERLEVEL9K_TIME_FOREGROUND=66
# Format for the current time: 09:51:02. See `man 3 strftime`.
typeset -g POWERLEVEL9K_TIME_FORMAT='%D{%H:%M:%S}'
# If set to true, time will update when you hit enter. This way prompts for the past
# commands will contain the start times of their commands as opposed to the default
# behavior where they contain the end times of their preceding commands.
typeset -g POWERLEVEL9K_TIME_UPDATE_ON_COMMAND=false
# Custom icon.
# typeset -g POWERLEVEL9K_TIME_VISUAL_IDENTIFIER_EXPANSION='⭐'
# Custom prefix.
# typeset -g POWERLEVEL9K_TIME_PREFIX='%fat '
# Example of a user-defined prompt segment. Function prompt_example will be called on every
# prompt if `example` prompt segment is added to POWERLEVEL9K_LEFT_PROMPT_ELEMENTS or
# POWERLEVEL9K_RIGHT_PROMPT_ELEMENTS. It displays an icon and orange text greeting the user.
#
# Type `p10k help segment` for documentation and a more sophisticated example.
function prompt_example() {
p10k segment -f 208 -i '⭐' -t 'hello, %n'
}
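# Another sketch along the same lines (hypothetical, not part of the stock
# config), reusing only the `p10k segment` flags demonstrated above: a
# segment that shows the running zsh version in yellow.
#
#   function prompt_zsh_version() {
#     p10k segment -f 220 -t "zsh $ZSH_VERSION"
#   }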
# User-defined prompt segments may optionally provide an instant_prompt_* function. Its job
# is to generate the prompt segment for display in instant prompt. See
# https://github.com/romkatv/powerlevel10k/blob/master/README.md#instant-prompt.
#
# Powerlevel10k will call instant_prompt_* at the same time as the regular prompt_* function
# and will record all `p10k segment` calls it makes. When displaying instant prompt, Powerlevel10k
# will replay these calls without actually calling instant_prompt_*. It is imperative that
# instant_prompt_* always makes the same `p10k segment` calls regardless of environment. If this
# rule is not observed, the content of instant prompt will be incorrect.
#
# Usually, you should either not define instant_prompt_* or simply call prompt_* from it. If
# instant_prompt_* is not defined for a segment, the segment won't be shown in instant prompt.
function instant_prompt_example() {
# Since prompt_example always makes the same `p10k segment` calls, we can call it from
# instant_prompt_example. This will give us the same `example` prompt segment in the instant
# and regular prompts.
prompt_example
}
# User-defined prompt segments can be customized the same way as built-in segments.
# typeset -g POWERLEVEL9K_EXAMPLE_FOREGROUND=208
# typeset -g POWERLEVEL9K_EXAMPLE_VISUAL_IDENTIFIER_EXPANSION='⭐'
# Transient prompt works similarly to the builtin transient_rprompt option. It trims down prompt
# when accepting a command line. Supported values:
#
# - off: Don't change prompt when accepting a command line.
# - always: Trim down prompt when accepting a command line.
# - same-dir: Trim down prompt when accepting a command line unless this is the first command
# typed after changing current working directory.
typeset -g POWERLEVEL9K_TRANSIENT_PROMPT=always
# Instant prompt mode.
#
# - off: Disable instant prompt. Choose this if you've tried instant prompt and found
# it incompatible with your zsh configuration files.
# - quiet: Enable instant prompt and don't print warnings when detecting console output
# during zsh initialization. Choose this if you've read and understood
# https://github.com/romkatv/powerlevel10k/blob/master/README.md#instant-prompt.
# - verbose: Enable instant prompt and print a warning when detecting console output during
# zsh initialization. Choose this if you've never tried instant prompt, haven't
# seen the warning, or if you are unsure what this all means.
typeset -g POWERLEVEL9K_INSTANT_PROMPT=verbose
# Hot reload allows you to change POWERLEVEL9K options after Powerlevel10k has been initialized.
# For example, you can type POWERLEVEL9K_BACKGROUND=red and see your prompt turn red. Hot reload
# can slow down prompt by 1-2 milliseconds, so it's better to keep it turned off unless you
# really need it.
typeset -g POWERLEVEL9K_DISABLE_HOT_RELOAD=true
# If p10k is already loaded, reload configuration.
# This works even with POWERLEVEL9K_DISABLE_HOT_RELOAD=true.
(( ! $+functions[p10k] )) || p10k reload
}
(( ${#p10k_config_opts} )) && setopt ${p10k_config_opts[@]}
'builtin' 'unset' 'p10k_config_opts'
|
finaldie/final_dev_env
|
zsh/p10k.zsh
|
Shell
|
mit
| 76,045 |
#!/usr/bin/env bash
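# Seed each Watchtower-related environment variable with a default when the
# caller left it unset or empty; `declare -x` exports the variable so the
# watchtower process started by the entrypoint can read it.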
declare -x WATCHTOWER_CLEANUP
[[ -z "${WATCHTOWER_CLEANUP}" ]] && WATCHTOWER_CLEANUP="false"
declare -x WATCHTOWER_DEBUG
[[ -z "${WATCHTOWER_DEBUG}" ]] && WATCHTOWER_DEBUG="false"
declare -x WATCHTOWER_ENABLE_LIFECYCLE_HOOKS
[[ -z "${WATCHTOWER_ENABLE_LIFECYCLE_HOOKS}" ]] && WATCHTOWER_ENABLE_LIFECYCLE_HOOKS="false"
declare -x WATCHTOWER_HOST
[[ -z "${WATCHTOWER_HOST}" ]] && WATCHTOWER_HOST="unix:///var/run/docker.sock"
declare -x WATCHTOWER_HTTP_API_METRICS
[[ -z "${WATCHTOWER_HTTP_API_METRICS}" ]] && WATCHTOWER_HTTP_API_METRICS="false"
declare -x WATCHTOWER_HTTP_API_TOKEN
[[ -z "${WATCHTOWER_HTTP_API_TOKEN}" ]] && WATCHTOWER_HTTP_API_TOKEN=""
declare -x WATCHTOWER_HTTP_API_UPDATE
[[ -z "${WATCHTOWER_HTTP_API_UPDATE}" ]] && WATCHTOWER_HTTP_API_UPDATE="false"
declare -x WATCHTOWER_INCLUDE_RESTARTING
[[ -z "${WATCHTOWER_INCLUDE_RESTARTING}" ]] && WATCHTOWER_INCLUDE_RESTARTING="false"
declare -x WATCHTOWER_INCLUDE_STOPPED
[[ -z "${WATCHTOWER_INCLUDE_STOPPED}" ]] && WATCHTOWER_INCLUDE_STOPPED="false"
declare -x WATCHTOWER_INTERVAL
[[ -z "${WATCHTOWER_INTERVAL}" ]] && WATCHTOWER_INTERVAL="300"
declare -x WATCHTOWER_LABEL_ENABLE
[[ -z "${WATCHTOWER_LABEL_ENABLE}" ]] && WATCHTOWER_LABEL_ENABLE="false"
declare -x WATCHTOWER_MONITOR_ONLY
[[ -z "${WATCHTOWER_MONITOR_ONLY}" ]] && WATCHTOWER_MONITOR_ONLY="false"
declare -x WATCHTOWER_NO_COLOR
[[ -z "${WATCHTOWER_NO_COLOR}" ]] && WATCHTOWER_NO_COLOR="false"
declare -x WATCHTOWER_NO_PULL
[[ -z "${WATCHTOWER_NO_PULL}" ]] && WATCHTOWER_NO_PULL="false"
declare -x WATCHTOWER_NO_RESTART
[[ -z "${WATCHTOWER_NO_RESTART}" ]] && WATCHTOWER_NO_RESTART="false"
declare -x WATCHTOWER_NO_STARTUP_MESSAGE
[[ -z "${WATCHTOWER_NO_STARTUP_MESSAGE}" ]] && WATCHTOWER_NO_STARTUP_MESSAGE="false"
declare -x WATCHTOWER_REMOVE_VOLUMES
[[ -z "${WATCHTOWER_REMOVE_VOLUMES}" ]] && WATCHTOWER_REMOVE_VOLUMES="false"
declare -x WATCHTOWER_REVIVE_STOPPED
[[ -z "${WATCHTOWER_REVIVE_STOPPED}" ]] && WATCHTOWER_REVIVE_STOPPED="false"
declare -x WATCHTOWER_ROLLING_RESTART
[[ -z "${WATCHTOWER_ROLLING_RESTART}" ]] && WATCHTOWER_ROLLING_RESTART="false"
declare -x WATCHTOWER_RUN_ONCE
[[ -z "${WATCHTOWER_RUN_ONCE}" ]] && WATCHTOWER_RUN_ONCE="false"
declare -x WATCHTOWER_SCHEDULE
[[ -z "${WATCHTOWER_SCHEDULE}" ]] && WATCHTOWER_SCHEDULE=""
declare -x WATCHTOWER_SCOPE
[[ -z "${WATCHTOWER_SCOPE}" ]] && WATCHTOWER_SCOPE=""
declare -x WATCHTOWER_STOP_TIMEOUT
[[ -z "${WATCHTOWER_STOP_TIMEOUT}" ]] && WATCHTOWER_STOP_TIMEOUT=""
declare -x WATCHTOWER_TLS_VERIFY
[[ -z "${WATCHTOWER_TLS_VERIFY}" ]] && WATCHTOWER_TLS_VERIFY="false"
declare -x WATCHTOWER_TRACE
[[ -z "${WATCHTOWER_TRACE}" ]] && WATCHTOWER_TRACE="false"
declare -x WATCHTOWER_WARN_ON_HEAD_FAILURE
[[ -z "${WATCHTOWER_WARN_ON_HEAD_FAILURE}" ]] && WATCHTOWER_WARN_ON_HEAD_FAILURE=""
declare -x WATCHTOWER_OPTS
[[ -z "${WATCHTOWER_OPTS}" ]] && WATCHTOWER_OPTS=""
declare -x DOCKER_API_VERSION
[[ -z "${DOCKER_API_VERSION}" ]] && DOCKER_API_VERSION="1.24"
true
|
dockhippie/watchtower
|
latest/overlay/etc/entrypoint.d/05-base.sh
|
Shell
|
mit
| 2,998 |
#!/bin/bash
# Build a tmux status-right string: current UTC time, uptime, and hostname.
UP=$(uptime | awk '{print $3}' | sed 's/,//')
HOST=$(hostname -s)
UTC=$(date -u +"%H:%M")
echo "UTC $UTC #[fg=green] $UP #[fg=black bg=green] $HOST "
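# Usage sketch (an assumption, not taken from the original repo): wire the
# script into tmux from ~/.tmux.conf so it re-runs every status-interval:
#   set -g status-right "#(/path/to/tmux-right.sh)"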
|
lwslade/fmux
|
tmux-right.sh
|
Shell
|
mit
| 256 |
#!/bin/sh
#############################################################################
# LongShort-ButtonPressed.sh
#
# This script is used in conjunction with the LongShort-ButtonReleased.sh
# script to allow a single button to serve two functions. When the button
# is held for a short period of time, the first action is performed and when
# the button is held for a long period of time, the second action is
# performed. This script initiates the button press timer by recording
# the time when the button was pressed.
#############################################################################
date +%s.%N > /var/tmp/buttonPressedSystem.txt
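#############################################################################
# For reference, a minimal sketch of the matching release handler. This is
# hypothetical -- the real LongShort-ButtonReleased.sh ships separately --
# and the 1.5 s threshold, the placeholder actions, and the use of bc for
# fractional-second math are all assumptions.
#
#   PRESSED=$(cat /var/tmp/buttonPressedSystem.txt)
#   RELEASED=$(date +%s.%N)
#   HELD=$(echo "$RELEASED - $PRESSED" | bc)
#   if [ "$(echo "$HELD < 1.5" | bc)" -eq 1 ]; then
#       : # short-press action goes here
#   else
#       : # long-press action goes here
#   fi
#############################################################################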
|
atomicnumber1/falcon_player_configurations
|
scripts/button_press_system.sh
|
Shell
|
mit
| 651 |
#!/bin/sh
@THEODEN@_HOME=$(dirname $0)/..
export @THEODEN@_HOME
. ${@THEODEN@_HOME}/etc/common
@[email protected]
exec ${@THEODEN@_BIN}/sqlite3.sh -init ${@THEODEN@_ETC}/@[email protected] ${@THEODEN@_DB}/@[email protected] \
'.exit'
|
cdsi/theoden
|
bin/theoden-db-load.sh
|
Shell
|
mit
| 240 |
#!/bin/bash
python exploit11/exploit11.py &>exploit11/exploit11.log
|
vhazali/cs5331
|
assignment2/scripts/exploit11.sh
|
Shell
|
mit
| 69 |
#! /bin/sh
######################################################################
#
# kinsyu
# A script that computes the denomination breakdown of an amount of money.
#
# This software is declared to be in the Public Domain (CC0).
#
######################################################################
# === Initialization =================================================
set -eu
umask 0022
export LC_ALL='C' PATH="$(command -p getconf PATH):$PATH"
kinsyu(){
# === Variables ======================================================
local kinsyu='10000 5000 1000 500 100 50 10 5 1'
local kinsyu_=0
local kingaku=""
# === Input validation ===============================================
if [ -t 0 ] ; then
    kingaku=$1
else
    # stdin is not a terminal: read the amount from standard input
    kingaku=$( cat - )
fi
# expr exits with status >= 2 when an operand is not numeric (status 1
# only means the arithmetic result was zero). Capture the status via ||
# so a failing expr does not abort the script under `set -eu`.
local rc=0
expr "$kingaku" + 1 >/dev/null 2>&1 || rc=$?
if [ "$rc" -ge 2 ]; then
    printf "Please enter a numeric value\n"
    exit 1
fi
if [ "$kingaku" -le 0 ]; then
    printf "Please enter a positive amount\n"
    exit 1
fi
# === Denomination calculation =======================================
# Walk the denominations from largest to smallest, printing the count
# of each and carrying the remainder forward.
for kinsyu_ in $kinsyu; do
    printf "%d " "$(( kingaku / kinsyu_ ))"
    kingaku=$(( kingaku % kinsyu_ ))
done
printf "\n"
}
#kinsyu 25343
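# Worked example: `kinsyu 25343` prints "2 1 0 0 3 0 4 0 3", i.e.
# 2x10000 + 1x5000 + 3x100 + 4x10 + 3x1 = 25343.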
|
div-jp/code
|
TOOL/kinsyu.sh
|
Shell
|
mit
| 2,281 |
#!/bin/bash
function install_x2go {
#from https://www.howtoforge.com/tutorial/x2go-server-ubuntu-14-04
  REPO_CHECK=$(dpkg-query -W --showformat='${Status}\n' x2goserver 2>/dev/null | grep "install ok installed")
if [ "" == "$REPO_CHECK" ]; then
apt_install software-properties-common
add-apt-repository -y ppa:x2go/stable
apt_quiet_update
apt_install x2goserver
apt_install x2goserver-xsession
# apt_install x2gomatebindings # if you use MATE/mubuntu
# apt_install x2golxdebindings # if you use LXDE/lubuntu
# See http://askubuntu.com/questions/763597/x2go-with-ubuntu-mate-xfce-16-04-fails-to-start
# to add the correct libs to the profile
#client
apt_install x2goclient
fi
}
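# Usage sketch: install_x2go assumes the helpers apt_install and
# apt_quiet_update are defined elsewhere in this repo, so source those
# first (the file name below is hypothetical):
#   source ./helpers.sh
#   install_x2go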
|
neilkidd/xubuntu-16.04.1-desktop-setup
|
install_x2go.sh
|
Shell
|
mit
| 723 |
# (c) Liviu Balan <[email protected]>
# http://www.liviubalan.com/
#
# For the full copyright and license information, please view the LICENSE
# file that was distributed with this source code.
LIV_TUT_META_URL='http://www.liviubalan.com/set-php-timezone-on-ubuntu-server'
|
liviubalan/liviubalan.com-vagrant-ubuntu
|
provision-shell/tutorials/000/038/meta.sh
|
Shell
|
mit
| 278 |
#!/bin/sh
#
# MAQAO - Modular Assembly Quality Analyzer and Optimizer
# http://maqao.org/
. ./helper.sh "$@"
set_stage
#wget http://maqao.org/release/maqao.intel64-xeonphi.tar.bz2
cp $TOTIENT_PKG/archives/maqao.intel64-xeonphi.tar.bz2 .
tar -xjf maqao.intel64-xeonphi.tar.bz2
mkdir -p $PREFIX/maqao-2.1.1
mkdir -p $PREFIX/maqao-2.1.1/bin
mv maqao.intel64-xeonphi $PREFIX/maqao-2.1.1/bin/maqao
mv man $PREFIX/maqao-2.1.1
leave_stage
|
cornell-cs5220-f15/totient-pkg
|
configs/maqao.sh
|
Shell
|
mit
| 439 |
#!/bin/sh
# Display Linux distribution
#
# Example:
# Distributor ID: Debian
# Description: Debian GNU/Linux 7.1 (wheezy)
# Release: 7.1
# Codename: wheezy
lsb_release -a 2>/dev/null
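# Alternative when lsb_release is missing: most modern distributions expose
# the same fields (NAME, VERSION, PRETTY_NAME, ...) in /etc/os-release:
#   cat /etc/os-release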
|
milosz/shell-octo-adventure
|
scripts/which_distribution.sh
|
Shell
|
mit
| 204 |
#!/bin/bash
# clean up
rm -rf /var/lib/jenkins/elis/deployed/*.jar
# copy dependencies in place
cp -R /var/lib/jenkins/workspace/elis-platform/code/felix-configuration/bundle/*.jar /var/lib/jenkins/elis/deployed
cp -R /var/lib/jenkins/elis/osgi/*.jar /var/lib/jenkins/elis/deployed
# restart felix
sudo /etc/init.d/felix-run restart
|
iotap-center/elis-platform
|
felix-configuration/scripts/post_build.sh
|
Shell
|
mit
| 337 |
#!/usr/bin/env bash
# Update ubuntu
/vagrant/ubuntu.sh
# Install nodejs
/vagrant/nodejs.sh
# Build and install CouchDB from source
/vagrant/couchdb.sh
|
redgeoff/couchdb2-vagrant
|
bootstrap.sh
|
Shell
|
mit
| 154 |
#!/bin/bash
#
# Setup ZSH as default shell
# Install Oh-My-ZSH
function install_zsh() {
ask_for_confirmation "Do you want to set up ZSH with Oh-My-ZSH?"
if answer_is_yes; then
# Test to see if zshell is installed with brew
if brew_test_package 'zsh'; then
print_success 'Homebrew has installed ZSH.'
# Install Oh My Zsh if it isn't already present
if [[ ! -d ~/.oh-my-zsh/ ]]; then
print_info 'Installing Oh My ZSH.'
sh -c "$(curl -fsSL https://raw.githubusercontent.com/robbyrussell/oh-my-zsh/master/tools/install.sh)"
print_success 'Oh My ZSH has been installed.'
fi
# Set the default shell to zsh if it isn't currently set to zsh
      if [[ "$SHELL" != "$(which zsh)" ]]; then
        # Register zsh as a valid login shell before switching to it
        grep -q "$(which zsh)" /etc/shells || sudo sh -c "echo $(which zsh) >> /etc/shells"
        chsh -s "$(which zsh)"
fi
else
# If zsh isn't installed, get the platform of the current machine
platform=$(uname);
# If the platform is Linux, try an apt-get to install zsh and then recurse
if [[ $platform == 'Linux' ]]; then
if [[ -f /etc/redhat-release ]]; then
sudo yum install zsh
install_zsh
fi
if [[ -f /etc/debian_version ]]; then
sudo apt-get install zsh
install_zsh
fi
# If the platform is OS X, tell the user to install zsh :)
elif [[ $platform == 'Darwin' ]]; then
print_info "We'll install zsh, then re-run this script!"
if ! brew_test_package 'zsh'; then
print_info 'Installing ZSH with Homebrew'
brew install zsh --with-unicode9
fi
exit
fi
fi
fi
}
function main() {
install_zsh
}
if [ "${1}" != "--source-only" ]; then
main "${@}"
fi
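# Usage sketch (following the --source-only guard above): run the script
# directly to install, or source it to reuse install_zsh elsewhere. Note
# that install_zsh also relies on helpers (ask_for_confirmation,
# print_success, brew_test_package) assumed to be defined in the dotfiles:
#   ./setup.sh                        # runs main and installs
#   source ./setup.sh --source-only   # only defines the functions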
|
nielsgl/dotfiles
|
zsh/setup.sh
|
Shell
|
mit
| 1,759 |
#!/bin/bash
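# Note: this file looks designed to be sourced from inside a bakesale stage
# function rather than executed directly -- `local` is only valid inside a
# function, and $bakesale_source must already be set by the caller.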
local sshs_str="$1"
local rvm_environment="$2"
local foreman_str="$3"
declare -a ssh_strs="${sshs_str[@]}"
local load_rvm=$(cat "$bakesale_source/stages/wave/_rvm.sh")
for ssh_str in "${ssh_strs[@]}" ; do
echo "$ssh_str"
ssh -t $ssh_str "
sudo bash -c \"
$load_rvm
rvm use $rvm_environment
rvm-prompt
gem list -i foreman || gem install foreman
foreman $foreman_str
\"
"
done
|
tiredpixel/bakesale
|
stages/wave/ssh_rvm_foreman.sh
|
Shell
|
mit
| 487 |