code
stringlengths 2
1.05M
| repo_name
stringlengths 5
110
| path
stringlengths 3
922
| language
stringclasses 1
value | license
stringclasses 15
values | size
int64 2
1.05M
|
---|---|---|---|---|---|
#!/bin/bash
#
# Copyright 2014 Kitsilano Software Inc.
#
# This file is part of MonoTizen.
#
# MonoTizen is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# MonoTizen is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with MonoTizen. If not, see <http://www.gnu.org/licenses/>.
set -e
# Pack the local buildbot bundle (tmp/buildbot.tar) into a target via
# tools/tar-in.sh. "$1" and "$2" are forwarded verbatim — presumably the
# VM image and the in-image destination path; TODO confirm against
# tools/tar-in.sh.
tools/tar-in.sh "$1" "$2" tmp/buildbot.tar
|
kitsilanosoftware/MonoTizen.VMs
|
bundles/buildbot/setup.sh
|
Shell
|
gpl-3.0
| 770 |
# Strip the per-frame data-file references (ReIm_NNNN.dat and
# " veff_NNNN.dat") from the gnuplot movie script. A single sed with two
# -e expressions replaces the original two-process pipeline
# (sed … | sed …) with identical output.
sed -e "s/ReIm_[0-9][0-9][0-9][0-9].dat//" -e "s/ veff_[0-9][0-9][0-9][0-9].dat//" movie.gplt
|
LTCC-UFG/eSPec
|
tools&scrips/chg_mvie.sh
|
Shell
|
gpl-3.0
| 94 |
#!/usr/bin/env bash
# ansible-test integration target: exercises collection module loading,
# callback redirection/removal, ansible-doc, default-collection
# resolution and collection inventories. Expects collection_root_user/
# and collection_root_sys/ trees plus the referenced playbooks beside it.
set -eux
# Restrict the search path to the two local collection roots; keep fact
# gathering explicit/minimal and make host-pattern mismatches fatal so
# silent inventory problems fail the run.
export ANSIBLE_COLLECTIONS_PATH=$PWD/collection_root_user:$PWD/collection_root_sys
export ANSIBLE_GATHERING=explicit
export ANSIBLE_GATHER_SUBSET=minimal
export ANSIBLE_HOST_PATTERN_MISMATCH=error
unset ANSIBLE_COLLECTIONS_ON_ANSIBLE_VERSION_MISMATCH
# FUTURE: just use INVENTORY_PATH as-is once ansible-test sets the right dir
ipath=../../$(basename "${INVENTORY_PATH:-../../inventory}")
export INVENTORY_PATH="$ipath"
# ensure we can call collection module
ansible localhost -m testns.testcoll.testmodule
# ensure we can call collection module with ansible_collections in path
ANSIBLE_COLLECTIONS_PATH=$PWD/collection_root_sys/ansible_collections ansible localhost -m testns.testcoll.testmodule
echo "--- validating callbacks"
# validate FQ callbacks in ansible-playbook
ANSIBLE_CALLBACKS_ENABLED=testns.testcoll.usercallback ansible-playbook noop.yml | grep "usercallback says ok"
# use adhoc for the rest of these tests, must force it to load other callbacks
export ANSIBLE_LOAD_CALLBACK_PLUGINS=1
# validate redirected callback
ANSIBLE_CALLBACKS_ENABLED=formerly_core_callback ansible localhost -m debug 2>&1 | grep -- "usercallback says ok"
## validate missing redirected callback
ANSIBLE_CALLBACKS_ENABLED=formerly_core_missing_callback ansible localhost -m debug 2>&1 | grep -- "Skipping callback plugin 'formerly_core_missing_callback'"
## validate redirected + removed callback (fatal)
ANSIBLE_CALLBACKS_ENABLED=formerly_core_removed_callback ansible localhost -m debug 2>&1 | grep -- "testns.testcoll.removedcallback has been removed"
# validate avoiding duplicate loading of callback, even if using diff names
[ "$(ANSIBLE_CALLBACKS_ENABLED=testns.testcoll.usercallback,formerly_core_callback ansible localhost -m debug 2>&1 | grep -c 'usercallback says ok')" = "1" ]
# ensure non existing callback does not crash ansible
ANSIBLE_CALLBACKS_ENABLED=charlie.gomez.notme ansible localhost -m debug 2>&1 | grep -- "Skipping callback plugin 'charlie.gomez.notme'"
unset ANSIBLE_LOAD_CALLBACK_PLUGINS
# adhoc normally shouldn't load non-default plugins- let's be sure
output=$(ANSIBLE_CALLBACKS_ENABLED=testns.testcoll.usercallback ansible localhost -m debug)
if [[ "${output}" =~ "usercallback says ok" ]]; then echo fail; exit 1; fi
echo "--- validating docs"
# test documentation
ansible-doc testns.testcoll.testmodule -vvv | grep -- "- normal_doc_frag"
# same with symlink
ln -s "${PWD}/testcoll2" ./collection_root_sys/ansible_collections/testns/testcoll2
ansible-doc testns.testcoll2.testmodule2 -vvv | grep "Test module"
# now test we can list with symlink
ansible-doc -l -vvv| grep "testns.testcoll2.testmodule2"
echo "testing bad doc_fragments (expected ERROR message follows)"
# test documentation failure
ansible-doc testns.testcoll.testmodule_bad_docfrags -vvv 2>&1 | grep -- "unknown doc_fragment"
echo "--- validating default collection"
# test adhoc default collection resolution (use unqualified collection module with playbook dir under its collection)
echo "testing adhoc default collection support with explicit playbook dir"
ANSIBLE_PLAYBOOK_DIR=./collection_root_user/ansible_collections/testns/testcoll ansible localhost -m testmodule
# we need multiple plays, and conditional import_playbook is noisy and causes problems, so choose here which one to use...
if [[ ${INVENTORY_PATH} == *.winrm ]]; then
export TEST_PLAYBOOK=windows.yml
else
export TEST_PLAYBOOK=posix.yml
echo "testing default collection support"
ansible-playbook -i "${INVENTORY_PATH}" collection_root_user/ansible_collections/testns/testcoll/playbooks/default_collection_playbook.yml "$@"
fi
echo "--- validating collections support in playbooks/roles"
# run test playbooks
ansible-playbook -i "${INVENTORY_PATH}" -v "${TEST_PLAYBOOK}" "$@"
if [[ ${INVENTORY_PATH} != *.winrm ]]; then
ansible-playbook -i "${INVENTORY_PATH}" -v invocation_tests.yml "$@"
fi
echo "--- validating bypass_host_loop with collection search"
ansible-playbook -i host1,host2, -v test_bypass_host_loop.yml "$@"
echo "--- validating inventory"
# test collection inventories
ansible-playbook inventory_test.yml -i a.statichost.yml -i redirected.statichost.yml "$@"
# NOTE(review): invocation_tests.yml was already run above and
# inventory_test.yml is run again after this block — looks like
# duplicated coverage; confirm whether intentional before removing.
if [[ ${INVENTORY_PATH} != *.winrm ]]; then
# base invocation tests
ansible-playbook -i "${INVENTORY_PATH}" -v invocation_tests.yml "$@"
# run playbook from collection, test default again, but with FQCN
ansible-playbook -i "${INVENTORY_PATH}" testns.testcoll.default_collection_playbook.yml "$@"
# run playbook from collection, test default again, but with FQCN and no extension
ansible-playbook -i "${INVENTORY_PATH}" testns.testcoll.default_collection_playbook "$@"
# run playbook that imports from collection
ansible-playbook -i "${INVENTORY_PATH}" import_collection_pb.yml "$@"
fi
# test collection inventories
ansible-playbook inventory_test.yml -i a.statichost.yml -i redirected.statichost.yml "$@"
# test plugin loader redirect_list
ansible-playbook test_redirect_list.yml -v "$@"
# test adjacent with --playbook-dir
export ANSIBLE_COLLECTIONS_PATH=''
ANSIBLE_INVENTORY_ANY_UNPARSED_IS_FAILED=1 ansible-inventory --list --export --playbook-dir=. -v "$@"
# use an inventory source with caching enabled
ansible-playbook -i a.statichost.yml -i ./cache.statichost.yml -v check_populated_inventory.yml
# Check that the inventory source with caching enabled was stored
if [[ "$(find ./inventory_cache -type f ! -path "./inventory_cache/.keep" | wc -l)" -ne "1" ]]; then
echo "Failed to find the expected single cache"
exit 1
fi
CACHEFILE="$(find ./inventory_cache -type f ! -path './inventory_cache/.keep')"
if [[ $CACHEFILE != ./inventory_cache/prefix_* ]]; then
echo "Unexpected cache file"
exit 1
fi
# Check the cache for the expected hosts
if [[ "$(grep -wc "cache_host_a" "$CACHEFILE")" -ne "1" ]]; then
echo "Failed to cache host as expected"
exit 1
fi
if [[ "$(grep -wc "dynamic_host_a" "$CACHEFILE")" -ne "0" ]]; then
echo "Cached an incorrect source"
exit 1
fi
# Delegate to the remaining sub-suites (both live beside this script).
./vars_plugin_tests.sh
./test_task_resolved_plugin.sh
|
thnee/ansible
|
test/integration/targets/collections/runme.sh
|
Shell
|
gpl-3.0
| 6,071 |
#!/bin/bash
# This short script detects whether or not a new display was added
# before restarting the i3 window manager. If so, it will run some
# `xrandr' commands to enable it.

# Test whether there is a display which is not running in its
# preferred mode (xrandr flags the preferred mode with a '+').
# BUG FIX: the original wrote `[ $(...) > 0 ]`; inside `[ ]` the `>` is
# an output redirection (it created a file literally named "0" and the
# test always succeeded). Use the numeric -gt operator instead.
if [ "$(xrandr --current | grep -E "[[:digit:]]* \+" | wc -c)" -gt 0 ]; then
    # Number of the line the detection above occurred in.
    # BUG FIX: the awk pattern used `[:digit:]` (a bare POSIX class,
    # undefined outside brackets); `[[:digit:]]` matches the grep above.
    # NOTE(review): if several displays match, mode_line holds several
    # numbers and the awk -v handoff below misbehaves — confirm only one
    # new display is expected at a time.
    mode_line=$(xrandr --current | awk '/[[:digit:]]* \+/ { print NR }')
    # Number of the line the description of the display resides in: the
    # last "connected" line printed above mode_line (modes are listed
    # below their display's description).
    display_line=$(xrandr --current | awk '/connected/{print NR}' | awk -v mode_line="$mode_line" '{if (mode_line > $0) display_line=$1} END {print display_line}')
    # Extract the name of the display
    display_name=$(xrandr --current | awk -v display_line="$display_line" '{if (NR==display_line) print $1}')
    # Extract the display's preferred mode
    display_mode=$(xrandr --current | awk -v mode_line="$mode_line" '{if (NR==mode_line) print $1}')
    # Activate the display in its preferred mode
    xrandr --output "$display_name" --mode "$display_mode"
    # Source my custom X11 configuration.
    source ~/.xprofile
fi

# Check whether an additional display was removed. When removing
# displays in i3, xrandr does not seem to turn them off; one has to do
# it manually. So check for "disconnected" displays that still have a
# mode (resolution) attached.
# BUG FIX: the original line ended in `;then.` (a syntax error) and had
# the same `>`-redirection bug fixed above.
if [ "$(xrandr --current | grep -E "disconnected [[:digit:]]+" | wc -c)" -gt 0 ]; then
    display_names_off=$(xrandr --current | grep -E "disconnected [[:digit:]]+" | awk '{print $1}')
    # Turn off all extracted devices (the variable is expanded unquoted
    # on purpose: it is a whitespace-separated list of output names).
    for dd in $display_names_off; do
        xrandr --output "$dd" --off
    done
fi

# Just to be safe, also run `xrandr --auto' on all connected displays.
display_names_auto=$(xrandr --current | grep -E "connected [[:digit:]]+" | awk '{print $1}')
for dd in $display_names_auto; do
    xrandr --output "$dd" --auto
done
|
theGreatWhiteShark/configurations-and-scripts
|
i3/toggle-display.sh
|
Shell
|
gpl-3.0
| 2,225 |
#!/bin/sh
# Build the PDF version of the CV: drop the first four lines of the
# rendered HTML page (site chrome) and feed the remainder to pandoc.
# BUG FIX: the original always ended with `rm`, so a failed pandoc run
# still exited 0; `set -e` plus an EXIT trap keeps the temp file cleaned
# up on every path while propagating failures.
set -e
trap 'rm -f _pdf.html' EXIT

tail -n +5 cv/index.html > _pdf.html
pandoc _pdf.html -o cv/cv-stefan_jonker.pdf
|
sgnl02/personal-site
|
_scripts/cv.sh
|
Shell
|
gpl-3.0
| 105 |
#!/bin/bash
# Original version of this script based on sample sourced from Enlightenment
# Contributing page: http://enlightenment.org/p.php?p=contribute&l=en
##############
# WARNING
# This script is nasty hack with no error control.
# This script assumes you have installed the build dependencies recommended.
# It also installs and builds everything into $PREFIX.
# Best of luck!
set -e

# Where is the source code?
SRCPATH="/home/craige/git/e19/modules"
# My quick and dirty dev install location
PREFIX="/opt/e19"

# Enlightenment modules to clone/update and build, in this order.
APPS=(
alarm
diskio
empris
eweather
moon
comp-scale
edgar
engage
forecasts
mpdule
cpu
eenvader.fractald
everything-places
mail
net
desksanity
elev8
everything-websearch
mem
)

SITE="git.enlightenment.org"
GIT="git://$SITE/modules"
OPT="--prefix=$PREFIX"

# Make the dev prefix visible to pkg-config, the shell, and the loader.
export PKG_CONFIG_PATH="$PREFIX/lib/pkgconfig:$PKG_CONFIG_PATH"
export PATH="$PREFIX/bin:$PATH"
export LD_LIBRARY_PATH="$PREFIX/lib:$LD_LIBRARY_PATH"

for package in "${APPS[@]}"
do
    # Check whether the source is already there.
    # BUG FIX: the original used the obsolete, ambiguous `[ -a ... ]`
    # and left every expansion unquoted; -d is the unambiguous
    # "directory exists" test.
    if [ -d "$SRCPATH/$package" ]
    then
        cd "$SRCPATH/$package"
        # A fresh checkout has no Makefile yet, hence the `|| true`.
        make clean distclean || true
        # Update the repos
        echo "Commencing $package rebase operations master!"
        git pull --rebase
    else
        # No source? Clone it!
        cd "$SRCPATH"
        echo "Got to clone the $package source sir!"
        git clone "$GIT/$package.git"
        cd "$SRCPATH/$package"
    fi
    cd "$SRCPATH/$package"
    ./autogen.sh "$OPT"
    make
    sudo make install
    cd ../..
    sudo ldconfig
done
|
craigem/Scripts
|
e19/build-e19-modules.sh
|
Shell
|
gpl-3.0
| 1,497 |
#Scratch/test notes for installing this package via GNU Stow; the only
#live commands are the two `stow` invocations and the `man` check at the
#bottom — everything else is kept as commented-out reference material.
#Search makefile for automake
#find . -name Makefile.am -print
# ./configure \
# --prefix=/usr \
# --mandir=/usr/share/man \
# --infodir=/usr/share/info \
# && make && make install
#Using GNU Stow to manage source installs, .stowrc but for now with EXPORT
#Structure base
#For make folders use
#mkdir -p {bin,include,lib,ib64,local,sbin,share,tmp}
# /bin
# /include
# /lib
# /lib64
# /local
# /sbin
# /share
# /tmp
#mkdir -p $HOME/stow
#mkdir -p ~/stow/flaketill-says-0.0.1
#cd ~/stow/flaketill-says-0.0.1 \
# && ./configure --prefix=~/stow/flaketill-says-0.0.1 --help
#Or save on home
#~/.stowrc
# --dir=$HOME/flaketill-says/src
# --target=/home/myusername/local
#stow -d . --target= ~/stow/flaketill-says-0.0.1
#stow -d . -t="~/stow/flaketill-says-0.0.1"
#stow -S -d . -t="$HOME/stow/flaketill-says-0.0.1"
#stow -t="$HOME/stow/flaketill-says-0.0.1"
#NOTE(review): GNU stow's short -t normally takes a *separate* argument
#(`stow -t DIR`); the `-t=DIR` spelling may hand stow a target of
#"=DIR" — confirm against the stow manual (--target=DIR is unambiguous).
stow -t="$HOME/stow/flaketill-says"
stow --no-folding --dir=. --target=$HOME/stow/flaketill-says
#make
#make install prefix=/usr/local/stow/package
#Test your ignore/include file
# stow -S -v -n flaketill-says
#stow -S -v -n -R -d src -t $HOME/stow/flaketill-says
#Install with GNU Stow
#stow -S program-x.x.x
#Unistall - stow -D flaketill-says
#Install for example with PKGBUILD
#install -g 0 -o 0 -m 0644 flaketill-says.1 /usr/local/man/man1/
#gzip /usr/local/man/man1/flaketill-says.1
#man ./flaketill-says.1
#NOTE(review): without a leading ./, `man` treats this as a page-name
#lookup rather than a file path — confirm the page is actually installed
#where man can find it, or use `man ./flaketill-says.1`.
man flaketill-says.1
|
flaketill/flaketill-says
|
test/manpage_test.sh
|
Shell
|
gpl-3.0
| 1,419 |
# Deploy: fast-forward the local "prod" branch to the current HEAD,
# publish every branch and tag to origin, then run the production
# playbook. Any failing step aborts the sequence, exactly as the
# original &&-chain did.
set -e
git fetch . HEAD:prod
git push origin --all
git push origin --tags
ansible-playbook -t prod playbook.yml
|
aliencyborg/tilltax-api
|
deploy.sh
|
Shell
|
gpl-3.0
| 118 |
#!/bin/sh
#An example script for submiting a GPI-2 test (proc_init.bin) to
#a SGE's queue using one process per node and enabling NUMA.
#It is assumed the mpi PE is setup with a `round_robin` allocation rule.
#$ -S /bin/sh
#$ -N gpi_test
#$ -pe mpi 2
#$ -V
#$ -cwd
#$ -w e
gaspihome=$HOME/local
gaspirun=$gaspihome/bin/gaspi_run

# Generate a `machine' file from $PE_HOSTFILE: first column, trimmed to
# the short host name.
# BUG FIX: read the file directly instead of `cat | cut` and use `>`
# rather than `>>` so a leftover file from a previous run with the same
# JOB_ID cannot accumulate duplicate entries.
# NOTE(review): nothing here deduplicates repeated hosts; with the
# round_robin allocation rule each host should appear once — confirm.
machine_file="machines_$JOB_ID"
cut -f1 -d" " "$PE_HOSTFILE" | cut -f1 -d. > "$machine_file"

test_bin="proc_init.bin"
$gaspirun -m "$machine_file" -N "$gaspihome/tests/bin/$test_bin"
|
cc-hpc-itwm/GPI-2
|
docs/batch_examples/submit_to_sge.sh
|
Shell
|
gpl-3.0
| 592 |
#!/usr/bin/env bash
# Copyright (c) 2017 Red Hat, Inc. All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author: Xiaowu <[email protected]>
# Mount point under test and kdump path, overridable via environment.
# NOTE(review): KPATH is set but not referenced in this file — presumably
# consumed by the sourced kdump libraries; confirm.
MP=${MP:-"/home"}
KPATH=${KPATH:-"/nonrootlvm"}

# Shared test-framework helpers (kdump_prepare, log_*, C_REBOOT, ...).
. ../lib/kdump.sh
. ../lib/kdump_report.sh
. ../lib/crash.sh

# First boot: configure kdump for an LVM-backed filesystem and trigger a
# sysrq crash. After the forced reboot (flag file ${C_REBOOT} present):
# verify a vmcore was captured and finish the test.
dump_lvm()
{
    if [ ! -f "${C_REBOOT}" ]; then
        kdump_prepare
        # $MP must be backed by an LVM device-mapper volume.
        # BUG FIX (idiom): test the pipeline's status directly instead
        # of the `cmd; [ $? -eq 0 ] || ...` anti-pattern.
        findmnt -kcno SOURCE "$MP" | grep "/dev/mapper/" \
            || log_fatal_error "- $MP is not LVM."
        config_kdump_fs
        report_system_info
        trigger_sysrq_crash
    else
        rm -f "${C_REBOOT}"
        validate_vmcore_exists
        ready_to_exit
    fi
}

log_info "- Start"
dump_lvm
|
yahcong/kdump-test
|
dump-lvm/runtest.sh
|
Shell
|
gpl-3.0
| 1,262 |
#!/bin/bash
####
# @author stev leibelt <[email protected]>
# @since 2016-09-03
####
# Demonstrates a simple dry-run switch: when IS_DRY_RUN is 1, commands
# are printed instead of executed.
IS_DRY_RUN=0

# Runs the command string given in $1, or merely echoes it when
# IS_DRY_RUN equals 1. The string is expanded unquoted — exactly as in
# the original — so it is word-split by the shell (embedded quotes are
# NOT re-parsed).
function execute_or_output_command_call ()
{
    local command_string="$1"

    if [[ $IS_DRY_RUN -ne 1 ]];
    then
        $command_string
    else
        echo "$command_string"
    fi
}

execute_or_output_command_call "echo \"foo\""
IS_DRY_RUN=1
execute_or_output_command_call "echo \"foo\""
|
stevleibelt/examples
|
bash/function/execute_or_output_command_call.sh
|
Shell
|
gpl-3.0
| 343 |
#!/bin/bash
################################################################
# LAB 1 PART 2 GRADING SCRIPT
################################################################
#
# The script needs to be in same directory as the SimpleC File
# and Expects there to be a tests folder with all of the
# provided c files. The provided token.py file should also
# be in the same directory
#
# There are two kinds of tests
# 1) Specific tokens (ex. "char*", "'c'")
# 2) The provided c files (ex. hello.c, ampersand.c)
#
# The script runs the output through the SimpeC Compiler and
# uses the token.py script to remove token names and replace
# them with integer values. The integers are assigned in
# increasing order and all subsequent tokens of that type
# will have the same integer ID. This allows the tokens
# names to be arbitrary as long as no two tokens have the
# same name
#
# Here is an example for hello.c
# The first token is void, the second is id...
#
# void -> 0
# main -> 1
# ( -> 2
# ) -> 3
# { -> 4
# printf -> 1
# ( -> 2
# Hello world...\n -> 5
# ) -> 3
# ; -> 6
# } -> 7
################################################################
# test lexing a single token
# test lexing a single token
#   $1 - token text; printf-expanded, so octal escapes such as \052 ('*')
#        and \045 ('%') work without tripping shell globbing
#   $2 - point weight (shown in the banner only)
testtok() {
    token=$1
    weight=$2
    echo "--------------------------------------------------"
    echo "token: $token $weight pts"
    echo "--------------------------------------------------"
    # BUG FIX: quote "$token" so tokens containing spaces or glob
    # characters reach printf intact. token.py is documented (header
    # comment) to live next to this script and is not necessarily on
    # PATH, so invoke it explicitly.
    solution_out=$(printf "$token" | java SimpleC | ./token.py)
    echo "$solution_out"
    echo ""
}
# test lexing all of the tokens in a file
# test lexing all of the tokens in a file
#   $1 - path to a C source file fed to the lexer on stdin
#   $2 - point weight (shown in the banner only)
testfile() {
    file=$1
    weight=$2
    echo "--------------------------------------------------"
    echo "file: $file $weight pts"
    echo "--------------------------------------------------"
    # BUG FIX: quote "$file" so paths with spaces survive. token.py is
    # documented (header comment) to live next to this script and is not
    # necessarily on PATH, so invoke it explicitly.
    solution_out=$(java SimpleC < "$file" | ./token.py)
    echo "$solution_out"
    echo ""
}
testkeywords() {
    # Test keywords 15 points — one point each, same order as before.
    local kw
    for kw in 'char*' 'char**' 'long*' 'long' 'void' 'if' 'else' \
              'while' 'do' 'for' 'continue' 'break' 'return' \
              'double' 'double*'; do
        testtok "$kw" 1
    done
}
testbasicsymbols() {
    #test operators/symbols 23 points
    testtok "(" 1
    testtok ")" 1
    testtok "[" 1
    # BUG FIX: "[" was tested twice and "]" never was; the comment above
    # promises 23 distinct operator/symbol tokens.
    testtok "]" 1
    testtok "{" 1
    testtok "}" 1
    testtok "," 1
    testtok ";" 1
    testtok "=" 1
    testtok "&" 1
    testtok "||" 1
    testtok "&&" 1
    testtok "==" 1
    testtok "!=" 1
    testtok "<" 1
    testtok ">" 1
    testtok "<=" 1
    testtok ">=" 1
    testtok "+" 1
    testtok "-" 1
    # \052 = '*' and \045 = '%' in octal; testtok's printf expands them,
    # which avoids shell glob expansion of a bare '*'.
    testtok '\052' 1
    testtok '/' 1
    testtok '\045' 1
}
testcomplex() {
    # test regex based tokens 22 points
    # Token/weight pairs are read from the quoted here-doc, which keeps
    # every backslash escape literal; testtok's printf later expands the
    # octal escapes (\052 = '*') and \n sequences, just as before.
    local tok weight
    while read -r tok weight; do
        testtok "$tok" "$weight"
    done <<'EOF'
/\052\052/test 2
/\052comment\052/test 1
/\052double(10)\052/test 1
//\ntest 2
//comment\ntest 1
//double(10)\ntest 1
hello 2
ThIsIs_4_T35T 2
'c' 2
'!' 2
'\045' 1
10 1
1234567891234567890 2
1234567890.0987654321 2
.1337E4 1
EOF
}
testfiles() {
    # test against provided files 32 points — run the lexer over every
    # file in tests/, in glob (sorted) order, one point apiece.
    local test_source
    for test_source in tests/*; do
        testfile "$test_source" 1
    done
}
# Run every test group in grading order: keywords, operators/symbols,
# regex-based tokens, then the provided C files.
testkeywords
testbasicsymbols
testcomplex
testfiles
|
trankq/Coursework
|
cs352/lab1-src/test_tok_release.sh
|
Shell
|
gpl-3.0
| 4,220 |
#!/bin/bash
# Bootstrap for the "OS-RT" based domain controller: hosts file, a local
# yum mirror served over HTTP, and resolver configuration. Helper
# functions and the environment ($IP, $DOMAIN, $DNS1, $DATETIME, ...)
# come from the two sourced files below.
echo "Настройка операционной системы "ОСь"..."
. ./functions.sh
. ./env_vars.sh

# Regenerate /etc/hosts (keeping a timestamped backup) with this host.
mv /etc/hosts /etc/hosts.$DATETIME
cat <<EOF > /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
$IP $(hostname) $(hostname -A)
EOF

# Mirror the OS-RT package tree and turn it into a local repository.
# BUG FIX: the original piped `cd` into the next command
# (`cd dir | cmd`), which runs cd in a subshell and the command in the
# ORIGINAL working directory; `&&` is what was intended.
echo "Загрузка пакетов "ОСи" и создание,настройка репозитория os-rt-base для домена"
cd /var/www/html && reposync http://betapkgs.os-rt.ru/os-rt/2.1/os/x86_64/
cd /var/www/html/os-rt-base && rpm -i deltarpm* python-deltarpm* createrepo*
createrepo /var/www/html/os-rt-base
chmod -R 755 /var/www/html/os-rt-base

# Publish the repository over HTTP.
cat <<EOF > /etc/httpd/conf.d/repos.conf
Alias /repos /var/www/html/os-rt-base/
<directory /var/www/html/os-rt-base>
Options +Indexes
Require all granted
</directory>
EOF
systemctl enable httpd
systemctl start httpd

# Point yum at the local mirror.
# NOTE(review): this here-doc is unquoted, so $releasever/$basearch in
# the commented-out baseurl line expand (to empty) at write time;
# harmless while commented, but confirm that is acceptable.
cat <<EOF > /etc/yum.repos.d/os-rt-base.repo
[os-rt-base]
name=Operating system OS-RT 2.1 - Base
#baseurl=http://betapkgs.os-rt.ru/os-rt/$releasever/os/$basearch/
baseurl=http://$IP/repos
metadata_expire=14d
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-NCI
skip_if_unavailable=True
EOF
restorecon -vR /var/www/html
yum clean all
yum repolist

# Resolver: local domain first, Google DNS as fallback.
echo "Настройка DNS /etc/resolv.conf"
cat <<EOF > /etc/resolv.conf
domain $DOMAIN
search $DOMAIN
nameserver $DNS1
nameserver 8.8.8.8
EOF
# Install build tooling plus the samba AD/DC stack, BIND, Kerberos,
# DHCP and NTP in one transaction.
echo "Установка необходимого ПО ...."
yum -y install gcc libacl-devel libblkid-devel gnutls-devel \
readline-devel python-devel gdb pkgconfig krb5-workstation \
zlib-devel setroubleshoot-server libaio-devel \
setroubleshoot-plugins policycoreutils-python \
libsemanage-python python-setuptools setools-libs \
popt-devel libpcap-devel sqlite-devel libidn-devel \
libxml2-devel libacl-devel libsepol-devel libattr-devel \
keyutils-libs-devel cyrus-sasl-devel cups-devel bind-utils \
docbook-style-xsl libxslt perl gamin-devel openldap-devel \
perl-Parse-Yapp xfsprogs-devel NetworkManager \
samba samba-client samba-dc samba-krb5-printing \
samba-test tdb-tools krb5-workstation samba-winbind-clients \
openldap-clients bind bind-utils python-dns nmap-ncat dhcp ntp ntpdate
# Timezone + local NTP daemon. The ntpsigndsocket/mssntp lines let
# samba sign NTP replies for domain members (MS-SNTP).
echo "Настройка часового пояса,точного времени,настройка локального NTPd-сервера"
mv /etc/localtime /etc/localtime.$DATETIME
ln -s /usr/share/zoneinfo/Europe/Moscow /etc/localtime
cat <<EOF > /etc/ntp.conf
server 127.127.1.0
fudge 127.127.1.0 stratum 13
driftfile /var/lib/ntp/ntp.drift
logfile /var/log/ntp
ntpsigndsocket /var/lib/samba/ntp_signd/
restrict default kod nomodify notrap nopeer mssntp
restrict 127.0.0.1
EOF
# enableService/startService are helpers from ./functions.sh — TODO
# confirm their exact semantics there.
enableService ntpd
startService ntpd
# Wipe all previous samba state so the provisioning below starts clean;
# kdestroy drops any cached Kerberos tickets.
echo "Подготовка окружения в "ОСи""
# NOTE(review): the smb.conf backup lands in /etc, not /etc/samba —
# confirm this is intended.
mv /etc/samba/smb.conf /etc/smb.conf.$DATETIME
rm -rf /var/lib/samba
kdestroy
cat <<EOF > /etc/krb5.conf
#includedir /etc/krb5.conf.d/
EOF
# Provision the Samba AD domain. REALM, SHORTDOMAIN, IP and
# ADMINPASSWORD are expected from env_vars.sh (sourced at the top of
# this script).
echo "Инициализация и настройка домена $DOMAIN..."
samba-tool domain provision --use-rfc2307 --dns-backend=BIND9_DLZ --realm=$REALM --domain=$SHORTDOMAIN --host-ip=$IP --adminpass=$ADMINPASSWORD --server-role=dc --use-xattrs=yes
rndc-confgen -a -r /dev/urandom
cat <<EOF > /var/named/forwarders.conf
forwarders { 8.8.8.8; 8.8.4.4; } ;
EOF
# Generate /etc/named.conf with samba's DLZ backend. The here-doc is
# unquoted on purpose: $IP, $SUBNET/$PREFIX and $(hostname -d) must
# expand while the file is written.
echo "Настройка конфигурации для DNS"
cp -p /etc/named.conf /etc/named.conf.$DATETIME
cat <<EOF > /etc/named.conf
options {
listen-on port 53 { 127.0.0.1; $IP; };
// listen-on-v6 port 53 { any; };
directory "/var/named";
dump-file "/var/named/data/cache_dump.db";
statistics-file "/var/named/data/named_stats.txt";
memstatistics-file "/var/named/data/named_mem_stats.txt";
allow-query { localnets; $SUBNET/$PREFIX; };
allow-update { localnets; $SUBNET/$PREFIX; };
recursion yes;
dnssec-enable no;
dnssec-validation no;
auth-nxdomain yes;
// dnssec-enable yes;
// dnssec-validation yes;
// dnssec-lookaside auto;
/* Path to ISC DLV key */
bindkeys-file "/etc/named.iscdlv.key";
managed-keys-directory "/var/named/dynamic";
tkey-gssapi-keytab "/var/lib/samba/private/dns.keytab";
allow-transfer { "none"; };
tkey-domain "$(hostname -d)";
include "forwarders.conf";
};
logging {
channel default_debug {
file "data/named.run";
severity dynamic;
};
};
zone "." IN {
type hint;
file "named.ca";
};
include "/etc/named.rfc1912.zones";
include "/etc/named.root.key";
include "/var/lib/samba/private/named.conf";
include "/etc/rndc.key";
EOF
# Replace the system krb5.conf with the one samba generated during
# provisioning; named needs group read access for GSS-TSIG updates.
mv /etc/krb5.conf /etc/krb5.conf.$DATETIME
cp /var/lib/samba/private/krb5.conf /etc/
chgrp named /etc/krb5.conf
#cp -p /etc/sysconfig/named /etc/sysconfig/named.$DATETIME
# Restrict named to IPv4 (the listen-on-v6 clause above is commented out).
echo OPTIONS="-4" >> /etc/sysconfig/named
# Grant named access to samba's DNS databases and keytab; ntpd needs
# the signing socket for MS-SNTP.
echo "Настройка прав доступа на каталог SAMBA"
chown -R named:named /var/lib/samba/private/dns
chown -R named:named /var/lib/samba/private/sam.ldb.d
chown named:named /var/lib/samba/private/dns.keytab
chown named:named /etc/rndc.key
chown named:named /var/lib/samba/private/named.conf
chown root:named /var/lib/samba/private/
chmod 775 /var/lib/samba/private/
chgrp ntp /var/lib/samba/ntp_signd/
chmod g+rwx /var/lib/samba/ntp_signd/
systemctl restart ntpd
# Label samba's DNS databases so the confined named domain may read them.
echo "Настройка контекстов безопасности SELinux на каталог SAMBA"
rm -rf /etc/selinux/targeted/semanage.*.LOCK
chcon -t named_conf_t /var/lib/samba/private/dns.keytab
semanage fcontext -a -t named_conf_t /var/lib/samba/private/dns.keytab
chcon -t named_conf_t /var/lib/samba/private/named.conf
semanage fcontext -a -t named_conf_t /var/lib/samba/private/named.conf
chcon -t named_var_run_t /var/lib/samba/private/dns
semanage fcontext -a -t named_var_run_t /var/lib/samba/private/dns
chcon -t named_var_run_t /var/lib/samba/private/dns/sam.ldb
semanage fcontext -a -t named_var_run_t /var/lib/samba/private/dns/sam.ldb
chcon -t named_var_run_t /var/lib/samba/private/dns/sam.ldb.d
semanage fcontext -a -t named_var_run_t /var/lib/samba/private/dns/sam.ldb.d
# NOTE(review): `for file in \`ls ...\`` breaks on names with spaces;
# harmless for ldb partition files, but a plain glob would be safer.
for file in `ls /var/lib/samba/private/dns/sam.ldb.d`
do
chcon -t named_var_run_t /var/lib/samba/private/dns/sam.ldb.d/$file
semanage fcontext -a -t named_var_run_t /var/lib/samba/private/dns/sam.ldb.d/$file
done
for file in `ls /var/lib/samba/private/sam.ldb.d`
do
chcon -t named_var_run_t /var/lib/samba/private/sam.ldb.d/$file
semanage fcontext -a -t named_var_run_t /var/lib/samba/private/sam.ldb.d/$file
done
setsebool -P samba_domain_controller on
restorecon -vR /var/lib/samba/
# Open the standard AD DC ports: DNS, Kerberos, RPC endpoint mapper,
# NetBIOS, LDAP(S), SMB, kpasswd, global catalog and a dynamic RPC range.
echo "Настройка правил межсетевого экрана Firewalld..."
firewall-cmd --add-port=53/tcp --permanent;
firewall-cmd --add-port=53/udp --permanent;
firewall-cmd --add-port=88/tcp --permanent;
firewall-cmd --add-port=88/udp --permanent; \
firewall-cmd --add-port=135/tcp --permanent;
firewall-cmd --add-port=137-138/udp --permanent;
firewall-cmd --add-port=139/tcp --permanent; \
firewall-cmd --add-port=389/tcp --permanent;
firewall-cmd --add-port=389/udp --permanent;
firewall-cmd --add-port=445/tcp --permanent; \
firewall-cmd --add-port=464/tcp --permanent;
firewall-cmd --add-port=464/udp --permanent;
firewall-cmd --add-port=636/tcp --permanent; \
firewall-cmd --add-port=1024-5000/tcp --permanent;
firewall-cmd --add-port=3268-3269/tcp --permanent
firewall-cmd --reload
echo "Запуск службы DNS..."
enableService named
startService named
# Install a SysV init script for the samba AD/DC daemon (no unit file is
# shipped for this role here). Every $ that must survive into the
# generated file is escaped as \$ because the here-doc delimiter is
# unquoted; the $"..." localization strings are deliberately left to the
# generated script.
cat <<EOF > /etc/init.d/samba4
#!/bin/bash
#
# samba4 This shell script takes care of starting and stopping
# samba4 daemons.
#
# chkconfig: - 58 74
# description: Samba 4 acts as an Active Directory Domain Controller.
### BEGIN INIT INFO
# Provides: samba4
# Required-Start: \$network \$local_fs \$remote_fs
# Required-Stop: \$network \$local_fs \$remote_fs
# Should-Start: \$syslog \$named
# Should-Stop: \$syslog \$named
# Short-Description: start and stop samba4
# Description: Samba 4 acts as an Active Directory Domain Controller.
### END INIT INFO
# Source function library.
. /etc/init.d/functions
# Source networking configuration.
. /etc/sysconfig/network
prog=samba
prog_args="-d2 -l /var/log/ -D"
prog_dir=/usr/sbin
lockfile=/var/lock/subsys/\$prog
start() {
[ "\$NETWORKING" = "no" ] && exit 1
# Start daemons.
echo -n $"Starting samba4: "
daemon \$prog_dir/\$prog \$prog_args
RETVAL=\$?
echo
[ \$RETVAL -eq 0 ] && touch \$lockfile
return \$RETVAL
}
stop() {
[ "\$EUID" != "0" ] && exit 4
echo -n $"Shutting down samba4: "
killproc \$prog_dir/\$prog
RETVAL=\$?
echo
[ \$RETVAL -eq 0 ] && rm -f \$lockfile
return \$RETVAL
}
# See how we were called.
case "\$1" in
start)
start
;;
stop)
stop
;;
status)
status \$prog
;;
restart)
stop
start
;;
reload)
echo "Not implemented yet."
exit 3
;;
*)
echo $"Usage: \$0 {start|stop|status|restart|reload}"
exit 2
esac
EOF
# Read+execute for everyone; the init system only needs to run it.
chmod 555 /etc/init.d/samba4
echo "Запуск и настройка службы SAMBA Active Directory Domain Controller..."
#cp -f $dir_config/smb.conf /etc/samba/
#smbcontrol all reload-config
#touch /etc/samba/smbpasswd
enableService samba4
#startService samba4
# Run samba like this to test
#/usr/sbin/samba -i -M single -d2
# Run named like this to test
#named -u named -4 -f -g -d2
# Obtain an admin ticket, then relax the domain password policy (no
# complexity, no expiry) — suitable for a lab, not production.
echo "Настройка правил пароля для доменных пользователей..."
echo "$ADMINPASSWORD" | kinit Administrator@$REALM
samba-tool domain passwordsettings set --complexity=off --history-length=0 --min-pwd-age=0 --max-pwd-age=0 --min-pwd-length=6
smbpasswd -a root
smbpasswd -e root
# Unprivileged "dhcpd" account whose exported keytab lets the DHCP
# server push dynamic DNS updates into samba.
echo "Создание и настройка служебного пользователя домена dhcpd..."
samba-tool user create dhcpd --description="Unprivileged user for DNS updates via DHCP server" --random-password
samba-tool user setexpiry dhcpd --noexpiry
samba-tool group addmembers DnsAdmins dhcpd
samba-tool group addmembers DnsUpdateProxy dhcpd
install -vdm 755 /etc/dhcp
samba-tool domain exportkeytab --principal=dhcpd@$REALM /etc/dhcp/dhcpd.keytab
# NOTE(review): legacy "user.group" chown syntax; "dhcpd:dhcpd" is the
# portable spelling. $dir_config is expected from env_vars.sh — confirm.
chown dhcpd.dhcpd /etc/dhcp/dhcpd.keytab
smbpasswd -e dhcpd
cp -f $dir_config/dhcpd.conf /etc/dhcp/
#cp -f $dir_config/dhcpd-update-samba-dns.conf /etc/dhcp/
#cp -f $dir_config/dhcpd-update-samba-dns.sh /usr/local/bin/
#cp -f $dir_config/samba-dnsupdate.sh /usr/local/bin/
cp -f $dir_config/dhcpd-update.sh /etc/dhcp/scripts/
chmod u+x /etc/dhcp/scripts/dhcpd-update.sh
chown -R dhcpd.dhcpd /etc/dhcp
# Service shuffle: samba down while dhcpd/named come up, samba last.
systemctl stop samba4
systemctl enable dhcpd
systemctl start dhcpd
systemctl enable ntpd
systemctl restart named
systemctl start samba4
# Create the 192.168.1.0/24 reverse zone plus a PTR record for this DC.
echo "Создание обратной зоны DNS..."
#echo "$ADMINPASSWORD" | kinit Administrator@$REALM
#klist
# NOTE(review): "--username=$Administrator" expands an apparently unset
# variable to an empty string — likely the literal "Administrator" was
# meant; confirm.
samba-tool dns zonelist $(hostname) --username=$Administrator --password="$ADMINPASSWORD"
samba-tool dns zonecreate $(hostname -A) 1.168.192.in-addr.arpa --username=$Administrator --password="$ADMINPASSWORD"
samba-tool dns add 1.168.192.in-addr.arpa 2 PTR $(hostname)
systemctl restart named
samba_dnsupdate --all-names --current-ip=$IP
echo "Создание тестовых пользователей user1 и user2..."
samba-tool user create user1 Passw0rd --must-change-at-next-login --given-name=Tester1 --mail-address='[email protected]' --uid=user1 --uid-number=10000 --gid-number=10000 --login-shell=/bin/bash
samba-tool user create user2 Passw0rd --must-change-at-next-login --given-name=Tester2 --mail-address='[email protected]' --uid=user2 --uid-number=10001 --gid-number=10000 --login-shell=/bin/bash
# Web administration UI on top of the AD LDAP.
echo "Конфигурирование утилиты веб-администрирования - phpLdapAdmin"
yum -y install phpldapadmin
cp -f $dir_config/phpldapadmin/config.php /etc/phpldapadmin/
cp -f $dir_config/phpldapadmin/phpldapadmin.conf /etc/httpd/conf.d/
systemctl restart httpd.service
systemctl restart named
systemctl restart dhcpd
systemctl stop samba4
systemctl start samba4
#echo "Now manually set the group id and NIS domain using dsa.msc"
# Change passwords like this (on domain controller box)
#samba-tool user setpassword user1
#samba-tool fsmo show
# Smoke tests: Kerberos ticket present, SRV/A records resolve, and the
# netlogon share is reachable with the admin password.
echo "Проверка имени хостов и динамического обновления зоны DNS"
klist
host -t SRV _kerberos._udp.$(hostname -d).
host -t SRV _ldap._tcp.$(hostname -d).
host -t A $(hostname).
echo "$ADMINPASSWORD" | smbclient //localhost/netlogon -UAdministrator -c 'ls'
#echo "$ADMINPASSWORD" | kinit Administrator@$REALM
|
grikon/samba-usefull
|
setup_samba.sh
|
Shell
|
gpl-3.0
| 12,816 |
#!/bin/bash
# Builds the "Automatic Testing Tool" (ATT) from a NetBeans project folder,
# packages it as att.jar and runs it.
#
# Usage: exec.sh netbeansProjectABSOLUTEFolder outputABSOLUTEFolder
# Both arguments are expected to be absolute paths ending with '/'.
#
# Fixes vs. the previous revision: all expansions are quoted (paths with
# spaces no longer break), 'cd' failures abort instead of running javac/jar
# in the wrong directory, and the repeated mkdir-if-missing stanzas are
# factored into helpers (messages kept identical).
if [ $# -eq 2 ]
then
    # Variables.
    netbeansProjectFolder=$1
    outputFolder=$2
    pkg="automatictestingtool"
    ATTPath="${outputFolder}ATT/"
    srcPath="${ATTPath}src/"
    pkgPath="${srcPath}$pkg/"
    buildPath="${ATTPath}build/"
    testsPath="${buildPath}$pkg/tests/"

    # Create a directory if missing, echoing what happened.
    ensure_dir()
    {
        if [ ! -e "$1" ]
        then
            mkdir "$1"
            echo "The \"$1\" does not exist: folder created."
        else
            echo "The \"$1\" already exists: folder not created."
        fi
    }

    # Delete a previously deployed XML Schema file if present.
    remove_schema()
    {
        if [ -e "$buildPath$pkg/programUtilFiles/$1" ]
        then
            echo "Deleting the \"$1\" file..."
            rm "$buildPath$pkg/programUtilFiles/$1"
        else
            echo "The \"$1\" file does not exist."
        fi
    }

    # Creates the ATT directory tree in the user's specified folder.
    echo ""
    echo "Creating (if necessary) the folder tree..."
    ensure_dir "$ATTPath"
    ensure_dir "$srcPath"
    ensure_dir "$pkgPath"
    ensure_dir "$buildPath"
    ensure_dir "${buildPath}${pkg}/programImages"
    ensure_dir "${buildPath}${pkg}/programUtilFiles"
    remove_schema "testXMLSchema.xsd"
    remove_schema "reportsXMLSchema.xsd"

    # Copies the images in the programImages/ folder.
    echo ""
    echo "Copying the images from the \"${netbeansProjectFolder}programImages\" folder to the \"$buildPath$pkg/programImages\" folder..."
    cp "${netbeansProjectFolder}"programImages/*.png "${buildPath}${pkg}"/programImages/

    # Copies the XML Schema files in the programUtilFiles/ folder.
    echo ""
    echo "Copying the XML Schema files from the \"${netbeansProjectFolder}src/$pkg/\" folder to the \"$buildPath$pkg/tests\" folder..."
    cp "${netbeansProjectFolder}"src/"$pkg"/*.xsd "${buildPath}${pkg}/programUtilFiles"

    # Copies *.java files in the ../src/automatictestingtool folder.
    echo ""
    echo "Copying java source file from \"${netbeansProjectFolder}src/$pkg/\" to \"$pkgPath\"..."
    cp "${netbeansProjectFolder}"src/"$pkg"/*.java "$pkgPath"

    # Compiles all *.java files.
    echo ""
    echo "Transforming from \"Java Code\" to \"Byte Code\"..."
    cd "$pkgPath" || exit 1
    javac -d "$buildPath" Main.java ConsoleManager.java SystemManagement.java TestCaseBuilder.java TestExecutor.java UserActionSimulator.java WindowManager.java ResultsValidator.java ScriptExecutor.java

    # Creates the jar file in the ATT folder.
    echo ""
    echo "Creating the jar file \"att.jar\" in the \"$ATTPath\" folder..."
    cd "$buildPath" || exit 1
    jar -cvfme "${ATTPath}att.jar" "${netbeansProjectFolder}manifest.mf" automatictestingtool.Main "$pkg"/*.class "$pkg"/programUtilFiles/*.xsd "$pkg"/programImages/*.png

    # Executes the Main.
    echo ""
    echo "Executing the \"Automatic Testing Tool\" Program..."
    cd "$ATTPath" || exit 1
    java -jar att.jar
    echo "Execution terminated."
    echo ""
else
    echo "Usage: $0 netbeansProjectABSOLUTEFolder outputABSOLUTEFolder"
fi
|
psf563/att
|
it.sergioferraresi.att/exec.sh
|
Shell
|
gpl-3.0
| 4,211 |
# 810_create_pxelinux_cfg.sh
#
# create pxelinux config on PXE server for Relax-and-Recover
#
# This file is part of Relax-and-Recover, licensed under the GNU General
# Public License. Refer to the included COPYING for full text of license.

# We got PXE_KERNEL and PXE_INITRD set in the previous script.
# NOTE(review): 'local' is used at file scope below; this presumably relies
# on the rear framework sourcing this script inside a function — confirm.
if [[ ! -z "$PXE_CONFIG_URL" ]] ; then
    # E.g. PXE_CONFIG_URL=nfs://server/export/nfs/tftpboot/pxelinux.cfg
    # Better be sure that on 'server' the directory /export/nfs/tftpboot/pxelinux.cfg exists
    local scheme=$( url_scheme $PXE_CONFIG_URL )
    # We need filesystem access to the destination (schemes like ftp:// are not supported)
    if ! scheme_supports_filesystem $scheme ; then
        Error "Scheme $scheme for PXE output not supported, use a scheme that supports mounting (like nfs: )"
    fi
    # Mount the destination; it is unmounted again at the bottom of this script.
    mount_url $PXE_CONFIG_URL $BUILD_DIR/tftpbootfs $BACKUP_OPTIONS
    PXE_LOCAL_PATH=$BUILD_DIR/tftpbootfs
else
    # legacy way using PXE_LOCAL_PATH default
    PXE_LOCAL_PATH=$PXE_CONFIG_PATH
fi

# PXE_CONFIG_PREFIX is a "string" (by default rear-) - is the name of PXE boot configuration of $HOSTNAME
PXE_CONFIG_FILE="${PXE_CONFIG_PREFIX}$HOSTNAME"
if [[ ! -z "$PXE_CONFIG_URL" ]] ; then
    # Write either a GRUB-style or a pxelinux-style boot entry.
    if is_true "$PXE_CONFIG_GRUB_STYLE" ; then
        make_pxelinux_config_grub >"$PXE_LOCAL_PATH/$PXE_CONFIG_FILE"
    else
        make_pxelinux_config >"$PXE_LOCAL_PATH/$PXE_CONFIG_FILE"
    fi
    chmod 444 "$PXE_LOCAL_PATH/$PXE_CONFIG_FILE"
else
    # legacy way using PXE_LOCAL_PATH default
    cat >"$PXE_LOCAL_PATH/$PXE_CONFIG_FILE" <<EOF
$(test -s $(get_template "PXE_pxelinux.cfg") && cat $(get_template "PXE_pxelinux.cfg"))
display $OUTPUT_PREFIX_PXE/$PXE_MESSAGE
say ----------------------------------------------------------
say rear = disaster recover this system with Relax-and-Recover
label rear
kernel $OUTPUT_PREFIX_PXE/$PXE_KERNEL
append initrd=$OUTPUT_PREFIX_PXE/$PXE_INITRD root=/dev/ram0 vga=normal rw $KERNEL_CMDLINE $PXE_RECOVER_MODE
EOF
fi

pushd "$PXE_LOCAL_PATH" >/dev/null
StopIfError "PXE_CONFIG_PATH [$PXE_CONFIG_PATH] does not exist !"
if test "$PXE_CREATE_LINKS" -a "$PXE_REMOVE_OLD_LINKS" ; then
    # remove old links pointing at this host's config file
    find . -maxdepth 1 -type l | \
    while read file ; do
        if test "$(readlink -s $file)" = "$PXE_CONFIG_FILE" ; then
            rm -f $file
        fi
    done
fi

# When using Grub network boot via tftp/bootp,
# the client is looking at a file named "grub.cfg-01-<MAC>"
# or grub.cfg-<IP in hex>. It is like PXE, but prefixed with "grub.cfg-"
if is_true $PXE_CONFIG_GRUB_STYLE ; then
    pxe_link_prefix="grub.cfg-"
else
    pxe_link_prefix=""
fi

case "$PXE_CREATE_LINKS" in
    IP)
        # look only at IPv4 and skip localhost (127...)
        ip a | grep inet\ | grep -v inet\ 127 | \
        while read inet IP junk ; do
            IP=${IP%/*}
            # check if gethostip is available.
            if has_binary gethostip &>/dev/null ; then
                ln -sf $v "$PXE_CONFIG_FILE" $(gethostip -x $IP) >&2
                # to capture the whole subnet as well
                ln -sf $v "$PXE_CONFIG_FILE" $(gethostip -x $IP | cut -c 1-6) >&2
            else
                # if gethostip is not available on your platform (like ppc64),
                # use awk to generate IP in hex mode.
                ln -sf $v "$PXE_CONFIG_FILE" $pxe_link_prefix$(printf '%02X' ${IP//./ }) >&2
                # to capture the whole subnet as well
                ln -sf $v "$PXE_CONFIG_FILE" $pxe_link_prefix$(printf '%02X' ${IP//./ } | cut -c 1-6) >&2
            fi
        done
        ;;
    MAC)
        # look at all devices that have link/ether
        ip l | grep link/ether | \
        while read link mac junk ; do
            ln -sf $v "$PXE_CONFIG_FILE" 01-${mac//:/-} >&2
        done
        ;;
    "")
        Log "Not creating symlinks to pxelinux configuration file"
        ;;
    *)
        Error "Invalid PXE_CREATE_LINKS specified, must be MAC or IP or ''"
        ;;
esac
popd >/dev/null

if [[ ! -z "$PXE_CONFIG_URL" ]] ; then
    LogPrint "Created pxelinux config '${PXE_CONFIG_PREFIX}$HOSTNAME' and symlinks for $PXE_CREATE_LINKS adresses in $PXE_CONFIG_URL"
    # BUGFIX: unmount what was mounted above (PXE_CONFIG_URL). The previous
    # revision passed PXE_TFTP_URL here, which belongs to an earlier script
    # and may use a different scheme/location.
    umount_url $PXE_CONFIG_URL $BUILD_DIR/tftpbootfs
else
    LogPrint "Created pxelinux config '${PXE_CONFIG_PREFIX}$HOSTNAME' and symlinks for $PXE_CREATE_LINKS adresses in $PXE_CONFIG_PATH"
    # Add to result files
    RESULT_FILES+=( "$PXE_LOCAL_PATH/$PXE_CONFIG_FILE" )
fi
|
rear/rear
|
usr/share/rear/output/PXE/default/810_create_pxelinux_cfg.sh
|
Shell
|
gpl-3.0
| 4,325 |
# hashMap: dispatcher implementing a poor-man's "object" on top of bash
# associative arrays. First argument selects an operation (new, embed, or a
# map name followed by a method). Relies on helpers defined elsewhere in the
# project: required, equal, exitIfNot, stderr, dataStructure, derefKey.
function hashMap {
    # fromList scope varName name [key : value [, key : value]...]
    # Emits (on stdout) the bash statements that declare associative array
    # $name and optionally bind $varName to it; the caller is expected to
    # eval the result. Does not mutate state itself.
    function fromList {
        required scope $1
        local scope=$1
        required varName $2
        local varName=$2
        required name $3
        local name=$3
        shift 3
        # Non-local maps get 'declare -Ag' so they survive function return.
        if ! equal $scope local ; then
            local gflag=g
        else
            local gflag=''
        fi
        local instructions="declare -A$gflag $name=("
        if ! equal $# 0 ; then
            # First key : value triple (no leading comma).
            local key=$1
            local colon=$2
            local value=$3
            shift 3
            exitIfNot $colon :
            # Escape embedded double quotes so the generated code stays valid.
            local value=$(echo "$value" | sed 's/"/\\"/g')
            local item="[\"$key\"]=\"$value\""
            local instructions="$instructions $item"
            # Remaining tokens come in groups of four: , key : value
            local i=0
            local m=0
            for token in $@ ; do
                ((i++))
                ((m=i % 4))
                case $m in
                    1) local comma="$token" ;;
                    2) local key="$token" ;;
                    3) local colon="$token" ;;
                    0) local value="$token" ;;
                esac
                if equal $m 0 ; then
                    # A full group collected: validate separators, append item.
                    exitIfNot $comma ,
                    exitIfNot $colon :
                    local value=$(echo "$value" | sed 's/"/\\"/g')
                    local item="[\"$key\"]=\"$value\""
                    local instructions="$instructions $item"
                    local comma=""
                    local key=""
                    local colon=""
                    local value=""
                fi
            done
            # A trailing partial group means the literal was malformed.
            if ! equal $m 0 ; then
                case $m in
                    1) stderr "unexpected ',' at end of hashMap data" ;;
                    2) stderr "':' expected after key '$key'" ;;
                    3) stderr "value expected after ':'" ;;
                esac
                exit $_FALSE
            fi
        fi
        local prefix='declare -g'
        if equal $scope local ; then
            local prefix=local
        else
            local prefix=''
        fi
        local instructions="$instructions)"
        # varName NONE means "anonymous": declare the array only, no alias var.
        if ! equal $varName NONE ; then
            local instructions="$instructions; $prefix $varName=$name"
        fi
        echo "$instructions"
    }
    # new [local] varName [:= key : value ...]
    # Like fromList but generates a fresh unique array name.
    function new {
        required firstArg $1
        local firstArg=$1
        if equal $firstArg local ; then
            local scope=local
            shift
        fi
        required varName $1
        local varName=$1
        shift
        if ! equal $# 0 ; then
            exitIfNot $1 :=
            shift
        fi
        local name=$(dataStructure computeNewName hashMap)
        fromList $scope $varName $name "$@"
    }
    # embed [local] [:= key : value ...]
    # Declares the map immediately (via eval) and echoes its generated name.
    function embed {
        required firstArg $1
        local firstArg=$1
        if equal $firstArg local ; then
            local scope=local
            shift
        else
            local scope=''
        fi
        if ! equal $# 0 ; then
            exitIfNot $1 :=
            shift
        fi
        local name=$(dataStructure computeNewName hashMap)
        eval $(fromList $scope NONE $name "$@")
        echo $name
    }
    # dump name — delegate to the generic dataStructure dump.
    function dump {
        required name $1
        local name=$1
        dataStructure $name _dump
    }
    # get name key — echo the value stored under key.
    function get {
        required name $1
        local name=$1
        required key $2
        local key=$2
        derefKey $name $key
    }
    # set name key value — store value under key.
    function set {
        required name $1
        local name=$1
        required key $2
        local key=$2
        required value $3
        local value="$3"
        eval $name[$key]="$value"
    }
    # keys name — echo all keys of the map.
    function keys {
        required name $1
        local name=$1
        eval echo "\${!$name[@]}"
    }
    # size name — echo the number of entries.
    function size {
        required name $1
        local name=$1
        eval echo \${#$name[@]}
    }
    # remove name index — delegate element removal to dataStructure.
    function remove {
        required name $1
        local name=$1
        required index $2
        local index=$2
        dataStructure $name _remove
    }
    # copyFrom toName fromName — copy all entries from one map to another.
    function copyFrom {
        required toName $1
        local toName=$1
        required fromName $2
        local fromName=$2
        dataStructure $toName _copyFrom $fromName
    }
    # Dispatcher: 'new'/'embed' are constructors; anything else is
    # "<mapName> <method> args..." and must name an existing hashMap.
    if equal $1 new ; then
        local func=$1
        shift
        $func "$@"
    elif equal $1 embed ; then
        local func=$1
        shift
        $func "$@"
    else
        local name=$1
        local func=$2
        shift 2
        if ! dataStructure $name isHashMap; then
            stderr "'$name' is not a hashMap"
            exit 1
        fi
        $func $name "$@"
    fi
}
|
eriksank/bashOfGaps
|
lib/hashMap.sh
|
Shell
|
gpl-3.0
| 4,528 |
#!/bin/sh
# Install prerequisites: refresh apt metadata and clone the two required
# repositories (discord-shell-slave and MusicBot).
#
# BUGFIX: the previous revision ended with an unconditional 'exit 0', so a
# failed apt-get or git clone still reported success. 'set -e' now aborts
# with a non-zero status on the first failing command.
set -e
echo Grabbing fresh repository data...
sudo apt-get -qq update
echo Updated repositories. Grabbing git files for required programs...
git clone --quiet https://github.com/s-a/discord-shell-slave.git
git clone --quiet https://github.com/Just-Some-Bots/MusicBot.git
echo Grabbed discord-shell-slave and MusicBot.
echo Prerequisites installed.
exit 0
|
lounsberry/musicbotshell
|
install.sh
|
Shell
|
gpl-3.0
| 361 |
# Build AppImageKit, download three distribution live ISOs and verify that
# the parkitectnexus AppImage starts on each of them via AppImageKit's
# testappimage harness.
cd out || exit 1
git clone https://github.com/probonopd/AppImageKit.git
sudo ./AppImageKit/build.sh
wget -c --trust-server-names https://download.fedoraproject.org/pub/fedora/linux/releases/23/Workstation/x86_64/iso/Fedora-Live-Workstation-x86_64-23-10.iso
wget -c --trust-server-names http://releases.ubuntu.com/14.04.4/ubuntu-14.04.4-desktop-amd64.iso?_ga=1.29637303.929741532.1454222602
# BUGFIX: was "wget-c" (missing space) — not a command, so the elementary
# ISO was never downloaded and the third test always failed.
wget -c --trust-server-names https://sfo1.dl.elementary.io/download/MTQ1NjgwODA1OA==/elementaryos-0.3.2-stable-amd64.20151209.iso
sudo ./AppImageKit/AppImageAssistant.AppDir/testappimage ./Fedora-Live-Workstation-x86_64-23-10.iso ./parkitectnexus
sudo ./AppImageKit/AppImageAssistant.AppDir/testappimage "./ubuntu-14.04.4-desktop-amd64.iso?_ga=1.29637303.929741532.1454222602" ./parkitectnexus
sudo ./AppImageKit/AppImageAssistant.AppDir/testappimage ./elementaryos-0.3.2-stable-amd64.20151209.iso ./parkitectnexus
|
ParkitectNexus/ParkitectNexusClient
|
AppImage/test.sh
|
Shell
|
gpl-3.0
| 898 |
#!/bin/bash
# Restore the "local-sensitive" archive into /opt/poppypop/ using the shared
# restore-dir.sh helper, then optionally chown the restored tree.
#
# Usage: restore-sensitive.sh <restore-source>
src=${BASH_SOURCE%/*}
restore_src=$1
archive_name="local-sensitive"
restore_dest=/opt/poppypop/

# Test the command directly instead of the '$? -eq 0' antipattern; paths are
# quoted so sources with spaces work.
if sudo "$src/../restore-dir.sh" "$archive_name" "$restore_src" "$restore_dest"
then
    echo "Sensitive: Ok"
else
    echo "Sensitive: Fail"
    exit 1
fi

# -r keeps backslashes in the typed name literal.
read -r -p "Change owner to: [empty does nothing] " newowner
if [ -n "$newowner" ]; then
    echo "Changing to $newowner"
    sudo chown -R "$newowner:$newowner" "$restore_dest"
fi
|
PoppyPop/docker
|
system/backup-scripts/scripts/sensitiveconf/restore-sensitive.sh
|
Shell
|
gpl-3.0
| 453 |
# Compile both Fortran sources into object files (matrix_inv.o, matrix_tools.o).
gfortran -c matrix_inv.f95 matrix_tools.f95
# Link the objects; no -o is given, so the executable is the default a.out.
gfortran matrix_inv.o matrix_tools.o
|
wcdawn/matrix_math
|
matrix_solve/quick_build.sh
|
Shell
|
gpl-3.0
| 80 |
#!/bin/bash
# Push a file (e.g. an exported database) into the app's private files
# directory on a connected Android device, using run-as for the debug build.
#
# Usage: push-database.sh <file>
#
# BUGFIX: the shebang was #!/bin/sh although the script uses the bash-only
# [[ ]] test; on systems where sh is dash it failed to run.
ADB_PATH="$ANDROID_HOME/platform-tools"
PACKAGE_NAME="dev.jriley.hackernews.debug"
NOT_PRESENT="List of devices attached"
# 'adb devices' prints only the header line when nothing is attached.
ADB_FOUND=$("${ADB_PATH}/adb" devices | tail -2 | head -1 | cut -f 1 | sed 's/ *$//g')
BASENAME=$(basename "$1")
if [[ "${ADB_FOUND}" == "${NOT_PRESENT}" ]]; then
    echo "Make sure a device is connected"
else
    echo "$1"
    #Device
    "${ADB_PATH}/adb" push "$1" "/sdcard"
    "${ADB_PATH}/adb" shell "
run-as ${PACKAGE_NAME} mv /sdcard/${BASENAME} /data/data/${PACKAGE_NAME}/files/${BASENAME}
exit
"
    #Emulator
    # ${ADB_PATH}/adb push "$1" "data/local/tmp"
    # ${ADB_PATH}/adb shell "su 0 mv /data/local/tmp/${BASENAME} /data/data/${PACKAGE_NAME}/files/${BASENAME}
    # exit
    # "
    if [ $? -ne 0 ]; then
        (echo -e "\x1B[01;31mFAILED TO PUSH FILE ${BASENAME} DEV ON DEVICE\x1B[0m" >&2)
    fi
    echo "Database exported ${1} to /data/data/${PACKAGE_NAME}/files/${BASENAME} "
fi
|
jriley/HackerNews
|
tools/push-database.sh
|
Shell
|
mpl-2.0
| 906 |
#!/bin/bash
# Fetch the historical Berlin map scans (1650/1690/1750/1800) from the
# Berlin FIS-Broker, georeference them against EPSG:3068 and render
# slippy-map tiles into ../public/tiles/.
mkdir -p data
# BUGFIX: an unchecked 'cd data' would download/build in the wrong directory
# if it failed.
cd data || exit 1

# get the zip files with the images
wget http://fbarc.stadt-berlin.de/FIS_Broker_Atom/Berlin_um/Berlin1650.zip
wget http://fbarc.stadt-berlin.de/FIS_Broker_Atom/Berlin_um/Berlin1690.zip
wget http://fbarc.stadt-berlin.de/FIS_Broker_Atom/Berlin_um/Berlin1750.zip
wget http://fbarc.stadt-berlin.de/FIS_Broker_Atom/Berlin_um/Berlin1800.zip
# unzip files
unzip Berlin1650.zip
unzip Berlin1690.zip
unzip Berlin1750.zip
unzip Berlin1800.zip
# fetch legends
wget http://fbinter.stadt-berlin.de/fb_daten/legenden/leg_berlin1650.gif
wget http://fbinter.stadt-berlin.de/fb_daten/legenden/leg_berlin1690.gif
wget http://fbinter.stadt-berlin.de/fb_daten/legenden/leg_berlin1750.gif
wget http://fbinter.stadt-berlin.de/fb_daten/legenden/leg_berlin1800.gif
# create tile dir
mkdir -p ../public/tiles

# gdalinfo FILENAME gives corners in pixel
# webpage gives corners in EPSG:3068
# http://fbinter.stadt-berlin.de/fb/berlin/service.jsp?id=a_berlin1750@senstadt&type=FEED

# Georeference one scan and cut tiles for it.
#   $1     map name (used for the .vrt file and the tile directory)
#   $2     source TIFF file
#   $3-$6  ground control points, each "pixel line easting northing"
georef_and_tile() {
    local name=$1
    local tif=$2
    # The GCP arguments are intentionally unquoted: each must word-split
    # into the four numbers gdal_translate expects after -gcp.
    gdal_translate -of VRT -a_srs EPSG:3068 \
        -gcp $3 \
        -gcp $4 \
        -gcp $5 \
        -gcp $6 \
        "$tif" "${name}.vrt"
    gdal2tiles.py -z 10-17 -p mercator --no-kml "${name}.vrt" "../public/tiles/${name}"
}

# create tiles per map (the control points differ for each scan)
georef_and_tile berlin1650 "Berlin um 1650 entz.tif" \
    "0 0 23100 22600" "0 3511 23100 19600" "3678 0 26300 22600" "3678 3511 26300 19600"
georef_and_tile berlin1690 "Berlin um 1690 entz.tif" \
    "0 0 23100 22600" "0 3497 23100 19600" "3662 0 26300 22600" "3662 3497 26300 19600"
georef_and_tile berlin1750 "Berlin um 1750 entz.tif" \
    "0 0 21600 23820" "0 7905 21600 17120" "8547 0 28400 23820" "8547 7905 28400 17120"
georef_and_tile berlin1800 "Berlin um 1800 entz.tif" \
    "0 0 21600 23820" "0 7912 21600 17120" "8548 0 28400 23820" "8548 7912 28400 17120"
|
jochenklar/entwicklung-berlins
|
bin/fetch.sh
|
Shell
|
agpl-3.0
| 2,300 |
#!/bin/bash
# Build the server and run the newman API test collections against freshly
# provisioned test instances. Exits non-zero when the build or any suite fails.

# Build
echo 'Building'
rm -rf build/server
# BUGFIX: the previous check only treated exit status 1 as a build failure;
# treat any non-zero tsc status as fatal.
if ! tsc -p server; then
    echo 'Build failed'
    exit 1
fi

VINIMAY_ENV=test ./resetdb.sh
./starTestInstances.sh start

# Run one newman collection; count (rather than string-match) failures.
# BUGFIX: the previous revision concatenated exit codes and looked for the
# substring "1", which missed suites failing with e.g. status 2.
failures=0
run_suite() {
    newman run "$1" || failures=$((failures + 1))
}

run_suite tests/me.json
run_suite tests/posts.json
run_suite tests/comments.json
run_suite tests/reactions.json

# We need to switch to other SQL scripts, with no friends in them.
# So we need to restart the servers with newly-generated databases.
./starTestInstances.sh stop
./starTestInstances.sh clean
# Let the servers stop
sleep 2
lsof -i :3000
lsof -i :3001
lsof -i :3006
./resetdb.sh
./starTestInstances.sh start
run_suite tests/friends.json
./starTestInstances.sh stop
./starTestInstances.sh clean

if [ "$failures" -gt 0 ]; then
    exit 1
else
    exit 0
fi
|
JosephCaillet/vinimay
|
run_tests.sh
|
Shell
|
agpl-3.0
| 860 |
#!/bin/bash
# Replicate the equilibrium-position output file once per normal mode:
# for every mode M the chosen file is copied to eqb/<prefix>M_0.out.
# The output file is selected as the first command-line argument; the number
# of modes and the file-name prefix (e.g. hyper_ccsd_c_) are read
# interactively.
input="$1" # the input file.
echo -n "Enter the number of modes and press [ENTER] : "
read num
echo -n "Enter the prefix name and press [ENTER] : "
read prefix
mkdir -p eqb
# One copy per mode, echoing the mode number as progress output.
for (( mode = 1; mode <= num; mode++ ))
do
    cat "$input" > "eqb/${prefix}${mode}_0.out"
    echo "$mode"
done
printf "Processed. All files in eqb folder.\n"
|
ankit7540/dalton_extracts
|
DFT/duplicate_0_0_file.sh
|
Shell
|
agpl-3.0
| 627 |
#!/usr/bin/env bash
# Regression tests for "bup ls" output formatting, driven by the wvtest
# harness (WVPASS/WVPASSEQ/WVSTART come from wvtest-bup.sh).
# The multi-line strings below are literal expected listings — do not
# re-indent or reflow them.
. ./wvtest-bup.sh || exit $?
. t/lib.sh || exit $?

set -o pipefail

top="$(WVPASS pwd)" || exit $?
tmpdir="$(WVPASS wvmktempdir)" || exit $?

export BUP_DIR="$tmpdir/bup"
export GIT_DIR="$tmpdir/bup"

# Always invoke the bup under test, regardless of cwd.
bup() { "$top/bup" "$@"; }

# Fixed timezone so formatted dates in the expected output are stable.
export TZ=UTC

WVPASS bup init
WVPASS cd "$tmpdir"

# Build a source tree containing one of every file type bup ls can render:
# dotfile, executable, fifo, socket, regular file, symlink.
WVPASS mkdir src
WVPASS touch src/.dotfile src/executable
WVPASS mkfifo src/fifo
WVPASS "$top"/t/mksock src/socket
WVPASS bup random 1k > src/file
WVPASS chmod u+x src/executable
WVPASS chmod -R u=rwX,g-rwx,o-rwx .
# Fixed mtime (1969-07-20 20:18 UTC) so listings are reproducible.
WVPASS touch -t 196907202018 src/.dotfile src/*
(WVPASS cd src; WVPASS ln -s file symlink) || exit $?
WVPASS touch -t 196907202018 src
WVPASS touch -t 196907202018 .
WVPASS bup index src
WVPASS bup save -n src -d 242312160 src
WVPASS bup tag some-tag src

WVSTART "ls (short)"

WVPASSEQ "$(WVPASS bup ls /)" "src"

WVPASSEQ "$(WVPASS bup ls -A /)" ".commit
.tag
src"

WVPASSEQ "$(WVPASS bup ls -AF /)" ".commit/
.tag/
src/"

WVPASSEQ "$(WVPASS bup ls -a /)" ".
..
.commit
.tag
src"

WVPASSEQ "$(WVPASS bup ls -aF /)" "./
../
.commit/
.tag/
src/"

WVPASSEQ "$(WVPASS bup ls /.tag)" "some-tag"

WVPASSEQ "$(WVPASS bup ls /src)" \
"1977-09-05-125600
latest"

WVPASSEQ "$(WVPASS bup ls src/latest/"$tmpdir"/src)" "executable
fifo
file
socket
symlink"

WVPASSEQ "$(WVPASS bup ls -A src/latest/"$tmpdir"/src)" ".dotfile
executable
fifo
file
socket
symlink"

WVPASSEQ "$(WVPASS bup ls -a src/latest/"$tmpdir"/src)" ".
..
.dotfile
executable
fifo
file
socket
symlink"

# -F appends a type indicator; --file-type is the same but skips '*' for
# executables.
WVPASSEQ "$(WVPASS bup ls -F src/latest/"$tmpdir"/src)" "executable*
fifo|
file
socket=
symlink@"

WVPASSEQ "$(WVPASS bup ls --file-type src/latest/"$tmpdir"/src)" "executable
fifo|
file
socket=
symlink@"

WVPASSEQ "$(WVPASS bup ls -d src/latest/"$tmpdir"/src)" "src/latest$tmpdir/src"

WVSTART "ls (long)"
# Long listings are whitespace-normalized via tr before comparison.
WVPASSEQ "$(WVPASS bup ls -l / | tr -s ' ' ' ')" \
"d--------- ?/? 0 1970-01-01 00:00 src"

WVPASSEQ "$(WVPASS bup ls -lA / | tr -s ' ' ' ')" \
"d--------- ?/? 0 1970-01-01 00:00 .commit
d--------- ?/? 0 1970-01-01 00:00 .tag
d--------- ?/? 0 1970-01-01 00:00 src"

WVPASSEQ "$(WVPASS bup ls -lAF / | tr -s ' ' ' ')" \
"d--------- ?/? 0 1970-01-01 00:00 .commit/
d--------- ?/? 0 1970-01-01 00:00 .tag/
d--------- ?/? 0 1970-01-01 00:00 src/"

WVPASSEQ "$(WVPASS bup ls -la / | tr -s ' ' ' ')" \
"d--------- ?/? 0 1970-01-01 00:00 .
d--------- ?/? 0 1970-01-01 00:00 ..
d--------- ?/? 0 1970-01-01 00:00 .commit
d--------- ?/? 0 1970-01-01 00:00 .tag
d--------- ?/? 0 1970-01-01 00:00 src"

WVPASSEQ "$(WVPASS bup ls -laF / | tr -s ' ' ' ')" \
"d--------- ?/? 0 1970-01-01 00:00 ./
d--------- ?/? 0 1970-01-01 00:00 ../
d--------- ?/? 0 1970-01-01 00:00 .commit/
d--------- ?/? 0 1970-01-01 00:00 .tag/
d--------- ?/? 0 1970-01-01 00:00 src/"

# Symlink metadata (mode/size/date) varies by platform, so capture what the
# local system and bup actually report and splice it into the expectations.
symlink_mode="$(WVPASS ls -l src/symlink | cut -b -10)" || exit $?

symlink_bup_info="$(WVPASS bup ls -l src/latest"$tmpdir"/src | grep symlink)" \
    || exit $?
symlink_date="$(WVPASS echo "$symlink_bup_info" \
    | WVPASS perl -ne 'm/.*? (\d+) (\d\d\d\d-\d\d-\d\d \d\d:\d\d)/ and print $2')" \
    || exit $?

if test "$(uname -s)" != NetBSD; then
    symlink_size="$(WVPASS bup-python -c "import os
print os.lstat('src/symlink').st_size")" || exit $?
else
    # NetBSD appears to return varying sizes, so for now, just ignore it.
    symlink_size="$(WVPASS echo "$symlink_bup_info" \
        | WVPASS perl -ne 'm/.*? (\d+) (\d\d\d\d-\d\d-\d\d \d\d:\d\d)/ and print $1')" \
        || exit $?
fi

uid="$(WVPASS id -u)" || exit $?
gid="$(WVPASS bup-python -c 'import os; print os.stat("src").st_gid')" || exit $?
user="$(WVPASS id -un)" || exit $?
group="$(WVPASS bup-python -c 'import grp, os;
print grp.getgrgid(os.stat("src").st_gid)[0]')" || exit $?

WVPASSEQ "$(bup ls -l src/latest"$tmpdir"/src | tr -s ' ' ' ')" \
"-rwx------ $user/$group 0 1969-07-20 20:18 executable
prw------- $user/$group 0 1969-07-20 20:18 fifo
-rw------- $user/$group 1024 1969-07-20 20:18 file
srwx------ $user/$group 0 1969-07-20 20:18 socket
$symlink_mode $user/$group $symlink_size $symlink_date symlink -> file"

WVPASSEQ "$(bup ls -la src/latest"$tmpdir"/src | tr -s ' ' ' ')" \
"drwx------ $user/$group 0 1969-07-20 20:18 .
drwx------ $user/$group 0 1969-07-20 20:18 ..
-rw------- $user/$group 0 1969-07-20 20:18 .dotfile
-rwx------ $user/$group 0 1969-07-20 20:18 executable
prw------- $user/$group 0 1969-07-20 20:18 fifo
-rw------- $user/$group 1024 1969-07-20 20:18 file
srwx------ $user/$group 0 1969-07-20 20:18 socket
$symlink_mode $user/$group $symlink_size $symlink_date symlink -> file"

WVPASSEQ "$(bup ls -lA src/latest"$tmpdir"/src | tr -s ' ' ' ')" \
"-rw------- $user/$group 0 1969-07-20 20:18 .dotfile
-rwx------ $user/$group 0 1969-07-20 20:18 executable
prw------- $user/$group 0 1969-07-20 20:18 fifo
-rw------- $user/$group 1024 1969-07-20 20:18 file
srwx------ $user/$group 0 1969-07-20 20:18 socket
$symlink_mode $user/$group $symlink_size $symlink_date symlink -> file"

WVPASSEQ "$(bup ls -lF src/latest"$tmpdir"/src | tr -s ' ' ' ')" \
"-rwx------ $user/$group 0 1969-07-20 20:18 executable*
prw------- $user/$group 0 1969-07-20 20:18 fifo|
-rw------- $user/$group 1024 1969-07-20 20:18 file
srwx------ $user/$group 0 1969-07-20 20:18 socket=
$symlink_mode $user/$group $symlink_size $symlink_date symlink@ -> file"

WVPASSEQ "$(bup ls -l --file-type src/latest"$tmpdir"/src | tr -s ' ' ' ')" \
"-rwx------ $user/$group 0 1969-07-20 20:18 executable
prw------- $user/$group 0 1969-07-20 20:18 fifo|
-rw------- $user/$group 1024 1969-07-20 20:18 file
srwx------ $user/$group 0 1969-07-20 20:18 socket=
$symlink_mode $user/$group $symlink_size $symlink_date symlink@ -> file"

# -n: numeric uid/gid instead of names.
WVPASSEQ "$(bup ls -ln src/latest"$tmpdir"/src | tr -s ' ' ' ')" \
"-rwx------ $uid/$gid 0 1969-07-20 20:18 executable
prw------- $uid/$gid 0 1969-07-20 20:18 fifo
-rw------- $uid/$gid 1024 1969-07-20 20:18 file
srwx------ $uid/$gid 0 1969-07-20 20:18 socket
$symlink_mode $uid/$gid $symlink_size $symlink_date symlink -> file"

WVPASSEQ "$(bup ls -ld "src/latest$tmpdir/src" | tr -s ' ' ' ')" \
"drwx------ $user/$group 0 1969-07-20 20:18 src/latest$tmpdir/src"

WVSTART "ls (backup set - long)"
# Backup-set entries render as symlink-like lines; only mode/owner checked.
WVPASSEQ "$(bup ls -l src | cut -d' ' -f 1-2)" \
"l--------- ?/?
l--------- ?/?"

WVSTART "ls (dates TZ != UTC)"
# Same listing rendered in another timezone must shift the displayed times.
export TZ=America/Chicago
symlink_date_central="$(bup ls -l src/latest"$tmpdir"/src | grep symlink)"
symlink_date_central="$(echo "$symlink_date_central" \
    | perl -ne 'm/.*? (\d+) (\d\d\d\d-\d\d-\d\d \d\d:\d\d)/ and print $2')"
WVPASSEQ "$(bup ls -ln src/latest"$tmpdir"/src | tr -s ' ' ' ')" \
"-rwx------ $uid/$gid 0 1969-07-20 15:18 executable
prw------- $uid/$gid 0 1969-07-20 15:18 fifo
-rw------- $uid/$gid 1024 1969-07-20 15:18 file
srwx------ $uid/$gid 0 1969-07-20 15:18 socket
$symlink_mode $uid/$gid $symlink_size $symlink_date_central symlink -> file"
unset TZ

WVPASS rm -rf "$tmpdir"
|
tjanez/bup
|
t/test-ls.sh
|
Shell
|
lgpl-2.1
| 6,831 |
#!/bin/bash
# ver 0.2
# wrapper to run a workflow
#
# CGI script: reads the workflow graph prepared under the working directory,
# generates gSOAP client stubs for every node's WSDL, emits a C driver
# (client.c) that calls the services in topological order, compiles it, and
# launches it in the background while serving a self-refreshing status page.
# Copyright (C) 2005, University of Alaska Fairbanks
# Biotechnology Computing Research Group
# https://biotech.inbre.alaska.edu
# [email protected]
# All rights reserved.
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301, USA.

# use the cgi bashlib
. /usr/local/lib/bashlib

# CGI parameters: working dir, gSOAP install dir, wflow helper dir, CGI dir.
wdir=`param wdir`
GSOAP=`param gsoap`
WFLOW_DIR=`param wflow_dir`
WFLOW_CGI=`param wflow_cgi`
# third path component of wdir is the per-run HTML directory name
html_dir=`echo $wdir | cut -d"/" -f3`
cd $wdir

# node_num counts graph nodes rendered so far; map_<n-1>.jpg is the latest image.
if [ -e $wdir/node_num ]
then
    node_num=`cat $wdir/node_num`
else
    node_num=0
fi
last_node_num=$((node_num - 1))

# Write output.html: an auto-refreshing page showing the clickable graph image.
echo "<html>" > output.html
echo "<head>" >> output.html
echo "<meta http-equiv=\"Pragma\" content=\"no-cache\">" >> output.html
echo "<meta http-equiv=\"Expires\" content=\"0\">" >> output.html
echo "<meta http-equiv=\"refresh\" content=\"20\">" >> output.html
echo "</head>" >> output.html
echo "<body>" >> output.html
echo "<map name=\"graphmap\">" >> output.html
cat $wdir/map.htm >> output.html
echo "</map>" >> output.html
echo "<img src=\"$WFLOW_SERVER/wflow/gsoap/$html_dir/map_${last_node_num}.jpg\" usemap=\"#graphmap\" border=\"0\">" >> output.html
echo "<br>" >> output.html
echo "<pre>" >> output.html
echo "</pre>" >> output.html
echo "</body>" >> output.html
echo "</html>" >> output.html

# Immediate CGI response: progress page that redirects to output.html.
echo "Content-type: text/html"
echo ""
echo "<html>"
echo "<head>"
echo "<meta http-equiv=\"refresh\" content=\"5;url=$WFLOW_SERVER/wflow/gsoap/$html_dir/output.html\">"
echo "</head>"
echo "<body>"
echo "<map name=\"graphmap\">"
cat $wdir/map.htm
echo "</map>"
echo "<img src=\"$WFLOW_SERVER/wflow/gsoap/$html_dir/map_${last_node_num}.jpg\" usemap=\"#graphmap\" border=\"0\">"
echo "<br>"
echo "<pre>"
echo "Running wsdl2h and gSOAP compiler...<br>"

# parse the workflow graph, and output the order of nodes to process
if [ -e $wdir/node_order ]
then
    rm $wdir/node_order
fi
touch $wdir/node_order
$WFLOW_DIR/output_order $wdir

# run wsdl2h on each wsdl file, concatenating the resulting .h files into one .h
if [ -e $wdir/gSOAP ]
then
    rm -rf $wdir/gSOAP
fi
mkdir $wdir/gSOAP
chmod 755 $wdir/gSOAP
wsdl_num=0
while [ -e $wdir/wsdl_$wsdl_num ]
do
    $GSOAP/wsdl2h -c $wdir/wsdl_$wsdl_num -o $wdir/gSOAP/n${wsdl_num}wflow.h -n n${wsdl_num}wflow
    cat $wdir/gSOAP/n${wsdl_num}wflow.h >> $wdir/gSOAP/wflow_head.h
    wsdl_num=$((wsdl_num + 1))
done

# run the gSOAP compiler
$GSOAP/soapcpp2 -c -I$GSOAP -d $wdir/gSOAP $wdir/gSOAP/wflow_head.h

# for each node to process, determine parameter names
# each node output has a unique name that may serve as the input name to a child
# create a C sourcefile with the proper order and names, compile, and run
cd $wdir/gSOAP
if [ -r callfile ]
then
    rm callfile
fi
if [ -r tempfile ]
then
    rm tempfile
fi
echo "Creating C source code of gSOAP calls to run graph...<br>"
# --- generate client.c: headers ---
echo "#include \"soapH.h\"" > client.c
echo "#include <stdio.h>" >> client.c
echo "#include <stdlib.h>" >> client.c
echo "#include <string.h>" >> client.c
echo "#include <sys/stat.h>" >> client.c
echo "#include <sys/types.h>" >> client.c
echo "#include <unistd.h>" >> client.c
echo "" >> client.c
# declare the namespace tables (one per WSDL, copied from the .nsmap files)
for i in `ls ../wsdl_*`
do
    j=`echo $i | sed "s/\.\.\///"`
    i=$j
    echo "SOAP_NMAC struct Namespace ${i}[] = " >> client.c
#    cat $i | grep -v "#include" | grep -v "namespaces\[\]" | \
#    grep "[^$]" >> client.c
##    grep "[^$]" | sed "s/^/ /" >> client.c
##### next line is a hack
    nsmap=`cat ../$i | grep "definitions name=" | sed "s/.*name=//" |\
    sed "s/\"//g"`
    cat ${nsmap}.nsmap | grep -v "#include" | grep -v "namespaces\[\]" | \
    grep "[^$]" >> client.c
done
echo "" >> client.c
echo "struct Namespace *namespaces;" >> client.c
echo "" >> client.c
echo "char * get_data(char *, char *);" >> client.c
echo "" >> client.c
# --- generate client.c: main() prologue ---
echo "int main(int argc, char **argv)" >> client.c
echo "{" >> client.c
echo " FILE * outfile;" >> client.c
echo " pid_t pid;" >> client.c
echo " char buffer[1026];" >> client.c
echo " char num[64];" >> client.c
# declare and set variables & print function calls
echo " struct soap soap;" >> client.c
for i in `cat $wdir/node_order`
do
    # which wsdl does this service use?
    wsdl=`cat $wdir/graph.dot | grep $i | grep wsdl | cut -d" " -f2 |\
    sed "s/.*wsdl=/wsdl_/" | sed "s/&.*//"`
    ##### location hack for now
    location=`tail -50 $wdir/$wsdl | grep location= | sed "s/\/>//" | \
    sed "s/.*location=//"`
    $WFLOW_DIR/declare_vars $wdir $i $wsdl $location
done
rm $wdir/node_order
echo "" >> client.c
echo " soap_init(&soap);" >> client.c
echo "" >> client.c
# main() starts a monitor CGI process with its own pid and the work dir
echo " buffer[0] = num[0] = 0x0;" >> client.c
echo " pid = getpid();" >> client.c
echo " sprintf(num, \" %d \", pid);" >> client.c
echo " strcat(buffer, \"$WFLOW_CGI/monitor_me.sh \");" >> client.c
echo " strcat(buffer, num);" >> client.c
echo " strcat(buffer, argv[1]);" >> client.c
echo " strcat(buffer, \" &\");" >> client.c
echo " system(buffer);" >> client.c
# parameter initializations are in "tempfile"
cat tempfile >> client.c
# function calls are in "callfile"
cat callfile >> client.c
echo "" >> client.c
echo " outfile = fopen(\"outfile\", \"a\");" >> client.c
echo " fprintf(outfile, \"\n\nWFLOW done.\n\");" >> client.c
echo " fflush(NULL);" >> client.c
echo " fclose(outfile);" >> client.c
echo " return 0;" >> client.c
echo "}" >> client.c
echo "" >> client.c
# --- generate client.c: get_data() helper reading ../DATA/<node>/<param> ---
echo "char * get_data(char * node, char * param)" >> client.c
echo "{" >> client.c
echo " char buffer[1024], * ret_pt;" >> client.c
echo " long size;" >> client.c
echo " struct stat sbuf;" >> client.c
echo " FILE * infile;" >> client.c
echo "" >> client.c
echo " buffer[0] = 0x0;" >> client.c
echo " strcat(buffer, \"../DATA/\");" >> client.c
echo " strcat(buffer, node);" >> client.c
echo " strcat(buffer, \"/\");" >> client.c
echo " strcat(buffer, param);" >> client.c
echo " if(stat(buffer, &sbuf) == -1)" >> client.c
echo " {" >> client.c
echo " fprintf(stderr, \"client: error getting file stats for %s, returning...\n\", buffer);" >> client.c
echo " fflush(NULL);" >> client.c
echo " return NULL;" >> client.c
echo " }" >> client.c
echo "" >> client.c
echo " ret_pt = (char *) malloc(sbuf.st_size + 1);" >> client.c
echo "" >> client.c
echo " infile = fopen(buffer, \"r\");" >> client.c
echo " if(infile == NULL)" >> client.c
echo " {" >> client.c
echo " fprintf(stderr, \"client: file open error, %s\n\", buffer); " >> client.c
echo " fflush(NULL);" >> client.c
echo " return NULL;" >> client.c
echo " }" >> client.c
echo "" >> client.c
echo " size = fread(ret_pt, 1, sbuf.st_size, infile);" >> client.c
echo " if(size != sbuf.st_size)" >> client.c
echo " {" >> client.c
echo " fprintf(stderr, \"client: error reading file %s, returning...\n\", buffer);" >> client.c
echo " fflush(NULL);" >> client.c
echo " return NULL;" >> client.c
echo " }" >> client.c
echo "" >> client.c
echo " ret_pt[sbuf.st_size] = 0x0;" >> client.c
echo " return ret_pt;" >> client.c
echo "}" >> client.c

# compile and run
echo "Compiling code...<br>"
cp $GSOAP/stdsoap2.c $wdir/gSOAP
cp $GSOAP/stdsoap2.h $wdir/gSOAP
echo "Output from compilation:" > compile_file
echo "" >> compile_file
gcc -c -DWITH_NONAMESPACES -Wall -I. soapC.c >> compile_file 2>&1
gcc -c -DWITH_NONAMESPACES -Wall -I. soapClient.c >> compile_file 2>&1
gcc -c -DWITH_NONAMESPACES -Wall -I. stdsoap2.c >> compile_file 2>&1
gcc -DWITH_NONAMESPACES -Wall -I. -o client client.c *.o >> compile_file 2>&1
echo "Done, beginning to run workflow...<br>"
echo "<br>"
echo "<br>"
echo "View <a href=\"$WFLOW_SERVER/wflow/gsoap/$html_dir/gSOAP/compile_file\" target=\"_blank\">compilation output</a><br>"
echo "View <a href=\"$WFLOW_SERVER/wflow/gsoap/$html_dir/gSOAP/client.c\" target=\"_blank\">source code</a><br>"
echo "</pre>"
echo "</body>"
echo "</html>"
# Detach the compiled workflow client (redirect all streams so the kill
# below won't hang on a shared terminal), then terminate this CGI process.
# BUGFIX: stdin was redirected from /dev (a directory, unreadable); the
# intended null input source is /dev/null.
./client "$wdir" < /dev/null >& /dev/null &
kill -9 $$
|
jlong777/wflow
|
cgi/run_workflow.sh
|
Shell
|
lgpl-2.1
| 11,757 |
#! /bin/ksh
# Script : Salut.sh
# Description : Says hello (French: "Dit bonjour")
# TRIGGER ID : salut
#############################################################
# VARIABLES
#############################################################
#############################################################
# BROADCASTING THE RESPONSE
#############################################################
# Voice output; VOX and HISTORIQUE_FILE are presumably exported by the
# Jarvis plugin host — TODO confirm.
say -v "${VOX}" "Salut !"
# Append the reply with an HH:MM timestamp to the history file.
echo "RETURN:Salut !:$(date +%H:%M)" >> ${HISTORIQUE_FILE}
# Closing the terminal (disabled)
#osascript -e 'tell application "Terminal" to quit' &
|
kimous/Jarvis
|
plugins/Salut.sh
|
Shell
|
lgpl-3.0
| 541 |
#!/bin/bash
# BUGFIX: the shebang must be the very first line of the file to take
# effect; it previously appeared after the comment block.
#
# Text-to-speech batch conversion via wine and Windows TTSApp.exe.
# On Linux, pico2wave (part of libttspico-utils in Ubuntu 14.04) is an
# alternative:
#   pico2wave --wave=test.wav "$(cat filename.txt)"
#
# Usage: chmod +x wine.sh && ./wine.sh ~/path/to/files
# Converts every file in the given directory to <file>.wav.
ARG=$1
for i in "${ARG}"/* ;
do
  wine ttsUtil.exe "${i}" -voice=nick -output="${i}".wav
done
|
Aurametrix/Alg-U
|
ASR/wine.sh
|
Shell
|
unlicense
| 382 |
#!/bin/bash
# Push the scoring project definition to the CatWatch config endpoint.
# Fails fast on any error.
set -e

# Target endpoint for the scoring project configuration.
url=http://ec2-52-28-130-84.eu-central-1.compute.amazonaws.com/config/scoring.project

# POST the local scoring.project.js as the request body, tagged with the
# "galanto" organization header.
curl -H "Content-Type: application/json; charset=ISO-8859-1" \
  -X POST \
  -d @scoring.project.js \
  --header "X-Organizations: galanto" \
  "$url"
|
marky-mark/catwatch
|
catwatch-score/scoring.project.sh
|
Shell
|
apache-2.0
| 242 |
# Provision the vagrant user on NetBSD: account, SSH key, sshd tuning and
# passwordless sudo.  Requires $VAGRANT_USER and $VAGRANT_PASSWORD.
useradd -m "$VAGRANT_USER"
# $() instead of legacy backticks; set the password hash and wheel group.
usermod -p "$(pwhash "$VAGRANT_PASSWORD")" -G wheel "$VAGRANT_USER"
mkdir -p "/home/$VAGRANT_USER/.ssh"
pkg_add wget-1.16.3
# Install the well-known Vagrant insecure public key.
wget --no-check-certificate -O - https://raw.github.com/mitchellh/vagrant/master/keys/vagrant.pub >> "/home/$VAGRANT_USER/.ssh/authorized_keys"
chown -R "$VAGRANT_USER" "/home/$VAGRANT_USER"
chmod -R og-rwx "/home/$VAGRANT_USER/.ssh"
# Disable DNS lookups in sshd for faster SSH logins.
sed \
  -e '/^#UseDNS /s/^#//' \
  -e '/^UseDNS /s/ yes$/ no/' /etc/ssh/sshd_config > /tmp/sshd_config
mv /tmp/sshd_config /etc/ssh/sshd_config
# Allow passwordless sudo for the vagrant user.
echo "$VAGRANT_USER ALL=(ALL) NOPASSWD:ALL" >> "/usr/pkg/etc/sudoers.d/$VAGRANT_USER"
|
upperstream/packer-templates
|
netbsd/provisioners/vagrant.sh
|
Shell
|
apache-2.0
| 599 |
#!/bin/sh
# Do not use nounset here
# Do not use errexit here
# Roses are red, violets are blue, sugar is sweet, and so are you.
# Enjoy your usual ratio: 5% of lines do the actual work, and the rest are there to make sure they work. (It's like 1%, actually)
# Alternative scripts don't need know about what source/target files are;
# they let xdelta3 use the default filenames stored in vcdiff file
# (might not be correct, like tempSource.file, ???, or in the encoding it doesn't understand)
# Remember the caller's directory so we can restore it on every exit path.
WORKINGDIR=$(pwd)
# Absolute directory of this script; all inputs are resolved relative to it.
SCRIPTDIR="$(cd "$(dirname "$0")" && pwd)"
cd "$SCRIPTDIR"
# NOTE(review): "$@" collapses into a single string here; args is never
# used below, so this is harmless, but an array would be needed to keep
# the arguments separate.
args="$@"
# Optional drop-in source file supplied by the user (first argument).
dropin="$1"
sourcefile=''
targetfile=''
# Default patch file and backup directory names.
changes="changes.vcdiff"
olddir="old"
# Locate a usable xdelta3 binary and store the launch command in the
# global $app.  Preference order: bundled Linux binary matching the CPU
# architecture, system-wide xdelta3, then xdelta3.exe via WinE.
# Returns 0 on success, 1 (after restoring the caller's cwd) on failure.
find_xdelta3() {
	# Best-effort: make the bundled binaries executable (ignore failures).
	chmod +x ./xdelta3 2>/dev/null
	chmod +x ./xdelta3.x86_64 2>/dev/null
	# Map `uname -m` output to a coarse architecture tag.
	case $(uname -m) in
		i*86) arch=x86;;
		Pentium*|AMD?Athlon*) arch=x86;;
		amd64|x86_64) arch=x86_64;;
		*) arch=other;;
	esac
	# `file | grep GNU/Linux` guards against picking up a non-Linux binary.
	if [ "$(uname)" = "Linux" ] && [ "$arch" = "x86_64" ] && [ -x ./xdelta3.x86_64 ] && file ./xdelta3.x86_64 | grep -q "GNU/Linux"; then
		app="./xdelta3.x86_64"
	elif [ "$(uname)" = "Linux" ] && [ "$arch" = "x86" ] && [ -x ./xdelta3 ] && file ./xdelta3 | grep -q "GNU/Linux"; then
		app="./xdelta3"
	elif hash xdelta3 2>/dev/null; then
		app="xdelta3"
	elif hash wine 2>/dev/null && [ -f "xdelta3.exe" ]; then
		app="wine ./xdelta3.exe"
	else
		echo "Error: The required application is not found or inaccessible."
		echo "Please either make sure the file \"xdelta3\" has execute rights, install xdelta3 [recommended], or install WinE."
		cd "$WORKINGDIR"
		return 1
	fi
	return 0
}
# Validate the optional drop-in source file and the required vcdiff file.
# Sets the global $sourcefile when a usable drop-in was supplied.
# Returns 0 when $changes exists, otherwise restores the caller's cwd
# and returns 1.
find_inputs() {
	if [ -n "$dropin" ] && [ "$dropin" != " " ]; then
		if [ -f "$dropin" ]; then
			sourcefile="$dropin"
		else
			echo "Warning: Input file \"$dropin\" is not found. Ignored."
		fi
	fi
	if [ -f "$changes" ]; then
		return 0
	fi
	echo "Error: VCDIFF file \"$changes\" is missing."
	echo "Please extract everything from the archive."
	cd "$WORKINGDIR"
	return 1
}
# Apply the vcdiff patch with xdelta3.  $app is intentionally unquoted:
# it may contain a command plus arguments (e.g. "wine ./xdelta3.exe").
# Returns xdelta3's exit status.
run_patch () {
	echo "Attempting to patch..."
	# BUGFIX: the commands were wrapped in backticks, so xdelta3's stdout
	# was captured and then re-executed as a shell command.  Run them
	# directly instead.
	if [ -n "$sourcefile" ] && [ "$sourcefile" != " " ]; then
		$app -d -f -s "$sourcefile" "$changes"
		return $?
	else
		$app -d -f "$changes"
		return $?
	fi
}
# Main: locate xdelta3, validate inputs, then patch.  The script may be
# either executed or sourced; `return N 2>/dev/null || exit N` works in
# both cases (return fails with an error when not sourced, so exit runs).
if find_xdelta3 && find_inputs; then
	if run_patch; then
		echo "Done."
		cd "$WORKINGDIR"
		return 0 2>/dev/null || exit 0
	else
		echo "Error: Patching wasn't successful!"
	fi
fi
# Failure path: restore the caller's working directory before leaving.
cd "$WORKINGDIR"
return 1 2>/dev/null || exit 1
|
dreamer2908/YAXBPC
|
YAXBPC/CopyMe/apply_patch_linux_alternative.sh
|
Shell
|
apache-2.0
| 2,393 |
#!/bin/bash
# Jenkins build step: refresh the mobile-survey webapp sources and
# rebuild its Docker image.  Each step aborts the script (propagating
# its exit status) if it fails.
cd /opt/infrastructure/docker/images/mobile-survey/mobile-survey-webapp || exit
git pull origin master || exit
docker build -t mobile-survey /opt/infrastructure/docker/images/mobile-survey/
|
ecohealthalliance/infrastructure
|
jenkins/build-mobile-survey.sh
|
Shell
|
apache-2.0
| 224 |
#!/bin/bash
#
# Build Docker images for Kubebench example tf-cnn.
#
# build_image.sh ${SRC_DIR} ${DOCKERFILE} ${IMAGE} ${VERSION}
#
# Requires in the environment: REGISTRY, REPO_NAME, PROJECT and
# GOOGLE_APPLICATION_CREDENTIALS.
set -ex
# Resolve inputs to absolute paths (quoted so paths with spaces work).
SRC_DIR=$(realpath "$1")
DOCKERFILE=$(realpath "$2")
IMAGE=$3
VERSION=$4
TAG=${REGISTRY}/${REPO_NAME}/${IMAGE}:${VERSION}
echo "Setup build directory"
# BUGFIX: `export VAR=$(cmd)` returns export's status, so a mktemp
# failure was invisible to `set -e`; assign first, then export.
# Also: $() instead of legacy backticks.
BUILD_DIR=$(mktemp -d -p "$(dirname "$SRC_DIR")")
export BUILD_DIR
echo "Copy source and Dockerfile to build directory"
cp -r "${SRC_DIR}/examples" "${BUILD_DIR}/examples"
cp "${DOCKERFILE}" "${BUILD_DIR}/Dockerfile"
echo "Change working directory to ${BUILD_DIR}"
cd "${BUILD_DIR}"
echo "Authenticate gcloud account"
gcloud auth activate-service-account --key-file="${GOOGLE_APPLICATION_CREDENTIALS}"
echo "Build image ${TAG}"
gcloud builds submit --tag="${TAG}" --project="${PROJECT}" .
echo "Clean up build directory"
# cd home first so we are not inside the directory being removed.
cd
rm -rf "${BUILD_DIR}"
echo "Image ${TAG} built successfully"
|
kubeflow/kubebench
|
build/images/examples/tf-cnn/build_image.sh
|
Shell
|
apache-2.0
| 844 |
#!/bin/bash
# Deploy a resource file to the Spark master, but only if it is not
# already present in the remote "resources" directory.
thisDir=$(dirname "$0")
thisDir=$(readlink -f "$thisDir")
# Provides $sparkUser and $sparkMaster.
source "$thisDir/load-spark-config.sh"
resourceFile=$1
resourceFileName="${resourceFile##*/}"
# Count matching files on the remote host (0 when absent).
out=$(ssh "$sparkUser@$sparkMaster" 'ls resources/'"$resourceFileName"' | wc -l')
# BUGFIX: $out was unquoted in `[ ! $out = "1" ]`, which is a test
# syntax error whenever ssh fails and $out is empty.
if [ "$out" != "1" ]; then
	"${thisDir}/spark-deploy-resource.sh" "$resourceFile"
fi
|
mbalassi/streaming-performance
|
src/test/resources/Performance/spark-deploy-resource-if-needed.sh
|
Shell
|
apache-2.0
| 325 |
# Manifest for Dockerfiles creation
# every dest path will be prefixed by $DESTDIR/$version
#
# Each *_RULES variable below is a data string consumed by the generator:
# records are separated by ';', and each record is a set of key=value
# pairs (src/dest/mode for files, link_target/link_name for symlinks).
# Files containing distgen directives
DISTGEN_RULES="
src=src/cccp.yml
dest=cccp.yml;
src=src/README.md
dest=README.md;
src=src/root/opt/app-root/etc/scl_enable
dest=root/opt/app-root/etc/scl_enable;
src=src/s2i/bin/assemble
dest=s2i/bin/assemble
mode=0755;
src=src/s2i/bin/usage
dest=s2i/bin/usage
mode=0755;
src=test/run
dest=test/run
mode=0755;
src=src/test/pipenv-test-app/Pipfile
dest=test/pipenv-test-app/Pipfile;
src=src/test/pipenv-test-app/Pipfile.lock
dest=test/pipenv-test-app/Pipfile.lock;
src=src/test/pipenv-test-app/Pipfile
dest=test/micropipenv-test-app/Pipfile;
src=src/test/pipenv-test-app/Pipfile.lock
dest=test/micropipenv-test-app/Pipfile.lock;
src=src/test/from-dockerfile/Dockerfile_no_s2i.tpl
dest=test/from-dockerfile/Dockerfile_no_s2i.tpl;
"
# Files containing distgen directives, which are used for each
# (distro, version) combination not excluded in multispec
DISTGEN_MULTI_RULES="
src=src/Dockerfile.template
dest=Dockerfile;
src=src/Dockerfile.template
dest=Dockerfile.rhel7;
src=src/Dockerfile.template
dest=Dockerfile.rhel8;
src=src/Dockerfile.template
dest=Dockerfile.fedora;
"
# Symbolic links
# (link_target is relative to the directory containing link_name)
SYMLINK_RULES="
link_target=../../examples/app-home-test-app
link_name=test/app-home-test-app;
link_target=../../examples/django-test-app
link_name=test/django-test-app;
link_target=../../examples/locale-test-app
link_name=test/locale-test-app;
link_target=../../examples/micropipenv-requirements-test-app
link_name=test/micropipenv-requirements-test-app;
link_target=../../examples/mod-wsgi-test-app
link_name=test/mod-wsgi-test-app;
link_target=../../examples/npm-virtualenv-uwsgi-test-app
link_name=test/npm-virtualenv-uwsgi-test-app;
link_target=../../examples/numpy-test-app
link_name=test/numpy-test-app;
link_target=../../examples/pin-pipenv-version-test-app
link_name=test/pin-pipenv-version-test-app;
link_target=../../examples/setup-requirements-test-app
link_name=test/setup-requirements-test-app;
link_target=../../examples/setup-test-app
link_name=test/setup-test-app;
link_target=../../examples/standalone-test-app
link_name=test/standalone-test-app;
link_target=../../src/test/pipenv-and-micropipenv-should-fail-test-app
link_name=test/pipenv-and-micropipenv-should-fail-test-app;
link_target=../../../src/test/from-dockerfile/Dockerfile.tpl
link_name=test/from-dockerfile/Dockerfile.tpl;
link_target=../../test/run-openshift
link_name=test/run-openshift;
link_target=../../common/test-lib.sh
link_name=test/test-lib.sh;
link_target=../../common/test-lib-openshift.sh
link_name=test/test-lib-openshift.sh;
link_target=../../test/test-lib-python.sh
link_name=test/test-lib-python.sh;
link_target=../../imagestreams
link_name=test/imagestreams;
link_target=../../common/check_imagestreams.py
link_name=test/check_imagestreams.py;
"
# Files to copy
# (copied verbatim, with an optional octal mode)
COPY_RULES="
src=src/root/opt/app-root/etc/generate_container_user
dest=root/opt/app-root/etc/generate_container_user;
src=src/s2i/bin/run
dest=s2i/bin/run
mode=0755;
src=src/s2i/bin/init-wrapper
dest=s2i/bin/init-wrapper
mode=0755;
src=examples/pipenv-test-app/testapp.py
dest=test/pipenv-test-app/testapp.py;
src=examples/pipenv-test-app/setup.py
dest=test/pipenv-test-app/setup.py;
src=examples/pipenv-test-app/.s2i/environment
dest=test/pipenv-test-app/.s2i/environment;
src=examples/pipenv-test-app/.gitignore
dest=test/pipenv-test-app/.gitignore;
src=examples/micropipenv-test-app/testapp.py
dest=test/micropipenv-test-app/testapp.py;
src=examples/micropipenv-test-app/setup.py
dest=test/micropipenv-test-app/setup.py;
src=examples/micropipenv-test-app/.s2i/environment
dest=test/micropipenv-test-app/.s2i/environment;
src=examples/micropipenv-test-app/.gitignore
dest=test/micropipenv-test-app/.gitignore;
"
|
openshift/sti-python
|
manifest.sh
|
Shell
|
apache-2.0
| 4,222 |
#!/bin/bash
# Drive the Firefox remote-control (MozRepl-style) port: load the sports
# view, go fullscreen, hide the menu bar, then keep the connection open
# briefly so the commands are delivered.
# BUGFIX: `sleep 10 }` lacked the `;` required before the closing brace
# of a group command, making the whole script a syntax error.
{
  echo 'content.location.href="file:///home/pi/wall_display/views/sports.html"'
  echo 'window.fullScreen = true'
  echo 'document.getElementById("toolbar-menubar").hidden = true'
  sleep 10
} | telnet localhost 7070
|
jwalthour/wall_display
|
set_view_4.sh
|
Shell
|
apache-2.0
| 227 |
#!/usr/bin/env bash
# BUGFIX: the shebang was "#/usr/bin/env bash" (missing '!'), so it was
# just a comment and the script ran under the caller's default shell.
#
# Install the site toolchain (Pygments for highlighting, TypeScript,
# pygments.rb + jekyll) and build the site.
sudo pip install Pygments
npm install -g typescript
gem install pygments.rb
gem install jekyll
jekyll build
|
Spirals-Team/codEnergy
|
scripts/install.sh
|
Shell
|
apache-2.0
| 130 |
#!/bin/bash
#
# Copyright 2015 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Test //external mechanisms
#
# Load test environment
# Directory containing this test script; the helper libraries live
# alongside it.
src_dir=$(cd "$(dirname ${BASH_SOURCE[0]})" && pwd)
source $src_dir/test-setup.sh \
  || { echo "test-setup.sh not found!" >&2; exit 1; }
source $src_dir/remote_helpers.sh \
  || { echo "remote_helpers.sh not found!" >&2; exit 1; }
# Point the Java launcher at the test's runfiles tree.
export JAVA_RUNFILES=$TEST_SRCDIR
# Per-test setup: create a fresh local maven repository directory under
# $TEST_TMPDIR and start the file server (from remote_helpers.sh) on it.
function set_up() {
  # Set up custom repository directory.
  m2=$TEST_TMPDIR/my-m2
  rm -rf $m2
  mkdir -p $m2
  startup_server $m2
}
# Per-test teardown: stop the file server, then remove the maven repo
# directory created by set_up.
function tear_down() {
  shutdown_server
  rm -rf $m2
}
# Thin wrapper around the generate_workspace binary under test.
# BUGFIX: forward arguments with "$@" (quoted) so arguments containing
# spaces are not word-split before reaching the binary.
function generate_workspace() {
  "${bazel_data}/src/tools/generate_workspace/generate_workspace" "$@"
}
# Takes: groupId, artifactId, and version.
# Publishes a minimal maven artifact (pom + jar with one class) into the
# local $m2 repository so the test file server can serve it.
function make_artifact() {
  local groupId=$1
  local artifactId=$2
  local version=$3
  local pkg_dir=$m2/$groupId/$artifactId/$version
  mkdir -p $pkg_dir
  # Make the pom.xml.
  cat > $pkg_dir/$artifactId-$version.pom <<EOF
<project>
  <modelVersion>4.0.0</modelVersion>
  <groupId>$artifactId</groupId>
  <artifactId>$artifactId</artifactId>
  <version>$version</version>
</project>
EOF
  # Make the jar with one class (we use the groupId for the classname).
  cat > $TEST_TMPDIR/$groupId.java <<EOF
public class $groupId {
  public static void print() {
    System.out.println("$artifactId");
  }
}
EOF
  # Compile and package with the JDK bundled in the test environment.
  ${bazel_javabase}/bin/javac $TEST_TMPDIR/$groupId.java
  ${bazel_javabase}/bin/jar cf $pkg_dir/$artifactId-$version.jar $TEST_TMPDIR/$groupId.class
}
# generate_workspace prints the generated WORKSPACE path as the
# second-to-last line of its output (captured in $TEST_log).
# Fix: read the file directly instead of the useless `cat |` pipeline.
function get_workspace_file() {
  tail -n 2 "$TEST_log" | head -n 1
}
# generate_workspace prints the generated BUILD file path as the last
# line of its output (captured in $TEST_log).
# Fix: read the file directly instead of the useless `cat |` pipeline.
function get_build_file() {
  tail -n 1 "$TEST_log"
}
# End-to-end test: publish an artifact, reference it from a pom.xml, run
# generate_workspace, and check the emitted WORKSPACE/BUILD contents.
function test_pom() {
  # Create a maven repo
  make_artifact blorp glorp 1.2.3
  # Create a pom that references the artifacts.
  cat > $TEST_TMPDIR/pom.xml <<EOF
<project>
  <modelVersion>4.0.0</modelVersion>
  <groupId>my</groupId>
  <artifactId>thing</artifactId>
  <version>1.0</version>
  <repositories>
    <repository>
      <id>my-repo1</id>
      <name>a custom repo</name>
      <url>http://localhost:$fileserver_port/</url>
    </repository>
  </repositories>
  <dependencies>
    <dependency>
      <groupId>blorp</groupId>
      <artifactId>glorp</artifactId>
      <version>1.2.3</version>
    </dependency>
  </dependencies>
</project>
EOF
  generate_workspace --maven_project=$TEST_TMPDIR &> $TEST_log \
    || fail "generating workspace failed"
  # The tool prints the WORKSPACE path (2nd-to-last line) and BUILD path
  # (last line); snapshot both files for the assertions below.
  cat $(cat $TEST_log | tail -n 2 | head -n 1) > ws
  cat $(cat $TEST_log | tail -n 1) > build
  assert_contains "artifact = \"blorp:glorp:1.2.3\"," ws
  assert_contains "repository = \"http://localhost:$fileserver_port/\"," ws
  assert_contains "\"@blorp_glorp//jar\"," build
}
# Error handling: the tool must report a missing pom and malformed XML
# rather than crash.
function test_invalid_pom() {
  # No pom file.
  rm -f $TEST_TMPDIR/pom.xml
  generate_workspace -m $TEST_TMPDIR &> $TEST_log
  expect_log "Non-readable POM $TEST_TMPDIR/pom.xml"

  # Invalid XML.
  cat > $TEST_TMPDIR/pom.xml <<EOF
<project>
EOF
  generate_workspace -m $TEST_TMPDIR &> $TEST_log
  expect_log "expected end tag </project>"
}
# Regression test: a pom containing a <profiles> section with a
# property-based activation must be accepted (only checks that the run
# succeeds; no output assertions).
function test_profile() {
  cat > $TEST_TMPDIR/pom.xml <<EOF
<project>
  <modelVersion>4.0.0</modelVersion>
  <groupId>my</groupId>
  <artifactId>thing</artifactId>
  <version>1.0</version>
  <profiles>
    <profile>
      <id>my-profile</id>
      <activation>
        <property>
          <name>makeThing</name>
          <value>thing</value>
        </property>
      </activation>
    </profile>
  </profiles>
</project>
EOF
  generate_workspace --maven_project=$TEST_TMPDIR &> $TEST_log \
    || fail "generating workspace failed"
}
# Multi-module project: parent pom "a" with submodules b1 and b2, where
# b1 depends on b2.  Running on b1 must warn that b2 resolves to a local
# pom (not a repository URL) yet still record the b2 artifact.
function test_submodules() {
  cat > $TEST_TMPDIR/pom.xml <<EOF
<project>
  <modelVersion>4.0.0</modelVersion>
  <groupId>xyz</groupId>
  <artifactId>a</artifactId>
  <version>1.0</version>
  <packaging>pom</packaging>
  <modules>
    <module>b1</module>
    <module>b2</module>
  </modules>
</project>
EOF
  # Create submodules, version and group are inherited from parent.
  mkdir -p $TEST_TMPDIR/{b1,b2}
  cat > $TEST_TMPDIR/b1/pom.xml <<EOF
<project>
  <modelVersion>4.0.0</modelVersion>
  <artifactId>b1</artifactId>
  <parent>
    <groupId>xyz</groupId>
    <artifactId>a</artifactId>
    <version>1.0</version>
  </parent>
  <dependencies>
    <dependency>
      <groupId>xyz</groupId>
      <artifactId>b2</artifactId>
      <version>1.0</version>
    </dependency>
  </dependencies>
</project>
EOF
  cat > $TEST_TMPDIR/b2/pom.xml <<EOF
<project>
  <modelVersion>4.0.0</modelVersion>
  <artifactId>b2</artifactId>
  <parent>
    <groupId>xyz</groupId>
    <artifactId>a</artifactId>
    <version>1.0</version>
  </parent>
</project>
EOF
  generate_workspace -m $TEST_TMPDIR/b1 &> $TEST_log || fail "generate failed"
  expect_log "xyz_b2 was defined in $TEST_TMPDIR/b2/pom.xml which isn't a repository URL"
  assert_contains "artifact = \"xyz:b2:1.0\"," $(get_workspace_file)
}
run_suite "maven tests"
|
kamalmarhubi/bazel
|
src/test/shell/bazel/generate_workspace_test.sh
|
Shell
|
apache-2.0
| 5,511 |
## Globals that add classes
# Globals can also be used to add classes to roles. in the previous section we saw that we can swap which class is included by a role based on a global, but what if you want to sometimes add a class, and sometimes have nothing at all?
# An example of this is including debugging tools. Let's make a new class that includes systemtap, which we don't want on our prod servers, but which we do want on our QA servers. Using a global, we can have the same scenario for prod and QA, and change a single line of yaml to add our debugging tool to the QA servers.

# Create our systemtap class
mkdir -p /etc/puppet/modules/systemtap/manifests
cat > /etc/puppet/modules/systemtap/manifests/init.pp<<EOF
class systemtap::init()
{
  notice("Systemtap installed")
}
EOF

# Add a debug global, we'll use user.yaml because racecar
echo "debug: true" > /etc/puppet/data/global_hiera_params/user.yaml

# Update the heirarchy to support the debug choice
# NOTE(review): hiera configs usually quote interpolation tokens
# ("%{scenario}"); a bare leading % can be rejected by strict YAML
# parsers — confirm this works with the targeted hiera version.
cat > /etc/puppet/hiera.yaml<<EOF
---
:backends:
  - data_mapper
:hierarchy:
  - user
  - %{scenario}
  - %{db_type}
  - debug/%{debug}
  - common
:data_mapper:
  :datadir: /etc/puppet/data/data_mappings
EOF

# This will search for data in scenarios/debug/%{debug}, so let's make that directory
mkdir -p /etc/puppet/data/scenarios/debug

# Now, if debug is true, we want to add the systemtap::init class to the appserver role. We do that like so:
cat > /etc/puppet/data/scenarios/debug/true.yaml<<EOF
roles:
  appserver:
    classes:
      - systemtap::init
EOF

# Now when we apply, we should see systemtap added.
puppet apply -e ""

# If we set debug to anything other than true, it won't be added. because the file name won't be there
echo "debug: not_true" > /etc/puppet/data/global_hiera_params/user.yaml
puppet apply -e ""
|
bodepd/scenario_node_terminus
|
tutorials/06globals2.sh
|
Shell
|
apache-2.0
| 1,797 |
# ----------------------------------------------------------------------------
#
# Package          : slf4j-log4j12
# Version          : 1.7.30, 2.0.0-alpha1
# Source repo      : https://github.com/qos-ch/slf4j
# Tested on        : RHEL 7.6
# Script License   : Apache License Version 2.0
# Maintainer       : Pratham Murkute <[email protected]>
#
# Disclaimer: This script has been tested in root mode on given
# ==========  platform using the mentioned version of the package.
#             It may not work as expected with newer versions of the
#             package and/or distribution. In such case, please
#             contact "Maintainer" of this script.
#
# ----------------------------------------------------------------------------
# NOTE(review): this shebang is not on line 1, so it has no effect when
# the file is executed directly; the caller's shell is used instead.
#!/bin/bash
# install tools and dependent packages
#yum -y update
yum install -y git wget curl unzip nano vim make build-essential
#yum install -y gcc ant
# setup java environment
yum install -y java java-devel
which java
ls /usr/lib/jvm/
# NOTE(review): hard-coded IBM JDK path; adjust to the actual directory
# listed above on the target system.
export JAVA_HOME=/usr/lib/jvm/java-1.8.0-ibm-1.8.0.6.5-1jpp.1.el7.ppc64le
# update the path env. variable
export PATH=$PATH:$JAVA_HOME/bin
# install maven
MAVEN_VERSION=3.6.3
wget http://mirrors.estointernet.in/apache/maven/maven-3/$MAVEN_VERSION/binaries/apache-maven-$MAVEN_VERSION-bin.tar.gz
ls /usr/local
tar -C /usr/local/ -xzf apache-maven-$MAVEN_VERSION-bin.tar.gz
mv /usr/local/apache-maven-$MAVEN_VERSION /usr/local/maven
ls /usr/local
rm apache-maven-$MAVEN_VERSION-bin.tar.gz
export M2_HOME=/usr/local/maven
# update the path env. variable
export PATH=$PATH:$M2_HOME/bin
# create folder for saving logs
mkdir -p /logs
# variables
PKG_NAME="slf4j-log4j12"
PKG_VERSION=1.7.30
PKG_VERSION_LATEST=2.0.0
LOGS_DIRECTORY=/logs
LOCAL_DIRECTORY=/root
REPOSITORY="https://github.com/qos-ch/slf4j.git"
# clone, build and test specified version
cd $LOCAL_DIRECTORY
git clone $REPOSITORY $PKG_NAME-$PKG_VERSION
cd $PKG_NAME-$PKG_VERSION/
git checkout -b $PKG_VERSION tags/v_$PKG_VERSION
cd slf4j-log4j12/
# NOTE(review): piping through tee makes the pipeline's status that of
# tee, so a mvn failure is not reflected in $? (no pipefail set).
mvn install | tee $LOGS_DIRECTORY/$PKG_NAME-$PKG_VERSION.txt
# clone, build and test latest version
cd $LOCAL_DIRECTORY
git clone $REPOSITORY $PKG_NAME-$PKG_VERSION_LATEST
cd $PKG_NAME-$PKG_VERSION_LATEST/
git checkout -b $PKG_VERSION_LATEST tags/v_$PKG_VERSION_LATEST-alpha1
cd slf4j-log4j12/
mvn install | tee $LOGS_DIRECTORY/$PKG_NAME-$PKG_VERSION_LATEST.txt
# clone, build and test master
#cd $LOCAL_DIRECTORY
#git clone $REPOSITORY $PKG_NAME-master
#cd $PKG_NAME-master/
#cd slf4j-log4j12/
#mvn install | tee $LOGS_DIRECTORY/$PKG_NAME.txt
|
ppc64le/build-scripts
|
s/slf4j-log4j12/slf4j_log4j12_rhel_7.6.sh
|
Shell
|
apache-2.0
| 2,519 |
#!/bin/bash
#
# Script to create a signed container. Intended for op-build integration.
#
# Defaults, initial values
# P: program basename, used to prefix all log and error messages.
P=${0##*/}
# Default signing mode and key-management system; both can be overridden
# by -m/--mode, -k/--kms, the SB_* environment variables, or the INI file.
SIGN_MODE=local
KMS=signframework
# Argument accumulator strings, populated later in the script (outside
# this excerpt) and passed to the container tooling.
HW_KEY_ARGS=""
SW_KEY_ARGS=""
HW_SIG_ARGS=""
SW_SIG_ARGS=""
VERIFY_ARGS=""
DEBUG_ARGS=""
ADDL_ARGS=""
RC=0
# Print the command-line help text and exit with status 1.
usage () {
    echo ""
    echo "	Options:"
    echo "	-h, --help              display this message and exit"
    echo "	-v, --verbose           show verbose output"
    echo "	-d, --debug             show additional debug output"
    echo "	-w, --wrap              column to wrap long output in verbose mode"
    echo "	-a, --hwKeyA            file containing HW key A private key in PEM format"
    echo "	-b, --hwKeyB            file containing HW key B private key in PEM format"
    echo "	-c, --hwKeyC            file containing HW key C private key in PEM format"
    echo "	-p, --swKeyP            file containing SW key P private key in PEM format"
    echo "	-q, --swKeyQ            file containing SW key Q private key in PEM format"
    echo "	-r, --swKeyR            file containing SW key R private key in PEM format"
    echo "	-l, --protectedPayload  file containing the payload to be signed"
    echo "	-i, --out               file to write containerized payload"
    echo "	-o, --code-start-offset code start offset for software header in hex"
    echo "	-f, --flags             prefix header flags in hex"
    echo "	-m, --mode              signing mode: local, independent or production"
    echo "	-k, --kms               key management system for retrieving keys and signatures"
    echo "	                        (choices are \"signframework\" or \"pkcs11\")"
    echo "	-s, --scratchDir        scratch directory to use for file caching, etc."
    echo "	-L, --label             name or identifier of the module being built (8 char max)"
    echo "	    --contrHdrOut       file write container header only (w/o payload)"
    echo "	    --archiveOut        file or directory to write archive (tarball) of artifacts"
    echo "	                        if directory, must end in '/'.  for PWD, use '.'"
    echo "	    --archiveIn         file containing archive of artifacts to import to cache"
    echo "	    --validate          validate the container after build"
    echo "	    --verify            verify the container after build, against the provided"
    echo "	                        value, or filename containing value, of the HW Keys hash"
    echo "	    --sign-project-config   INI file containing configuration properties (options"
    echo "	                            set here override those set via cmdline or environment)"
    echo ""
    exit 1
}
# Print an error message (prefixed with the program name) to stderr and
# abort with status 1.
die () {
    printf '%s\n' "$P: $*" >&2
    exit 1
}
# True when the file contains an EC private key that openssl can parse.
is_private_key () {
    openssl ec -pubout -in "$1" >/dev/null 2>&1
}
# True when the file contains an EC public key that openssl can parse.
is_public_key () {
    openssl ec -pubin -pubout -in "$1" >/dev/null 2>&1
}
# True when the file looks like a RAW p521 public key: exactly 133 bytes
# whose first byte is 0x04 (the "uncompressed point" marker).
is_raw_key () {
    [ -n "$1" ] || return 1
    [ "$(stat -c%s "$1")" -eq 133 ] || return 1
    [[ $(dd if="$1" bs=1 count=1 2>/dev/null) == $'\004' ]]
}
# Lowercase ASCII letters in $1 and print the result.
# Fix: use printf instead of echo so arguments that look like echo
# options (e.g. "-n") are printed rather than swallowed.
to_lower () {
    printf '%s\n' "$1" | tr A-Z a-z
}
# Uppercase ASCII letters in $1 and print the result.
# Fix: use printf instead of echo so arguments that look like echo
# options (e.g. "-n") are printed rather than swallowed.
to_upper () {
    printf '%s\n' "$1" | tr a-z A-Z
}
is_path_full () {
    # If a path has a leading slash, it's a full path, not relative.
    # A bash pattern match replaces the echo|egrep pipeline: no extra
    # processes, and no mis-parsing of paths that start with "-".
    [[ $1 == /* ]]
}
is_path_dir () {
    # If a path has a trailing slash, it's a dir, not a file.
    # A bash pattern match replaces the echo|egrep pipeline: no extra
    # processes, and no mis-parsing of paths that start with "-".
    [[ $1 == */ ]]
}
# Sanitize boolean values so that on input:
# - True = set to "true" or "y", case insensitive
# - False = set to any other string, or unset
# On output:
# - True = set to a non-zero length string ("true")
# - False = set to a zero length string
make_bool () {
    case "$(to_lower "$1")" in
        true|y) echo true ;;
        *)      echo "" ;;
    esac
}
# True when $1 resolves to a runnable command in the current PATH.
is_cmd_available () {
    command -v "$1" >/dev/null 2>&1
}
# Convert a seconds-since-epoch value to presentation format.
# Tries GNU `date -d`, then perl (for platforms whose date lacks -d,
# e.g. BSD), and finally falls back to printing the raw number.
get_date_string () {
    local d
    d=$(date -d @"$1" 2>/dev/null) && echo "$d" && return
    is_cmd_available perl && \
        d=$(perl -le "print scalar localtime $1" 2>/dev/null) && \
        echo "$d" && return
    # Last resort: emit the epoch value unchanged.
    d=$1 && echo "$d"
}
# Package the cache subdir ($buildID/$LABEL) as a gzipped tarball at
# $SB_ARCHIVE_OUT.  In local/independent mode, key/signature artifacts
# are first renamed with the project basename so the archive can be
# imported on another system using the same project names.
exportArchive () {
    # If project basename is set, prepare the export for import to a system
    # using the same project basename.
    if [ "$SIGN_MODE" == "local" ] || [ "$SIGN_MODE" == "independent" ]
    then
        if [ "$SF_HW_SIGNING_PROJECT_BASE" ]
        then
            echo "--> $P: Exporting HW keys and sigs for project: $SF_HW_SIGNING_PROJECT_BASE"
            cd "$T" || die "Cannot cd to $T"
            # Copies are best-effort: missing artifacts are silently skipped.
            for KEY in a b c; do
                cp -p &>/dev/null "HW_key_$KEY.pub" \
                    "project.${SF_HW_SIGNING_PROJECT_BASE}_${KEY}.HW_key_${KEY}.pub"
                cp -p &>/dev/null "HW_key_$KEY.raw" \
                    "project.${SF_HW_SIGNING_PROJECT_BASE}_${KEY}.HW_key_${KEY}.raw"
                cp -p &>/dev/null "HW_key_$KEY.sig" \
                    "project.${SF_HW_SIGNING_PROJECT_BASE}_${KEY}.HW_sig_${KEY}.sig"
                cp -p &>/dev/null "HW_key_$KEY.raw" \
                    "project.${SF_HW_SIGNING_PROJECT_BASE}_${KEY}.HW_sig_${KEY}.raw"
            done
        fi
        if [ "$SF_FW_SIGNING_PROJECT_BASE" ]
        then
            echo "--> $P: Exporting FW keys and sigs for project: $SF_FW_SIGNING_PROJECT_BASE"
            cd "$T" || die "Cannot cd to $T"
            for KEY in p q r; do
                cp -p &>/dev/null "SW_key_$KEY.pub" \
                    "project.${SF_FW_SIGNING_PROJECT_BASE}_${KEY}.SW_key_${KEY}.pub"
                cp -p &>/dev/null "SW_key_$KEY.raw" \
                    "project.${SF_FW_SIGNING_PROJECT_BASE}_${KEY}.SW_key_${KEY}.raw"
                # Signatures are moved (not copied) for the FW project.
                mv &>/dev/null "SW_key_$KEY.sig" \
                    "project.${SF_FW_SIGNING_PROJECT_BASE}_${KEY}.SW_sig_${KEY}.sig"
            done
        fi
    fi
    # Create the archive.
    cd "$SB_SCRATCH_DIR" || die "Cannot cd to $SB_SCRATCH_DIR"
    if tar -zcf "$SB_ARCHIVE_OUT" "$buildID/$LABEL/"; then
        echo "--> $P: Archive saved to: $SB_ARCHIVE_OUT"
    else
        echo "--> $P: Error $? saving archive to: $SB_ARCHIVE_OUT"
    fi
}
# Unpack an artifact archive ($1, as produced by exportArchive) into the
# cache under $TOPDIR, backing up any existing subdir of the same name.
# Dies if the archive file is missing or its layout cannot be parsed.
importArchive () {
    echo "--> $P: Importing archive: $1..."
    test ! -f "$1" && die "archiveIn file not found: $1"
    # Normalize to an absolute path before changing directory.
    if ! is_path_full "$1"; then
        local f="$PWD/$1"
    else
        local f="$1"
    fi
    local previous_wd="$PWD"
    cd "$TOPDIR" || die "Cannot cd to $TOPDIR"
    # Archive entries are expected as <builddir>/<label>/...; use the
    # first entry to discover both components.
    archpath=$(tar -tf "$f" | head -1)
    archdir=$(echo "$archpath" | cut -d/ -f1)
    archsubdir=$(echo "$archpath" | cut -d/ -f2)
    test -z "$archdir" -o -z "$archsubdir" && \
        die "Cannot determine archive content for $f"
    if [ -d "$archsubdir" ]; then
        # We already have this subdir in the cache, make a backup
        rm -rf "$archsubdir.save"
        cp -rp "$archsubdir" "$archsubdir.save"
    else
        # We don't yet have a subdir by this name, create it
        mkdir "$archsubdir"
    fi
    if ! tar -xf "$f"; then
        echo "--> $P: Error $? unpacking archive: $f"
    fi
    # Move the unpacked files and remove the temporary archive directory
    mv "$archdir/$archsubdir/"* "$archsubdir/"
    rmdir "$archdir/$archsubdir/"
    rmdir "$archdir/"
    cd "$previous_wd" || die "Cannot cd back to ${previous_wd}, is it gone?"
}
# Validate a key variable by NAME (indirect expansion).  Accepts the
# sentinel values __skip/__get/__getkey/__getsig unchanged; otherwise
# the value must be a readable file containing a private, public, or
# RAW ECDSA key.  Dies on any other content.
checkKey () {
    # The variable name
    local keyname="$1"
    # The variable's value, typically the filename holding the key
    local k="${!keyname}"
    if [ "$k" ]; then
        # Handle the special values __skip, __get, __getkey and __getsig
        test "$k" == __skip && return
        test "$k" == __get && return
        test "$k" == __getkey && return
        test "$k" == __getsig && return
        # If it's a file, determine what kind of key it contains
        if [ -f "$k" ]; then
            if is_private_key "$k"; then
                test "$SB_VERBOSE" && \
                    echo "--> $P: Key $keyname is a private ECDSA key"
            elif is_public_key "$k"; then
                test "$SB_VERBOSE" && \
                    echo "--> $P: Key $keyname is a public ECDSA key"
            elif is_raw_key "$k"; then
                test "$SB_VERBOSE" && \
                    echo "--> $P: Key $keyname is a RAW public ECDSA key"
            else
                die "Key $keyname is neither a public nor private key"
            fi
        else
            die "Can't open file: $k for $keyname"
        fi
    fi
}
# Minimal INI parser: for each "prop = value" in file $1, sets a shell
# variable named <section>_<prop>.  Lines starting with ';' are comments;
# [section] headers switch the current section.
# SECURITY NOTE(review): values are expanded through eval, so the INI
# file must come from a trusted source.
parseIni () {
    local IFS=" ="
    local section property value
    while read -r property value
    do
        if echo "$property" | egrep -q "^;"
        then
            # This is a comment, skip it
            continue
        elif echo "$property" | egrep -q "\[.*]"
        then
            # This is a section header, read it
            section=$(echo "$property" | tr -d [] )
        elif test "$value"
        then
            # This is a property, set it
            eval "${section}_${property}=\"$value\""
        fi
    done < "$1"
}
# Search the cache for the first of the given filenames and print it.
# A "./name" argument restricts the search to this component's cache
# subdir ($T); otherwise the whole cache ($TOPDIR) is searched and a hit
# found elsewhere is copied into $T.  Prints nothing when not found.
findArtifact () {
    local f
    local found
    local scope
    for f in "$@"
    do
        # If filename starts with ./ search only this component cache.
        if [ "${f:0:2}" == "./" ]
        then
            f="${f:2}"
            scope=local
        else
            scope=global
        fi
        # Look for artifact in the local cache
        found=$(find "$T" -name "$f" | head -1)
        if [ "$found" ]; then
            echo "$f"
            return
        fi
        test "$scope" == "local" && continue
        # Look elsewhere in the cache
        found=$(find "$TOPDIR" -name "$f" | head -1)
        if [ "$found" ]; then
            cp -p "$found" "$T/"
            echo "$f"
            return
        fi
    done
}
#
# Main
#
# Convert long options to short so getopts (below) can handle them.
# Each long option is replaced in-place in the positional parameters by
# its single-letter equivalent; unknown words pass through untouched.
for arg in "$@"; do
  shift
  case "$arg" in
    "--help")                set -- "$@" "-h" ;;
    "--verbose")             set -- "$@" "-v" ;;
    "--debug")               set -- "$@" "-d" ;;
    "--wrap")                set -- "$@" "-w" ;;
    "--hwKeyA")              set -- "$@" "-a" ;;
    "--hwKeyB")              set -- "$@" "-b" ;;
    "--hwKeyC")              set -- "$@" "-c" ;;
    "--swKeyP")              set -- "$@" "-p" ;;
    "--swKeyQ")              set -- "$@" "-q" ;;
    "--swKeyR")              set -- "$@" "-r" ;;
    "--flags")               set -- "$@" "-f" ;;
    "--code-start-offset")   set -- "$@" "-o" ;;
    "--protectedPayload")    set -- "$@" "-l" ;;
    "--out")                 set -- "$@" "-i" ;;
    "--kms")                 set -- "$@" "-k" ;;
    "--mode")                set -- "$@" "-m" ;;
    "--scratchDir")          set -- "$@" "-s" ;;
    "--label")               set -- "$@" "-L" ;;
    "--sign-project-FW-token") set -- "$@" "-L" ;;
    "--sign-project-config") set -- "$@" "-4" ;;
    "--contrHdrOut")         set -- "$@" "-5" ;;
    "--archiveIn")           set -- "$@" "-6" ;;
    "--archiveOut")          set -- "$@" "-7" ;;
    "--validate")            set -- "$@" "-8" ;;
    "--verify")              set -- "$@" "-9" ;;
    *)                       set -- "$@" "$arg"
  esac
done
# Process command-line arguments
# (digits 4-9 are internal short aliases for the long-only options above)
while getopts -- ?hdvw:a:b:c:p:q:r:f:o:l:i:m:k:s:L:4:5:6:7:89: opt
do
  case "${opt:?}" in
    v) SB_VERBOSE="TRUE";;
    d) SB_DEBUG="TRUE";;
    w) SB_WRAP="$OPTARG";;
    a) HW_KEY_A="$OPTARG";;
    b) HW_KEY_B="$OPTARG";;
    c) HW_KEY_C="$OPTARG";;
    p) SW_KEY_P="$OPTARG";;
    q) SW_KEY_Q="$OPTARG";;
    r) SW_KEY_R="$OPTARG";;
    f) HW_FLAGS="$OPTARG";;
    o) CS_OFFSET="$OPTARG";;
    l) PAYLOAD="$OPTARG";;
    i) OUTPUT="$OPTARG";;
    k) KMS="$(to_lower "$OPTARG")";;
    m) SIGN_MODE="$(to_lower "$OPTARG")";;
    s) SB_SCRATCH_DIR="$OPTARG";;
    L) LABEL="$OPTARG";;
    4) PROJECT_INI="$OPTARG";;
    5) SB_CONTR_HDR_OUT="$OPTARG";;
    6) SB_ARCHIVE_IN="$OPTARG";;
    7) SB_ARCHIVE_OUT="$OPTARG";;
    8) SB_VALIDATE="TRUE";;
    9) SB_VERIFY="$OPTARG";;
    h|\?) usage;;
  esac
done
# Check required programs
for p in date egrep tar openssl create-container print-container
do
    is_cmd_available $p || \
        die "Required command \"$p\" not available or not found in PATH"
done
# Sanitize boolean values
SB_VERBOSE="$(make_bool "$SB_VERBOSE")"
SB_DEBUG="$(make_bool "$SB_DEBUG")"
# These are the only env vars that override a command line option
test "$SB_KMS" && KMS="$(to_lower "$SB_KMS")"
test "$SB_SIGN_MODE" && SIGN_MODE="$(to_lower "$SB_SIGN_MODE")"
test "$SB_PROJECT_INI" && PROJECT_INI="$SB_PROJECT_INI"
# What op-build calls development mode, we call local mode
test "$SIGN_MODE" == development && SIGN_MODE=local
echo "--> $P: Signing mode: $SIGN_MODE"
#
# Parse INI file
#
if [ "$(to_upper "$LABEL")" == SBKTRAND ]
then
    # Key transition container may have its own ini file
    test "$SB_PROJECT_INI_TRANS" && PROJECT_INI=$SB_PROJECT_INI_TRANS
fi
if [ "$PROJECT_INI" ]
then
    test ! -f "$PROJECT_INI" && die "Can't open INI file: $PROJECT_INI"
    # Pre-clear every property parseIni may set, so stale environment
    # values cannot leak through when the INI omits a key.
    signer_userid=""
    signer_sshkey_file=""
    signer_epwd_file=""
    server_hostname=""
    signtool_validate=""
    signtool_verify=""
    signtool_verify_trans=""
    signtool_pass_on_validation_error=""
    signproject_hw_signing_project_basename=""
    signproject_fw_signing_project_basename=""
    signproject_getpubkey_project_basename=""
    pkcs11_module=""
    pkcs11_token=""
    echo "--> $P: Parsing INI file: $PROJECT_INI"
    parseIni "$PROJECT_INI"
    # INI values override both command-line options and environment.
    test "$signer_userid" && SF_USER="$signer_userid"
    test "$signer_sshkey_file" && SF_SSHKEY="$signer_sshkey_file"
    test "$signer_epwd_file" && SF_EPWD="$signer_epwd_file"
    test "$server_hostname" && SF_SERVER="$server_hostname"
    test "$signtool_validate" && SB_VALIDATE="$signtool_validate"
    test "$signtool_verify" && SB_VERIFY="$signtool_verify"
    test "$signtool_verify_trans" && SB_VERIFY_TRANS="$signtool_verify_trans"
    test "$signtool_pass_on_validation_error" && \
        SB_PASS_ON_ERROR="$signtool_pass_on_validation_error"
    test "$signproject_hw_signing_project_basename" && \
        SF_HW_SIGNING_PROJECT_BASE="$signproject_hw_signing_project_basename"
    test "$signproject_fw_signing_project_basename" && \
        SF_FW_SIGNING_PROJECT_BASE="$signproject_fw_signing_project_basename"
    test "$signproject_getpubkey_project_basename" && \
        SF_GETPUBKEY_PROJECT_BASE="$signproject_getpubkey_project_basename"
    test "$pkcs11_module" && SB_PKCS11_MODULE="$pkcs11_module"
    test "$pkcs11_token" && SB_PKCS11_TOKEN="$pkcs11_token"
fi
#
# Check required arguments
#
# A missing or "__none" payload defaults to /dev/zero (empty payload).
if [ -z "$PAYLOAD" ] || [ "$PAYLOAD" == __none ]
then
    PAYLOAD=/dev/zero
elif [ ! -f "$PAYLOAD" ]; then
    die "Can't open payload file: $PAYLOAD"
fi
# Production signing goes through sf_client and needs full credentials.
if [ "$SIGN_MODE" == "production" ]
then
    test -z "$SF_USER" && die "Production mode selected but no signer userid provided"
    test -z "$SF_SSHKEY" && die "Production mode selected but no signer ssh key provided"
    test -z "$SF_EPWD" && die "Production mode selected but no signer ePWD provided"
    test -z "$SF_SERVER" && die "Production mode selected but no signframework server provided"
    is_cmd_available sf_client || \
        die "Required command \"sf_client\" not available or not found in PATH"
fi
# Check input keys
for KEY in HW_KEY_A HW_KEY_B HW_KEY_C; do
    checkKey $KEY
done
for KEY in SW_KEY_P SW_KEY_Q SW_KEY_R; do
    checkKey $KEY
done
#
# Set cache directory
#
# Defaults: cache under $TMPDIR (or /tmp), discard the cache on exit,
# label the build "IMAGE".
: "${TMPDIR:=/tmp}"
: "${SB_SCRATCH_DIR:=$TMPDIR}"
: "${SB_KEEP_CACHE:=false}"
: "${LABEL:=IMAGE}"
moniker="SIGNTOOL"
test ! -d "$SB_SCRATCH_DIR" && die "Scratch directory not found: $SB_SCRATCH_DIR"
# Reuse the most recently created SIGNTOOL_* cache dir, if one exists.
TOPDIR=$(ls -1dt "$SB_SCRATCH_DIR"/${moniker}_* 2>/dev/null | head -1)
if [ "$TOPDIR" ]; then
buildID="${TOPDIR##*/}"
timestamp="${buildID##*_}"
echo "--> $P: Using existing cache dir: $TOPDIR, created: $(get_date_string "$timestamp")"
else
# The cache dir name encodes its creation time in epoch seconds.
buildID="${moniker}_$(date +%s)"
TOPDIR="$SB_SCRATCH_DIR/$buildID"
echo "--> $P: Creating new cache dir: $TOPDIR"
mkdir "$TOPDIR"
fi
# Per-label subdir "$T" holds the artifacts for this particular container.
T="$TOPDIR/$LABEL"
if [ -d "$T" ]; then
echo "--> $P: Using existing cache subdir: $T"
else
echo "--> $P: Creating new cache subdir: $T"
mkdir "$T"
fi
# Set a scratch file for output, if none provided.
# OUTPUT_SCRATCH=true marks the image for deletion during cleanup.
if [ -z "$OUTPUT" ] || [ "$OUTPUT" == __none ]
then
OUTPUT="$SB_SCRATCH_DIR/$(to_lower "$buildID").scratch.out.img"
OUTPUT_SCRATCH=true
else
OUTPUT_SCRATCH=false
fi
#
# If --archiveOut requested, construct the path and check it now
#
# The final path is normalized to an absolute file path; a bare directory
# gets a default "<buildid>_<label>.tgz" filename appended.
if [ "$SB_ARCHIVE_OUT" ]; then
path=$SB_ARCHIVE_OUT
test "$path" == . && path=${PWD}/
if ! is_path_full "$path"; then
# Path is a relative path, prepend PWD
path=${PWD}/${path}
fi
if is_path_dir "$path"; then
# Path is a directory, append default filename
path=${path}$(to_lower "$buildID")_${LABEL}.tgz
fi
test ! -d "${path%/*}" && die "archiveOut directory not found: ${path%/*}/"
SB_ARCHIVE_OUT=$path
fi
#
# If --archiveIn requested, import the file(s) now
#
# Accepts a comma-separated list of archives; a single space around each
# comma is tolerated and stripped below.
if [ "$SB_ARCHIVE_IN" ]
then
IFS=","
for f in $SB_ARCHIVE_IN
do
f="${f# }"; f="${f% }" # strip leading or trailing space
importArchive "$f"
done
unset IFS
fi
#
# Set arguments for (program) execution
#
# DEBUG_ARGS/ADDL_ARGS are later word-split into create-container options;
# SF_DEBUG_ARGS is passed to sf_client.
test "$SB_VERBOSE" && DEBUG_ARGS=" -v"
test "$SB_DEBUG" && DEBUG_ARGS="$DEBUG_ARGS -d"
test "$SB_WRAP" && DEBUG_ARGS="$DEBUG_ARGS -w $SB_WRAP"
test "$HW_FLAGS" && ADDL_ARGS="$ADDL_ARGS --hw-flags $HW_FLAGS"
test "$CS_OFFSET" && ADDL_ARGS="$ADDL_ARGS --sw-cs-offset $CS_OFFSET"
test "$LABEL" && ADDL_ARGS="$ADDL_ARGS --label $LABEL"
test "$SB_CONTR_HDR_OUT" && CONTR_HDR_OUT_OPT="--dumpContrHdr"
test "$SB_VERBOSE" && SF_DEBUG_ARGS=" -v"
test "$SB_DEBUG" && SF_DEBUG_ARGS="$SF_DEBUG_ARGS -d -stdout"
#
# Set defaults for signframework project basenames
#
# Only needed in production mode; each basename later gets "_<key>" appended.
if [ "$SIGN_MODE" == "production" ]
then
: "${SF_HW_SIGNING_PROJECT_BASE:=sign_ecc_pwr_hw_key}"
: "${SF_FW_SIGNING_PROJECT_BASE:=sign_ecc_pwr_fw_key_op_bld}"
: "${SF_GETPUBKEY_PROJECT_BASE:=getpubkeyecc}"
fi
#
# Set defaults for PKCS11
#
# Defaults to SoftHSM2 with a token named "P9Signing".
: "${SB_PKCS11_MODULE:=/usr/lib64/pkcs11/libsofthsm2.so}"
: "${SB_PKCS11_TOKEN:=P9Signing}"
#
# Get the public keys
#
# local/independent mode: keys come from files on disk or from previously
# imported cache artifacts ("__get"/"__getkey").  production mode: keys are
# fetched from the KMS (signframework or PKCS11) unless already cached.
if [ "$SIGN_MODE" == "local" ] || [ "$SIGN_MODE" == "independent" ]
then
for KEY in a b c; do
# This will evaluate the value of HW_KEY_A, HW_KEY_B, HW_KEY_C
varname=HW_KEY_$(to_upper $KEY); KEYFILE=${!varname}
# Handle the special values, or empty value
test -z "$KEYFILE" && continue
test "$KEYFILE" == __skip && continue
if [ "$KEYFILE" == __get ] || [ "$KEYFILE" == __getkey ]
then
# We expect a key of of this signing project to be imported.
test -z "$SF_HW_SIGNING_PROJECT_BASE" && \
die "__get or __getkey requested but no project basename provided for HW key $(to_upper $KEY)."
SF_PROJECT=${SF_HW_SIGNING_PROJECT_BASE}_${KEY}
KEYFILE_BASE=project.$SF_PROJECT.HW_key_$KEY
KEYFILE=$(findArtifact "$KEYFILE_BASE.pub" "$KEYFILE_BASE.raw")
if [ "$KEYFILE" ]; then
test "$SB_VERBOSE" && msg=" ($KEYFILE)"
echo "--> $P: Found key for HW key $(to_upper $KEY).${msg}"
KEYFILE="$T/$KEYFILE"
else
die "__get or __getkey requested but no imported key found for HW key $(to_upper $KEY)."
fi
else
# The user provided KEYFILE should point to file on disk.
# Copy the pubkey to the cache.
# A private key is converted to its public half; pub/raw keys are copied.
if [ -f "$KEYFILE" ]; then
if is_private_key "$KEYFILE"; then
openssl ec -in "$KEYFILE" -pubout -out "$T/HW_key_$KEY.pub" &>/dev/null
elif is_public_key "$KEYFILE"; then
cp -p "$KEYFILE" "$T/HW_key_$KEY.pub"
elif is_raw_key "$KEYFILE"; then
cp -p "$KEYFILE" "$T/HW_key_$KEY.raw"
fi
fi
fi
# Add to HW_KEY_ARGS
HW_KEY_ARGS="$HW_KEY_ARGS -$KEY $KEYFILE"
done
# NOTE(review): unlike HW keys, an empty/__skip SW key 'break's out of the
# loop — presumably SW keys must be contiguous starting at P; confirm.
for KEY in p q r; do
# Find the value of SW_KEY_P, SW_KEY_Q, SW_KEY_R
varname=SW_KEY_$(to_upper $KEY); KEYFILE=${!varname}
# Handle the special values, or empty value
test -z "$KEYFILE" && break
test "$KEYFILE" == __skip && break
if [ "$KEYFILE" == __get ] || [ "$KEYFILE" == __getkey ]
then
# We expect a key of of this signing project to be imported.
test -z "$SF_FW_SIGNING_PROJECT_BASE" && \
die "__get or __getkey requested but no project basename provided for SW key $(to_upper $KEY)."
SF_PROJECT=${SF_FW_SIGNING_PROJECT_BASE}_${KEY}
KEYFILE_BASE=project.$SF_PROJECT.SW_key_$KEY
KEYFILE=$(findArtifact "$KEYFILE_BASE.pub" "$KEYFILE_BASE.raw")
if [ "$KEYFILE" ]; then
test "$SB_VERBOSE" && msg=" ($KEYFILE)"
echo "--> $P: Found key for SW key $(to_upper $KEY).${msg}"
KEYFILE="$T/$KEYFILE"
else
die "__get or __getkey requested but no imported key found for SW key $(to_upper $KEY)."
fi
else
# The user provided KEYFILE should point to file on disk.
# Copy the pubkey to the cache.
if [ -f "$KEYFILE" ]; then
if is_private_key "$KEYFILE"; then
openssl ec -in "$KEYFILE" -pubout -out "$T/SW_key_$KEY.pub" &>/dev/null
elif is_public_key "$KEYFILE"; then
cp -p "$KEYFILE" "$T/SW_key_$KEY.pub"
elif is_raw_key "$KEYFILE"; then
cp -p "$KEYFILE" "$T/SW_key_$KEY.raw"
fi
fi
fi
# Add to SW_KEY_ARGS
SW_KEY_ARGS="$SW_KEY_ARGS -$KEY $KEYFILE"
done
elif [ "$SIGN_MODE" == "production" ]
then
for KEY in a b c; do
varname=HW_KEY_$(to_upper $KEY); KEYFILE=${!varname}
# Handle the special values, or empty value
test -z "$KEYFILE" && continue
test "$KEYFILE" == __skip && continue
test "$KEYFILE" == __getsig && continue
# TODO: Add full support for user-specified keys in Production mode.
# Currently we use it only to check if __skip was specified.
SF_PROJECT=${SF_HW_SIGNING_PROJECT_BASE}_${KEY}
KEYFILE_BASE=project.$SF_PROJECT.HW_key_$KEY
KEYFILE=$(findArtifact "$KEYFILE_BASE.pub" "$KEYFILE_BASE.raw")
if [ "$KEYFILE" ]; then
test "$SB_VERBOSE" && msg=" ($KEYFILE)"
echo "--> $P: Found key for HW key $(to_upper $KEY).${msg}"
else
# No key found, request one.
echo "--> $P: Requesting public key for HW key $(to_upper $KEY)..."
if [ "$KMS" == "signframework" ]
then
# Output is pubkey in raw format
KEYFILE="$KEYFILE_BASE.raw"
sf_client $SF_DEBUG_ARGS -project "$SF_GETPUBKEY_PROJECT_BASE" \
-param "-signproject $SF_PROJECT" \
-epwd "$SF_EPWD" -comments "Requesting $SF_PROJECT" \
-url sftp://$SF_USER@$SF_SERVER -pkey "$SF_SSHKEY" \
-o "$T/$KEYFILE"
elif [ "$KMS" == "pkcs11" ]
then
# Output is pubkey in PEM format
KEYFILE="$KEYFILE_BASE.pub"
pkcs11-tool --module $SB_PKCS11_MODULE \
--token-label $SB_PKCS11_TOKEN \
--read-object --type pubkey --label $SF_PROJECT | \
openssl ec -inform der -pubin -pubout -out "$T/$KEYFILE" &>/dev/null
else
die "Unsupported KMS: $KMS"
fi
# rc is the exit status of the last command of the if/elif above.
rc=$?
test $rc -ne 0 && die "Call to KMS client failed with error: $rc"
test "$(find "$T" -name $KEYFILE)" || \
die "Unable to retrieve HW key $(to_upper $KEY)."
echo "--> $P: Retrieved public key for HW key $(to_upper $KEY)."
fi
# Add to HW_KEY_ARGS
HW_KEY_ARGS="$HW_KEY_ARGS -$KEY $T/$KEYFILE"
done
for KEY in p q r; do
varname=SW_KEY_$(to_upper $KEY); KEYFILE=${!varname}
# Handle the special values, or empty value
test -z "$KEYFILE" && break
test "$KEYFILE" == __skip && break
test "$KEYFILE" == __getsig && continue
SF_PROJECT=${SF_FW_SIGNING_PROJECT_BASE}_${KEY}
KEYFILE_BASE=project.$SF_PROJECT.SW_key_$KEY
KEYFILE=$(findArtifact "$KEYFILE_BASE.pub" "$KEYFILE_BASE.raw")
if [ "$KEYFILE" ]; then
test "$SB_VERBOSE" && msg=" ($KEYFILE)"
echo "--> $P: Found key for SW key $(to_upper $KEY).${msg}"
else
# No key found, request one.
echo "--> $P: Requesting public key for SW key $(to_upper $KEY)..."
if [ "$KMS" == "signframework" ]
then
KEYFILE="$KEYFILE_BASE.raw"
sf_client $SF_DEBUG_ARGS -project "$SF_GETPUBKEY_PROJECT_BASE" \
-param "-signproject $SF_PROJECT" \
-epwd "$SF_EPWD" -comments "Requesting $SF_PROJECT" \
-url sftp://$SF_USER@$SF_SERVER -pkey "$SF_SSHKEY" \
-o "$T/$KEYFILE"
elif [ "$KMS" == "pkcs11" ]
then
KEYFILE="$KEYFILE_BASE.pub"
pkcs11-tool --module $SB_PKCS11_MODULE \
--token-label $SB_PKCS11_TOKEN \
--read-object --type pubkey --label $SF_PROJECT | \
openssl ec -inform der -pubin -pubout -out "$T/$KEYFILE" &>/dev/null
fi
rc=$?
test $rc -ne 0 && die "Call to KMS client failed with error: $rc"
test "$(find "$T" -name $KEYFILE)" || \
die "Unable to retrieve SW key $(to_upper $KEY)."
echo "--> $P: Retrieved public key for SW key $(to_upper $KEY)."
fi
# Add to SW_KEY_ARGS
SW_KEY_ARGS="$SW_KEY_ARGS -$KEY $T/$KEYFILE"
done
elif [ "$SIGN_MODE" ]
then
die "Unsupported mode: $SIGN_MODE"
fi
#
# Build enough of the container to create the Prefix and Software headers
#
# The dumped prefix_hdr/software_hdr files are what actually get signed below.
if [ "$SIGN_MODE" == "independent" ] && [ "$SB_ARCHIVE_IN" ]
then
echo "--> $P: Attempting to re-use existing signing requests..."
# TODO: check that prefix_hdr and software_hdr files are available...
else
echo "--> $P: Generating signing requests..."
# Unquoted *_ARGS expansions are intentional: each holds multiple words.
create-container $HW_KEY_ARGS $SW_KEY_ARGS \
--payload "$PAYLOAD" --imagefile "$OUTPUT" \
--dumpPrefixHdr "$T/prefix_hdr" \
--dumpSwHdr "$T/software_hdr" \
$DEBUG_ARGS \
$ADDL_ARGS
rc=$?
test $rc -ne 0 && die "Call to create-container failed with error: $rc"
fi
#
# Prepare the HW and SW key signatures
#
# FOUND accumulates the labels (A,B,C,P,Q,R) for which a signature was
# obtained; it is only used for the progress message printed later.
FOUND=""
if [ "$SIGN_MODE" == "local" ] || [ "$SIGN_MODE" == "independent" ]
then
for KEY in a b c; do
varname=HW_KEY_$(to_upper $KEY); KEYFILE=${!varname}
# Handle the special values, or empty value
test -z "$KEYFILE" && continue
test "$KEYFILE" == __skip && continue
if [ "$KEYFILE" == __get ] || [ "$KEYFILE" == __getsig ]
then
# We expect a sig of of this signing project to be imported.
test -z "$SF_HW_SIGNING_PROJECT_BASE" && \
die "__get or __getsig requested but no project basename provided for HW key $(to_upper $KEY)."
SF_PROJECT=${SF_HW_SIGNING_PROJECT_BASE}_${KEY}
SIGFILE_BASE=project.$SF_PROJECT.HW_sig_$KEY
SIGFILE=$(findArtifact "$SIGFILE_BASE.sig" "$SIGFILE_BASE.raw")
if [ "$SIGFILE" ]; then
test "$SB_VERBOSE" && msg=" ($SIGFILE)"
echo "--> $P: Found sig for HW key $(to_upper $KEY).${msg}"
else
die "__get or __getsig requested but no imported sig found for HW key $(to_upper $KEY)."
fi
FOUND="${FOUND}$(to_upper $KEY),"
HW_SIG_ARGS="$HW_SIG_ARGS -$(to_upper $KEY) $T/$SIGFILE"
continue
fi
# Look for signature in the local cache dir.
SIGFILE=HW_key_$KEY.sig
if [ -f "$T/$SIGFILE" ]
then
test "$SB_VERBOSE" && msg=" ($SIGFILE)"
echo "--> $P: Found signature for HW key $(to_upper $KEY).${msg}"
else
# Check elsewhere in the cache.
if [ "$SIGN_MODE" == "independent" ] && [ "$SB_ARCHIVE_IN" ]
then
SIGFOUND=$(find "$TOPDIR" -type f -name $SIGFILE | head -1)
else
SIGFOUND=""
fi
if [ "$SIGFOUND" ]
then
test "$SB_VERBOSE" && msg=" ($SIGFILE)"
echo "--> $P: Found signature for HW key $(to_upper $KEY).${msg}"
cp -p "$SIGFOUND" "$T/"
else
# If no signature found, try to generate one.
if [ -f "$KEYFILE" ] && is_private_key "$KEYFILE"
then
echo "--> $P: Generating signature for HW key $(to_upper $KEY)..."
# HW signatures are taken over the dumped Prefix header.
openssl dgst -SHA512 -sign "$KEYFILE" "$T/prefix_hdr" > "$T/$SIGFILE"
rc=$?
test $rc -ne 0 && die "Call to openssl failed with error: $rc"
else
echo "--> $P: No signature found and no private key available for HW key $(to_upper $KEY), skipping."
continue
fi
fi
fi
FOUND="${FOUND}$(to_upper $KEY),"
HW_SIG_ARGS="$HW_SIG_ARGS -$(to_upper $KEY) $T/$SIGFILE"
done
for KEY in p q r; do
SIGFILE=SW_key_$KEY.sig
varname=SW_KEY_$(to_upper $KEY); KEYFILE=${!varname}
# Handle the special values, or empty value
test -z "$KEYFILE" && break
test "$KEYFILE" == __skip && break
# Look for a signature in the local cache dir, if found use it.
# (but never reuse a sig for SBKT, the payload is always regenerated)
if [ -f "$T/$SIGFILE" ] && \
[ "$(to_upper "$LABEL")" != SBKT ] && \
[ "$(to_upper "$LABEL")" != SBKTRAND ]
then
test "$SB_VERBOSE" && msg=" ($SIGFILE)"
echo "--> $P: Found signature for SW key $(to_upper $KEY).${msg}"
elif test -f "$KEYFILE" && is_private_key "$KEYFILE"
then
# No signature found, try to generate one.
echo "--> $P: Generating signature for SW key $(to_upper $KEY)..."
# SW signatures are taken over the dumped Software header.
openssl dgst -SHA512 -sign "$KEYFILE" "$T/software_hdr" > "$T/$SIGFILE"
rc=$?
test $rc -ne 0 && die "Call to openssl failed with error: $rc"
else
echo "--> $P: No signature found and no private key available for SW key $(to_upper $KEY), skipping."
continue
fi
FOUND="${FOUND}$(to_upper $KEY),"
SW_SIG_ARGS="$SW_SIG_ARGS -$(to_upper $KEY) $T/$SIGFILE"
done
elif [ "$SIGN_MODE" == "production" ]
then
for KEY in a b c; do
varname=HW_KEY_$(to_upper $KEY); KEYFILE=${!varname}
# Handle the special values, or empty value
test -z "$KEYFILE" && continue
test "$KEYFILE" == __skip && continue
# TODO: Add full support for user-specified keys in Production mode.
# Currently we use it only to check if __skip or __getkey was specified.
SF_PROJECT=${SF_HW_SIGNING_PROJECT_BASE}_${KEY}
SIGFILE_BASE=project.$SF_PROJECT.HW_sig_$KEY
SIGFILE=$(findArtifact "$SIGFILE_BASE.sig" "$SIGFILE_BASE.raw")
if [ "$SIGFILE" ]; then
test "$SB_VERBOSE" && msg=" ($SIGFILE)"
echo "--> $P: Found sig for HW key $(to_upper $KEY).${msg}"
else
# No signature found, request one.
test "$KEYFILE" == __getkey && break # (unless instructed not to)
echo "--> $P: Requesting signature for HW key $(to_upper $KEY)..."
if [ "$KMS" == "signframework" ]
then
# Output is signature in raw format
SIGFILE="$SIGFILE_BASE.raw"
sf_client $SF_DEBUG_ARGS -project $SF_PROJECT -epwd "$SF_EPWD" \
-comments "Requesting sig for $SF_PROJECT" \
-url sftp://$SF_USER@$SF_SERVER -pkey "$SF_SSHKEY" \
-payload "$T/prefix_hdr" -o "$T/$SIGFILE"
elif [ "$KMS" == "pkcs11" ]
then
# Output is signature in DER format
SIGFILE="$SIGFILE_BASE.sig"
/bin/openssl dgst -engine pkcs11 -keyform engine \
-sign "pkcs11:token=$SB_PKCS11_TOKEN;object=$SF_PROJECT" \
-sha512 -out "$T/$SIGFILE" "$T/prefix_hdr"
fi
rc=$?
test $rc -ne 0 && die "Call to KMS client failed with error: $rc"
test "$(find "$T" -name $SIGFILE)" || \
die "Unable to retrieve sig for HW key $(to_upper $KEY)."
echo "--> $P: Retrieved signature for HW key $(to_upper $KEY)."
fi
FOUND="${FOUND}$(to_upper $KEY),"
HW_SIG_ARGS="$HW_SIG_ARGS -$(to_upper $KEY) $T/$SIGFILE"
done
for KEY in p q r; do
varname=SW_KEY_$(to_upper $KEY); KEYFILE=${!varname}
# Handle the special values, or empty value
test -z "$KEYFILE" && break
test "$KEYFILE" == __skip && break
SF_PROJECT=${SF_FW_SIGNING_PROJECT_BASE}_${KEY}
SIGFILE_BASE=project.$SF_PROJECT.SW_sig_$KEY
# Look for a signature in the local cache dir, if found use it.
# (but never reuse a sig for SBKT, the payload is always regenerated)
if [ "$(to_upper "$LABEL")" == SBKT ] || \
[ "$(to_upper "$LABEL")" == SBKTRAND ]
then
SIGFILE=""
else
SIGFILE=$(findArtifact "./$SIGFILE_BASE.sig" "./$SIGFILE_BASE.raw")
fi
if [ "$SIGFILE" ]; then
test "$SB_VERBOSE" && msg=" ($SIGFILE)"
echo "--> $P: Found sig for SW key $(to_upper $KEY).${msg}"
else
# No signature found, request one.
test "$KEYFILE" == __getkey && break # (unless instructed not to)
echo "--> $P: Requesting signature for SW key $(to_upper $KEY)..."
if [ "$KMS" == "signframework" ]
then
# Output is signature in raw format
# NOTE(review): signframework signs software_hdr.md.bin (a digest?) while
# the pkcs11 path signs software_hdr directly — confirm this is intended.
SIGFILE="$SIGFILE_BASE.raw"
sf_client $SF_DEBUG_ARGS -project $SF_PROJECT -epwd "$SF_EPWD" \
-comments "Requesting sig for $LABEL from $SF_PROJECT" \
-url sftp://$SF_USER@$SF_SERVER -pkey "$SF_SSHKEY" \
-payload "$T/software_hdr.md.bin" -o "$T/$SIGFILE"
elif [ "$KMS" == "pkcs11" ]
then
# Output is signature in DER format
SIGFILE="$SIGFILE_BASE.sig"
/bin/openssl dgst -engine pkcs11 -keyform engine \
-sign "pkcs11:token=$SB_PKCS11_TOKEN;object=$SF_PROJECT" \
-sha512 -out "$T/$SIGFILE" "$T/software_hdr"
fi
rc=$?
test $rc -ne 0 && die "Call to KMS client failed with error: $rc"
test "$(find "$T" -name $SIGFILE)" || \
die "Unable to retrieve sig for SW key $(to_upper $KEY)."
echo "--> $P: Retrieved signature for SW key $(to_upper $KEY)."
fi
FOUND="${FOUND}$(to_upper $KEY),"
SW_SIG_ARGS="$SW_SIG_ARGS -$(to_upper $KEY) $T/$SIGFILE"
done
fi
#
# Build the full container
#
# Second create-container pass: same keys/payload, now with the signatures
# collected above folded in.
if [ "$HW_SIG_ARGS" ] || [ "$SW_SIG_ARGS" ]; then
echo "--> $P: Have signatures for keys $FOUND adding to container..."
create-container $HW_KEY_ARGS $SW_KEY_ARGS \
$HW_SIG_ARGS $SW_SIG_ARGS \
--payload "$PAYLOAD" --imagefile "$OUTPUT" \
$DEBUG_ARGS $ADDL_ARGS \
$CONTR_HDR_OUT_OPT "$SB_CONTR_HDR_OUT"
rc=$?
test $rc -ne 0 && die "Call to create-container failed with error: $rc"
test "$SB_CONTR_HDR_OUT" && \
echo "--> $P: Container header saved to: $SB_CONTR_HDR_OUT"
else
echo "--> $P: No signatures available."
fi
echo "--> $P: Container $LABEL build completed."
#
# Export archive
#
test "$SB_ARCHIVE_OUT" && exportArchive "$SB_ARCHIVE_OUT"
#
# Validate, verify the container
#
# Normalize SB_VALIDATE / SB_PASS_ON_ERROR to "set or empty" booleans:
# anything other than Y/TRUE (case-insensitive) disables the feature.
if [ "$(to_upper "$SB_VALIDATE")" != Y ] && \
   [ "$(to_upper "$SB_VALIDATE")" != TRUE ]
then
    SB_VALIDATE=""
fi
if [ "$(to_upper "$SB_PASS_ON_ERROR")" != Y ] && \
   [ "$(to_upper "$SB_PASS_ON_ERROR")" != TRUE ]
then
    SB_PASS_ON_ERROR=""
fi
if [ "$(to_upper "$LABEL")" == SBKTRAND ]
then
    # Key transition container may have its own verify value
    test "$SB_VERIFY_TRANS" && SB_VERIFY=$SB_VERIFY_TRANS
fi
test "$SB_VALIDATE" && VALIDATE_OPT="--validate"
test "$SB_VERIFY" && VERIFY_OPT="--verify" && VERIFY_ARGS="$SB_VERIFY"
if [ "$VALIDATE_OPT" ] || [ "$VERIFY_OPT" ]; then
    echo
    # BUG FIX: only pass VERIFY_ARGS when it is set; the original always
    # appended a quoted (possibly empty) argument, handing print-container a
    # stray "" when --verify was not requested.
    print-container --imagefile "$OUTPUT" --no-print \
        $DEBUG_ARGS $VALIDATE_OPT $VERIFY_OPT ${VERIFY_ARGS:+"$VERIFY_ARGS"}
    # BUG FIX: quote $SB_PASS_ON_ERROR; unquoted, an empty value degenerated
    # `test -z` into a one-argument string test that only passed by accident.
    test $? -ne 0 && test -z "$SB_PASS_ON_ERROR" && RC=1
fi
#
# Cleanup
#
# Remove the scratch cache unless the user asked to keep it, and delete the
# output image if it was only a scratch file we created ourselves.
# BUG FIX: quote both test operands; with an unset/empty variable the
# original unquoted `[ $VAR == ... ]` is a test syntax error.
if [ "$SB_KEEP_CACHE" == false ]; then
    test "$SB_VERBOSE" && \
        echo "--> $P: Removing cache dir: $TOPDIR"
    rm -rf -- "$TOPDIR"
fi
if [ "$OUTPUT_SCRATCH" == true ]; then
    rm -- "$OUTPUT"
fi
# RC is expected to be initialized earlier in the script (0 on success).
exit $RC
|
hellerda/sb-signing-utils
|
crtSignedContainer.sh
|
Shell
|
apache-2.0
| 37,556 |
#!/bin/bash
#
# Copyright (C) 2017 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0(the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Installs an Android zip file to a directory
set -e
# Print usage for both invocation forms: zip streamed on stdin, or given as
# a file argument. "${dir}" is printed literally as a placeholder.
usage() {
  printf 'cat build.zip | %s ${dir} [ -- file_to_extract [another_file]... ]\n' "$0"
  printf 'or\n'
  printf '%s build-zip ${dir} [ -- file_to_extract [another_file]... ]\n' "$0"
}
# sanitize input to treat everything after '--' as files to be extracted
# idx ends up as the index of '--' (or $# if absent); everything after the
# '--' becomes the optional list of archive members to extract.
idx=0
for arg in $@; do
if [[ "$arg" == "--" ]]; then break; fi
idx=$((idx+1))
done
# NOTE(review): files_to_extract is a plain string, not an array, so member
# names containing spaces are not supported — confirm before relying on it.
files_to_extract=${@:(($idx+2))}
set -- ${@:1:$idx}
case $# in
1)
# stdin mode: "cat build.zip | install_zip.sh <dir>"
source=-
destdir="$1"
;;
2)
# file mode: "install_zip.sh <zip> <dir>"; -S lets bsdtar create sparse files
source="$1"
destdir="$2"
sparse=-S
;;
*)
usage
exit 2
;;
esac
mkdir -p "${destdir}"
bsdtar -x -C "${destdir}" -f "${source}" ${sparse} ${files_to_extract}
# If boot.img was among the extracted files, unpack kernel/ramdisk from it.
if [[ " ${files_to_extract[*]} " == *" boot.img "* ]]; then
/usr/lib/cuttlefish-common/bin/unpack_boot_image.py -boot_img "${destdir}/boot.img" -dest "${destdir}"
fi
# Inflate any Android sparse images into raw images, in place.
find "${destdir}" -name "*.img" -exec sh -c '
img="$0"
file "$img" | grep "Android sparse image," -q \
&& simg2img "$img" "$img.inflated" \
&& mv "$img.inflated" "$img"
' {} ';'
exit 0
|
google/android-cuttlefish
|
host/deploy/install_zip.sh
|
Shell
|
apache-2.0
| 1,686 |
# Submit one "selector" grid job per detected sphere.
# Args: 1=sphere output path, 2=data path, 3=results path, 4=script home path
module load global/cluster

spherepath=$1
datapath=$2
resultspath=$3
homepath=$4

# Sphere count previously written by the detection stage.
nb_spheres=$(cat "$resultspath/nb_spheres.txt")

for i in $(seq "$nb_spheres"); do
    # i-th line of each coordinate/radius file; pass i safely via awk -v
    # instead of splicing it into the awk program text.
    X=$(awk -v n="$i" 'NR==n' "$resultspath/centresX.txt")
    Y=$(awk -v n="$i" 'NR==n' "$resultspath/centresY.txt")
    Z=$(awk -v n="$i" 'NR==n' "$resultspath/centresZ.txt")
    R=$(awk -v n="$i" 'NR==n' "$resultspath/radii.txt")
    qsub -N job3 -pe smp 2 -j y -t "$i" -tc 10 "$homepath/selector.sh" \
        "$X" "$Y" "$Z" "$R" "$spherepath/sphere$i" "$datapath" "$homepath" "$resultspath"
done
|
DiamondLightSource/auto_tomo_calibration-experimental
|
measure_resolution/selector_loop.sh
|
Shell
|
apache-2.0
| 468 |
#!/usr/bin/env bash
# Build the "buildbox" image, then run the Maven build inside it under a
# virtual X server (Xvfb) so display-dependent tests can run headless.
docker build -t buildbox .
# Run as the invoking user so build artifacts in /workspace are not root-owned;
# the local Maven repo is kept inside the workspace (.m2) for caching.
docker run -i --rm \
-v "$(pwd)":/workspace \
--user "$(id -u):$(id -g)" \
-e DISPLAY=unix:0.0 \
-v /tmp/.X11-unix:/tmp/.X11-unix:ro \
buildbox \
bash -c "Xvfb :1 -screen 0 1024x768x16 &> xvfb.log & DISPLAY=:1.0 mvn clean install -Dmaven.repo.local=/workspace/.m2"
|
marc-christian-schulze/structs4java
|
build.sh
|
Shell
|
apache-2.0
| 338 |
#!/bin/bash
# Generate zero-filled test blobs: fileN00k.blob of N*100 KiB for N=5,10,...,50.
#
# Fixes vs. the original:
#  - deprecated "$[ ... ]" arithmetic replaced with a C-style for loop;
#  - the loop now starts at 5: the original's first iteration used i=0,
#    and "dd bs=0" is invalid so it always failed and produced no file.
for (( i = 5; i <= 50; i += 5 )); do
    dd if=/dev/zero of="file${i}00k.blob" bs=$(( i * 102400 )) count=1
done
|
mixisbad/qos
|
apps/qos/nweb/gen_files/gen.sh
|
Shell
|
apache-2.0
| 128 |
# -----------------------------------------------------------------------------
#
# Package : pyasn1_modules
# Version : 0.2.8
# Source repo : https://github.com/etingof/pyasn1-modules
# Tested on : RHEL 8.3
# Script License: Apache License, Version 2 or later
# Maintainer : BulkPackageSearch Automation <[email protected]>
#
# Disclaimer: This script has been tested in root mode on given
# ========== platform using the mentioned version of the package.
# It may not work as expected with newer versions of the
# package and/or distribution. In such case, please
# contact "Maintainer" of this script.
#
# ----------------------------------------------------------------------------
PACKAGE_NAME=pyasn1_modules
PACKAGE_VERSION=0.2.8
PACKAGE_URL=https://github.com/etingof/pyasn1-modules

# Install build/test prerequisites.
yum -y update && yum install -y python38 python38-devel python39 python39-devel python2 python2-devel python3 python3-devel ncurses git gcc gcc-c++ libffi libffi-devel sqlite sqlite-devel sqlite-libs python3-pytest make cmake

# Pretty OS name from /etc/os-release (e.g. "Red Hat Enterprise Linux 8.3").
OS_NAME=$(python3 -c "os_file_data=open('/etc/os-release').readlines();os_info = [i.replace('PRETTY_NAME=','').strip() for i in os_file_data if i.startswith('PRETTY_NAME')];print(os_info[0])")
SOURCE=Github

pip3 install -r /home/tester/output/requirements.txt
pip3 freeze > /home/tester/output/available_packages.txt

# BUG FIX: read the file we just wrote; the original grepped a (usually
# nonexistent) ./available_packages.txt in the current directory, so the
# "already installed" fast path never triggered.
PACKAGE_INFO=$(grep -- "$PACKAGE_NAME" /home/tester/output/available_packages.txt)
HOME_DIR=$(pwd)

# If the package already came in via pip/distro, report success and stop.
if ! test -z "$PACKAGE_INFO"; then
    # pip freeze format is "name==version", so '=' field 3 is the version.
    PACKAGE_VERSION=$(echo "$PACKAGE_INFO" | cut -d "=" -f 3)
    SOURCE="Distro"
    echo "------------------$PACKAGE_NAME:install_and_test_both_success-------------------------"
    echo "$PACKAGE_INFO"
    echo "$PACKAGE_NAME | $PACKAGE_URL | $PACKAGE_VERSION | $OS_NAME | $SOURCE | Pass | Both_Install_and_Test_Success"
    exit 0
fi
# Fallback path: try installing and testing the package under Python 2.7.
# Prints a pipe-separated status line in every case and exits the whole
# script (this function never returns to its caller).
function build_test_with_python2(){
SOURCE="Python 2.7"
cd $HOME_DIR/$PACKAGE_NAME
if ! python2 setup.py install; then
echo "------------------$PACKAGE_NAME:install_fails-------------------------------------"
echo "$PACKAGE_URL $PACKAGE_NAME "
echo "$PACKAGE_NAME | $PACKAGE_URL | $PACKAGE_VERSION | $OS_NAME | $SOURCE | Fail | Install_Fails"
exit 0
fi
cd $HOME_DIR/$PACKAGE_NAME
if ! python2 setup.py test; then
echo "------------------$PACKAGE_NAME:install_success_but_test_fails---------------------"
echo "$PACKAGE_URL $PACKAGE_NAME "
echo "$PACKAGE_NAME | $PACKAGE_URL | $PACKAGE_VERSION | $OS_NAME | $SOURCE | Fail | Install_success_but_test_Fails"
exit 0
else
echo "------------------$PACKAGE_NAME:install_and_test_both_success-------------------------"
echo "$PACKAGE_URL $PACKAGE_NAME "
echo "$PACKAGE_NAME | $PACKAGE_URL | $PACKAGE_VERSION | $OS_NAME | $SOURCE | Pass | Both_Install_and_Test_Success"
exit 0
fi
}
# Clone the package source; any failure is reported and ends the run.
if ! git clone $PACKAGE_URL $PACKAGE_NAME; then
echo "------------------$PACKAGE_NAME:clone_fails---------------------------------------"
echo "$PACKAGE_URL $PACKAGE_NAME"
echo "$PACKAGE_NAME | $PACKAGE_URL | $PACKAGE_VERSION | $OS_NAME | $SOURCE | Fail | Clone_Fails"
exit 0
fi
cd $HOME_DIR/$PACKAGE_NAME
git checkout $PACKAGE_VERSION
# Try Python 3 first; on install failure fall back to the Python 2 path
# (build_test_with_python2 exits the script itself).
if ! python3 setup.py install; then
build_test_with_python2
exit 0
fi
cd $HOME_DIR/$PACKAGE_NAME
if ! python3 setup.py test; then
echo "------------------$PACKAGE_NAME:install_success_but_test_fails---------------------"
echo "$PACKAGE_URL $PACKAGE_NAME "
echo "$PACKAGE_NAME | $PACKAGE_URL | $PACKAGE_VERSION | $OS_NAME | $SOURCE | Fail | Install_success_but_test_Fails"
exit 0
else
echo "------------------$PACKAGE_NAME:install_and_test_both_success-------------------------"
echo "$PACKAGE_URL $PACKAGE_NAME "
echo "$PACKAGE_NAME | $PACKAGE_URL | $PACKAGE_VERSION | $OS_NAME | $SOURCE | Pass | Both_Install_and_Test_Success"
exit 0
fi
|
ppc64le/build-scripts
|
p/pyasn1_modules/pyasn1_modules_rhel_8.3.sh
|
Shell
|
apache-2.0
| 3,799 |
#!/bin/bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO(jbeda): Provide a way to override project
# gcloud multiplexing for shared GCE/GKE tests.
# Test-cluster configuration: every value can be overridden through the
# corresponding KUBE_* environment variable before sourcing this file.
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "${KUBE_ROOT}/cluster/gce/config-common.sh"
GCLOUD=gcloud
ZONE=${KUBE_GCE_ZONE:-us-central1-b}
# Region is the zone minus its trailing "-<letter>" suffix.
REGION=${ZONE%-*}
RELEASE_REGION_FALLBACK=${RELEASE_REGION_FALLBACK:-false}
REGIONAL_KUBE_ADDONS=${REGIONAL_KUBE_ADDONS:-true}
NODE_SIZE=${NODE_SIZE:-n1-standard-2}
NUM_NODES=${NUM_NODES:-3}
MASTER_SIZE=${MASTER_SIZE:-n1-standard-$(get-master-size)}
MASTER_DISK_TYPE=pd-ssd
MASTER_DISK_SIZE=${MASTER_DISK_SIZE:-20GB}
NODE_DISK_TYPE=${NODE_DISK_TYPE:-pd-standard}
NODE_DISK_SIZE=${NODE_DISK_SIZE:-100GB}
REGISTER_MASTER_KUBELET=${REGISTER_MASTER:-true}
KUBE_APISERVER_REQUEST_TIMEOUT=300
PREEMPTIBLE_NODE=${PREEMPTIBLE_NODE:-false}
PREEMPTIBLE_MASTER=${PREEMPTIBLE_MASTER:-false}
KUBE_DELETE_NODES=${KUBE_DELETE_NODES:-true}
KUBE_DELETE_NETWORK=${KUBE_DELETE_NETWORK:-true}
MASTER_OS_DISTRIBUTION=${KUBE_MASTER_OS_DISTRIBUTION:-${KUBE_OS_DISTRIBUTION:-gci}}
NODE_OS_DISTRIBUTION=${KUBE_NODE_OS_DISTRIBUTION:-${KUBE_OS_DISTRIBUTION:-debian}}
# Normalize legacy distro aliases: coreos -> container-linux, cos -> gci.
if [[ "${MASTER_OS_DISTRIBUTION}" == "coreos" ]]; then
MASTER_OS_DISTRIBUTION="container-linux"
fi
if [[ "${NODE_OS_DISTRIBUTION}" == "coreos" ]]; then
NODE_OS_DISTRIBUTION="container-linux"
fi
if [[ "${MASTER_OS_DISTRIBUTION}" == "cos" ]]; then
MASTER_OS_DISTRIBUTION="gci"
fi
if [[ "${NODE_OS_DISTRIBUTION}" == "cos" ]]; then
NODE_OS_DISTRIBUTION="gci"
fi
# By default a cluster will be started with the master on GCI and nodes on
# containervm. If you are updating the containervm version, update this
# variable. Also please update corresponding image for node e2e at:
# https://github.com/kubernetes/kubernetes/blob/master/test/e2e_node/jenkins/image-config.yaml
CVM_VERSION=${CVM_VERSION:-container-vm-v20170214}
GCI_VERSION=${KUBE_GCI_VERSION:-gci-beta-56-9000-80-0}
MASTER_IMAGE=${KUBE_GCE_MASTER_IMAGE:-}
MASTER_IMAGE_PROJECT=${KUBE_GCE_MASTER_PROJECT:-google-containers}
NODE_IMAGE=${KUBE_GCE_NODE_IMAGE:-${CVM_VERSION}}
NODE_IMAGE_PROJECT=${KUBE_GCE_NODE_PROJECT:-google-containers}
CONTAINER_RUNTIME=${KUBE_CONTAINER_RUNTIME:-docker}
GCI_DOCKER_VERSION=${KUBE_GCI_DOCKER_VERSION:-}
RKT_VERSION=${KUBE_RKT_VERSION:-1.23.0}
RKT_STAGE1_IMAGE=${KUBE_RKT_STAGE1_IMAGE:-coreos.com/rkt/stage1-coreos}
NETWORK=${KUBE_GCE_NETWORK:-e2e}
INSTANCE_PREFIX="${KUBE_GCE_INSTANCE_PREFIX:-e2e-test-${USER}}"
CLUSTER_NAME="${CLUSTER_NAME:-${INSTANCE_PREFIX}}"
MASTER_NAME="${INSTANCE_PREFIX}-master"
INITIAL_ETCD_CLUSTER="${MASTER_NAME}"
ETCD_QUORUM_READ="${ENABLE_ETCD_QUORUM_READ:-false}"
MASTER_TAG="${INSTANCE_PREFIX}-master"
NODE_TAG="${INSTANCE_PREFIX}-minion"
CLUSTER_IP_RANGE="${CLUSTER_IP_RANGE:-10.180.0.0/14}"
MASTER_IP_RANGE="${MASTER_IP_RANGE:-10.246.0.0/24}"
RUNTIME_CONFIG="${KUBE_RUNTIME_CONFIG:-}"
# Optional: set feature gates
FEATURE_GATES="${KUBE_FEATURE_GATES:-ExperimentalCriticalPodAnnotation=true}"
TERMINATED_POD_GC_THRESHOLD=${TERMINATED_POD_GC_THRESHOLD:-100}
# Extra docker options for nodes.
EXTRA_DOCKER_OPTS="${EXTRA_DOCKER_OPTS:-}"
# Enable the docker debug mode.
EXTRA_DOCKER_OPTS="${EXTRA_DOCKER_OPTS} --debug"
SERVICE_CLUSTER_IP_RANGE="10.0.0.0/16" # formerly PORTAL_NET
# When set to true, Docker Cache is enabled by default as part of the cluster bring up.
ENABLE_DOCKER_REGISTRY_CACHE=true
# Optional: Deploy a L7 loadbalancer controller to fulfill Ingress requests:
# glbc - CE L7 Load Balancer Controller
ENABLE_L7_LOADBALANCING="${KUBE_ENABLE_L7_LOADBALANCING:-glbc}"
# Optional: Cluster monitoring to setup as part of the cluster bring up:
# none - No cluster monitoring setup
# influxdb - Heapster, InfluxDB, and Grafana
# google - Heapster, Google Cloud Monitoring, and Google Cloud Logging
# googleinfluxdb - Enable influxdb and google (except GCM)
# standalone - Heapster only. Metrics available via Heapster REST API.
ENABLE_CLUSTER_MONITORING="${KUBE_ENABLE_CLUSTER_MONITORING:-influxdb}"
# Set etcd image (e.g. 3.0.14-experimental.1) version (e.g. 3.0.14) if you need
# non-default version.
ETCD_IMAGE="${TEST_ETCD_IMAGE:-}"
ETCD_VERSION="${TEST_ETCD_VERSION:-}"
# Default Log level for all components in test clusters and variables to override it in specific components.
TEST_CLUSTER_LOG_LEVEL="${TEST_CLUSTER_LOG_LEVEL:---v=4}"
KUBELET_TEST_LOG_LEVEL="${KUBELET_TEST_LOG_LEVEL:-$TEST_CLUSTER_LOG_LEVEL}"
DOCKER_TEST_LOG_LEVEL="${DOCKER_TEST_LOG_LEVEL:---log-level=info}"
API_SERVER_TEST_LOG_LEVEL="${API_SERVER_TEST_LOG_LEVEL:-$TEST_CLUSTER_LOG_LEVEL}"
CONTROLLER_MANAGER_TEST_LOG_LEVEL="${CONTROLLER_MANAGER_TEST_LOG_LEVEL:-$TEST_CLUSTER_LOG_LEVEL}"
SCHEDULER_TEST_LOG_LEVEL="${SCHEDULER_TEST_LOG_LEVEL:-$TEST_CLUSTER_LOG_LEVEL}"
KUBEPROXY_TEST_LOG_LEVEL="${KUBEPROXY_TEST_LOG_LEVEL:-$TEST_CLUSTER_LOG_LEVEL}"
TEST_CLUSTER_DELETE_COLLECTION_WORKERS="${TEST_CLUSTER_DELETE_COLLECTION_WORKERS:---delete-collection-workers=1}"
TEST_CLUSTER_MAX_REQUESTS_INFLIGHT="${TEST_CLUSTER_MAX_REQUESTS_INFLIGHT:-}"
TEST_CLUSTER_RESYNC_PERIOD="${TEST_CLUSTER_RESYNC_PERIOD:---min-resync-period=3m}"
# ContentType used by all components to communicate with apiserver.
TEST_CLUSTER_API_CONTENT_TYPE="${TEST_CLUSTER_API_CONTENT_TYPE:-}"
KUBELET_TEST_ARGS="${KUBELET_TEST_ARGS:-} --max-pods=110 --serialize-image-pulls=false --outofdisk-transition-frequency=0 ${TEST_CLUSTER_API_CONTENT_TYPE}"
APISERVER_TEST_ARGS="${APISERVER_TEST_ARGS:-} --runtime-config=extensions/v1beta1 ${TEST_CLUSTER_DELETE_COLLECTION_WORKERS} ${TEST_CLUSTER_MAX_REQUESTS_INFLIGHT}"
CONTROLLER_MANAGER_TEST_ARGS="${CONTROLLER_MANAGER_TEST_ARGS:-} ${TEST_CLUSTER_RESYNC_PERIOD} ${TEST_CLUSTER_API_CONTENT_TYPE}"
SCHEDULER_TEST_ARGS="${SCHEDULER_TEST_ARGS:-} ${TEST_CLUSTER_API_CONTENT_TYPE}"
KUBEPROXY_TEST_ARGS="${KUBEPROXY_TEST_ARGS:-} ${TEST_CLUSTER_API_CONTENT_TYPE}"
# Optional: Enable node logging.
ENABLE_NODE_LOGGING="${KUBE_ENABLE_NODE_LOGGING:-true}"
LOGGING_DESTINATION="${KUBE_LOGGING_DESTINATION:-gcp}" # options: elasticsearch, gcp
# Optional: When set to true, Elasticsearch and Kibana will be setup as part of the cluster bring up.
ENABLE_CLUSTER_LOGGING="${KUBE_ENABLE_CLUSTER_LOGGING:-true}"
ELASTICSEARCH_LOGGING_REPLICAS=1
# Optional: Don't require https for registries in our local RFC1918 network
if [[ ${KUBE_ENABLE_INSECURE_REGISTRY:-false} == "true" ]]; then
EXTRA_DOCKER_OPTS="${EXTRA_DOCKER_OPTS} --insecure-registry 10.0.0.0/8"
fi
# Optional: Install cluster DNS.
ENABLE_CLUSTER_DNS="${KUBE_ENABLE_CLUSTER_DNS:-true}"
DNS_SERVER_IP="10.0.0.10"
DNS_DOMAIN="cluster.local"
# Optional: Enable DNS horizontal autoscaler
ENABLE_DNS_HORIZONTAL_AUTOSCALER="${KUBE_ENABLE_DNS_HORIZONTAL_AUTOSCALER:-true}"
# Optional: Install cluster docker registry.
ENABLE_CLUSTER_REGISTRY="${KUBE_ENABLE_CLUSTER_REGISTRY:-false}"
CLUSTER_REGISTRY_DISK="${CLUSTER_REGISTRY_DISK:-${INSTANCE_PREFIX}-kube-system-kube-registry}"
CLUSTER_REGISTRY_DISK_SIZE="${CLUSTER_REGISTRY_DISK_SIZE:-200GB}"
CLUSTER_REGISTRY_DISK_TYPE_GCE="${CLUSTER_REGISTRY_DISK_TYPE_GCE:-pd-standard}"
# Optional: Install Kubernetes UI
ENABLE_CLUSTER_UI="${KUBE_ENABLE_CLUSTER_UI:-true}"
# Optional: Install node problem detector.
ENABLE_NODE_PROBLEM_DETECTOR="${KUBE_ENABLE_NODE_PROBLEM_DETECTOR:-true}"
# Optional: Create autoscaler for cluster's nodes.
ENABLE_CLUSTER_AUTOSCALER="${KUBE_ENABLE_CLUSTER_AUTOSCALER:-false}"
if [[ "${ENABLE_CLUSTER_AUTOSCALER}" == "true" ]]; then
AUTOSCALER_MIN_NODES="${KUBE_AUTOSCALER_MIN_NODES:-}"
AUTOSCALER_MAX_NODES="${KUBE_AUTOSCALER_MAX_NODES:-}"
AUTOSCALER_ENABLE_SCALE_DOWN="${KUBE_AUTOSCALER_ENABLE_SCALE_DOWN:-false}"
fi
# Optional: Enable Rescheduler
ENABLE_RESCHEDULER="${KUBE_ENABLE_RESCHEDULER:-true}"
# If we included ResourceQuota, we should keep it at the end of the list to prevent incrementing quota usage prematurely.
ADMISSION_CONTROL="${KUBE_ADMISSION_CONTROL:-NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota}"
# Optional: if set to true kube-up will automatically check for existing resources and clean them up.
KUBE_UP_AUTOMATIC_CLEANUP=${KUBE_UP_AUTOMATIC_CLEANUP:-false}
# Optional: setting it to true denotes this is a testing cluster,
# so that we can use pulled kubernetes binaries, even if binaries
# are pre-installed in the image. Note that currently this logic
# is only supported in trusty or GCI.
TEST_CLUSTER="${TEST_CLUSTER:-true}"
# Storage backend. 'etcd2' and 'etcd3' are supported.
STORAGE_BACKEND=${STORAGE_BACKEND:-}
# Storage media type: application/json and application/vnd.kubernetes.protobuf are supported.
STORAGE_MEDIA_TYPE=${STORAGE_MEDIA_TYPE:-}
# OpenContrail networking plugin specific settings
NETWORK_PROVIDER="${NETWORK_PROVIDER:-kubenet}" # none, opencontrail, kubenet
OPENCONTRAIL_TAG="${OPENCONTRAIL_TAG:-R2.20}"
OPENCONTRAIL_KUBERNETES_TAG="${OPENCONTRAIL_KUBERNETES_TAG:-master}"
OPENCONTRAIL_PUBLIC_SUBNET="${OPENCONTRAIL_PUBLIC_SUBNET:-10.1.0.0/16}"
# Network Policy plugin specific settings.
NETWORK_POLICY_PROVIDER="${NETWORK_POLICY_PROVIDER:-none}" # calico
# How should the kubelet configure hairpin mode?
HAIRPIN_MODE="${HAIRPIN_MODE:-promiscuous-bridge}" # promiscuous-bridge, hairpin-veth, none
# Optional: if set to true, kube-up will configure the cluster to run e2e tests.
E2E_STORAGE_TEST_ENVIRONMENT=${KUBE_E2E_STORAGE_TEST_ENVIRONMENT:-false}
# Optional: if set to true, a image puller is deployed. Only for use in e2e clusters.
# TODO: Pipe this through GKE e2e clusters once we know it helps.
PREPULL_E2E_IMAGES="${PREPULL_E2E_IMAGES:-true}"
# Evict pods whenever compute resource availability on the nodes gets below a threshold.
EVICTION_HARD="${EVICTION_HARD:-memory.available<250Mi,nodefs.available<10%,nodefs.inodesFree<5%}"
# Optional: custom scheduling algorithm
SCHEDULING_ALGORITHM_PROVIDER="${SCHEDULING_ALGORITHM_PROVIDER:-}"
# Optional: install a default StorageClass
ENABLE_DEFAULT_STORAGE_CLASS="${ENABLE_DEFAULT_STORAGE_CLASS:-true}"
# TODO(dawn1107): Remove this once the flag is built into CVM image.
# Kernel panic upon soft lockup issue
SOFTLOCKUP_PANIC="${SOFTLOCKUP_PANIC:-true}" # true, false
|
shashidharatd/kubernetes
|
cluster/gce/config-test.sh
|
Shell
|
apache-2.0
| 10,759 |
#! /bin/bash
#
# Copyright (c) 2013-2016 Jens Deters http://www.jensd.de
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS"
# BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
#
# Default image namespace, default `docker run` flags, and wrapper version.
BASE_NAME_DEFAULT="jerady"
DOCKER_RUN_ARGS_DEFAULT="-ti"
VERSION=1.0
function print_dockerw_header {
  # Print the "dockerw" ASCII-art banner (used by `help`).
  # NOTE(review): the art's spacing looks whitespace-mangled in this copy;
  # verify against upstream before reformatting.
  cat <<EOF
 _ _
| | | |
 __| | ___ ___| | _____ _ ____ __
 / _' |/ _ \ / __| |/ / _ \ '__\ \ /\ / /
| (_| | (_) | (__| < __/ | \ V V /
 \__,_|\___/ \___|_|\_\___|_| \_/\_/
EOF
}
function print_title {
  # Print $1 in bold between two divider lines.
  # NOTE(review): ${DIVIDER} is not defined anywhere in this script; it is
  # presumably expected from the caller's environment -- confirm.
  printf '%s\n\e[1m %s \e[m\n%s\n' "${DIVIDER}" "${1}" "${DIVIDER}"
}
function print_done {
  # Print a green "Done." followed by a divider line.
  printf '\e[1;32mDone.\e[m\n%s\n' "${DIVIDER}"
}
function print_message {
  # Print "$1: $2" with the value ($2) in bold.
  printf "%s: \e[1m%s\e[m\n" "${1}" "${2}"
}
function print_green {
  # Print $1 in bold green (success).
  printf '\e[1;32m%s\e[m\n' "${1}"
}
function print_red {
  # Print $1 in bold red (failure).
  printf '\e[1;31m%s\e[m\n' "${1}"
}
function print_bold {
  # Print $1 in bold.
  printf '\e[1m%s\e[m\n' "${1}"
}
function print_env {
  # Dump the effective configuration variables (see check_env_variables).
  echo
  print_bold "$(basename $0) environment:"
  echo "CONTAINER_NAME = ${CONTAINER_NAME}"
  echo "BASE_NAME = ${BASE_NAME}"
  echo "IMAGE_NAME = ${IMAGE_NAME}"
  echo "DOCKER_RUN_ARGS = ${DOCKER_RUN_ARGS}"
  echo "DOCKER_RUN = ${DOCKER_RUN}"
  echo
}
function help {
  # Print the banner, version/copyright line, and the command summary.
  print_dockerw_header
  printf '\e[1m%s\e[m%s\n' "$(basename ${0})" ", a simple docker command wrapper for the rest of us"
  printf '\e[1m%s\e[m\n' "version ${VERSION}, (c) copyright 2016 Jens Deters / www.jensd.de"
  echo
  print_message "Usage:" "${0} [ build | run | stop | clean | status | env | help ]"
  cat <<EOF
build : builds the docker image
run : runs a docker container based on the image
stop : stops ALL running container based on the image and removes them
clean : stops and removes the image and containers
status : status if the docker image
env : list current environment variables
help : display this help
EOF
}
function build {
  # Build the image (delegates to build_image).
  print_title "docker BUILD ${IMAGE_NAME}"
  build_image ${IMAGE_NAME}
}
function run {
  # Stop any running container for this image, then start a fresh one
  # using the pre-assembled DOCKER_RUN command line.
  stop
  print_title "docker RUN ${IMAGE_NAME}"
  print_message "executing" "${DOCKER_RUN}"
  # NOTE(review): ${DOCKER_RUN} is intentionally unquoted so it word-splits
  # into the command and its arguments.
  ${DOCKER_RUN}
}
function stop {
  # Stop and remove all containers matching CONTAINER_NAME.
  print_title "docker STOP ${CONTAINER_NAME}"
  stop_remove_container ${CONTAINER_NAME}
  print_done
}
function clean {
  # Stop containers, then remove the image itself.
  stop
  print_title "docker CLEAN ${IMAGE_NAME}"
  remove_image ${IMAGE_NAME}
  print_done
}
function status {
  # Show `docker images` output filtered to this image.
  print_title "docker image STATUS ${IMAGE_NAME}"
  docker images ${IMAGE_NAME}
}
function build_image {
  # Build a Docker image tagged "$1" from the Dockerfile in the current
  # directory, timing the build; print success or die with exit 1.
  print_message "attempt to build image" "docker build -t ${1} ."
  # Test the command directly instead of the fragile `[ $? -eq 0 ]` pattern
  # (which breaks as soon as another command runs in between); also quote
  # the tag so an empty/spaced argument cannot word-split.
  if time docker build -t "${1}" .
  then
    status
    print_green "Successfully build ${IMAGE_NAME}"
  else
    print_red "Failed to build ${IMAGE_NAME}"
    exit 1
  fi
}
function remove_image {
  # Force-remove the image(s) whose listing matches "$1", if any.
  # NOTE: grep matches anywhere in the `docker images` line, so a short
  # name can match more than one image; IMAGE may hold several names and
  # is deliberately left unquoted below to pass each as its own argument.
  IMAGE=$(docker images | grep "${1}" | awk '{ print $1}')
  # Fixed typo in the user-facing message ("attemt" -> "attempt") and
  # quoted the arguments so names with spaces do not word-split.
  print_message "attempt to remove image" "${1}"
  if [ -n "${IMAGE}" ]
  then
    time docker rmi --force ${IMAGE}
  else
    print_message "no such image" "${1}"
  fi
}
function stop_remove_container {
  # Stop and remove every container whose `docker ps -a` line matches "$1".
  # CID may contain multiple ids (one per line); it is deliberately left
  # unquoted in the docker calls so each id becomes its own argument.
  CID=$(docker ps -a | grep "${1}" | awk '{ print $1}')
  # Quote the name argument so it cannot word-split in the message.
  print_message "attempt to stop running containers" "${1}"
  if [ -n "${CID}" ]
  then
    echo -e "Stopping and removing containers with CID:\n${CID}"
    docker stop ${CID}
    docker rm ${CID}
  else
    print_message "no running container found" "${1}"
  fi
}
function check_env_variables {
  # Populate any unset or empty configuration variable with its default.
  # Callers may pre-set CONTAINER_NAME, BASE_NAME, IMAGE_NAME,
  # DOCKER_RUN_ARGS, or DOCKER_RUN in the environment to override.
  if [[ -z ${CONTAINER_NAME} ]]
  then
    # Default container name: basename of the current directory. Quoted
    # command substitutions (instead of the original bare backticks) so a
    # working directory containing spaces does not word-split.
    CONTAINER_NAME="$(basename "$(pwd)")"
  fi
  if [[ -z ${BASE_NAME} ]]
  then
    BASE_NAME=${BASE_NAME_DEFAULT}
  fi
  if [[ -z ${IMAGE_NAME} ]]
  then
    IMAGE_NAME="${BASE_NAME}/${CONTAINER_NAME}"
  fi
  if [[ -z ${DOCKER_RUN_ARGS} ]]
  then
    DOCKER_RUN_ARGS=${DOCKER_RUN_ARGS_DEFAULT}
  fi
  if [[ -z ${DOCKER_RUN} ]]
  then
    DOCKER_RUN="docker run ${DOCKER_RUN_ARGS} --name ${CONTAINER_NAME} ${IMAGE_NAME}"
  fi
}
#
# MAIN
#
# Resolve defaults first, then dispatch on the first CLI argument.
# With no argument, "${1}" is empty and falls through to `help` via `*)`.
check_env_variables
case "${1}" in
  b|build) build ;;
  r|run) run ;;
  c|clean) clean ;;
  s|stop) stop ;;
  status) status ;;
  e|env) print_env ;;
  h|help) help ;;
  *) help ;;
esac
|
Jerady/dockerw
|
dockerw/dockerw.sh
|
Shell
|
apache-2.0
| 4,465 |
# Fetch all upstream sources listed in ./fetch-list into SOURCES/ and
# verify their SHA-512 checksums. Aborts on the first failure.
set -e
# Quote "$0" as well as the substitution so a script path containing
# spaces cannot word-split (the original used an unquoted $0).
cd "$(dirname "$0")/SOURCES"
wget -i ../fetch-list
cd ..
sha512sum -c sha512sums.txt
|
CCI-MOC/distro-packages
|
fetch.sh
|
Shell
|
apache-2.0
| 90 |
#!/bin/bash
########################################################################
#
# title: Deploy an image for MM DevPaas HeadEnd Component
# author: Marco Maccio (http://www.marmac.name)
# url: http://github.com/marcomaccio/devpaas
# description: Deploy DEVPAAS server as Virtual Box VM
#
# to run:
# sh deploy-devpaas-ubuntu-image.sh \
# deployments \
# image_name \
# image_version
#
########################################################################
# Strict mode belongs here, not in the shebang: options given on the
# shebang line (the original used `#!/bin/bash -eux`) are silently lost
# when the script is invoked as `sh script.sh`, exactly as the header
# above instructs.
set -eux
SECONDS=0
echo " Start: " "$(date)"
DEPLOYMENT_DIR=$1 #ex. $1=~/Development/deployment/vms/mm/devpaas
IMAGE_NAME=$2 #ex. $2=mm-base-ubuntu-server-1604 - omit extension .tar.gz
IMAGE_VERSION=$3 #ex. $3=0.0.1
echo "Deployment dir: $DEPLOYMENT_DIR"
# All expansions below are quoted so paths with spaces survive intact.
if [ ! -d "$DEPLOYMENT_DIR" ]; then
  echo "Create dir: $DEPLOYMENT_DIR"
  # Creates the image subdirectory too (mkdir -p makes parents as needed).
  mkdir -p "$DEPLOYMENT_DIR/$IMAGE_NAME"
fi
echo "****** Copy image $IMAGE_NAME/$IMAGE_NAME.tar.gz file into the deployment dir $DEPLOYMENT_DIR/ ******"
cp "builds/$IMAGE_NAME/$IMAGE_NAME.tar.gz" "$DEPLOYMENT_DIR/"
echo "****** Extract $IMAGE_NAME/$IMAGE_NAME.tar.gz file into the deployment dir ******"
if [ ! -d "$DEPLOYMENT_DIR/$IMAGE_NAME" ]; then
  echo "Create dir: $DEPLOYMENT_DIR/$IMAGE_NAME"
  mkdir -p "$DEPLOYMENT_DIR/$IMAGE_NAME"
fi
#cd $DEPLOYMENT_DIR/
echo "****** ls -al $DEPLOYMENT_DIR/"
ls -al "$DEPLOYMENT_DIR/"
echo "****** ls -al $DEPLOYMENT_DIR/$IMAGE_NAME"
ls -al "$DEPLOYMENT_DIR/$IMAGE_NAME"
tar -xvf "$DEPLOYMENT_DIR/${IMAGE_NAME}.tar.gz" -C "$DEPLOYMENT_DIR/$IMAGE_NAME"
echo "****** Import $DEPLOYMENT_DIR/$IMAGE_NAME/$IMAGE_NAME-$IMAGE_VERSION.ovf file in Virtual Box ******"
ls -al "$DEPLOYMENT_DIR/$IMAGE_NAME/"
vboxmanage import "$DEPLOYMENT_DIR/$IMAGE_NAME/$IMAGE_NAME-$IMAGE_VERSION.ovf"
echo "****** List all the VM in Virtual Box ******"
vboxmanage list vms
echo "****** Start the VM in Virtual Box ******"
vboxmanage startvm --type gui "$IMAGE_NAME-$IMAGE_VERSION"
echo "****** List all running VMs in Virtual Box ******"
vboxmanage list runningvms
echo "******************************************************************************"
echo "******************************************************************************"
echo "****** Show VM info for: $IMAGE_NAME-$IMAGE_VERSION in Virtual Box ******"
vboxmanage showvminfo "$IMAGE_NAME-$IMAGE_VERSION"
echo "******************************************************************************"
echo "******************************************************************************"
echo "**************************************************************************"
echo "TO POWER OFF: vboxmanage controlvm ${IMAGE_NAME}-${IMAGE_VERSION} poweroff"
echo "**************************************************************************"
duration=$SECONDS
echo "$(($duration / 60)) minutes and $(($duration % 60)) seconds elapsed."
echo " End: " "$(date)"
|
marcomaccio/devpaas
|
devpaas-vm/packer/VBOX-deploy-image-in-vbox.sh
|
Shell
|
apache-2.0
| 2,998 |
#!/usr/bin/env bash
# Copyright 2021 The Knative Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script runs conformance tests on a local kind environment.
set -euo pipefail
# Shared environment (versions such as GATEWAY_API_VERSION come from here).
source $(dirname $0)/../hack/test-env.sh
# Internal IPs of all worker nodes (control plane excluded by hostname label).
IPS=( $(kubectl get nodes -lkubernetes.io/hostname!=kind-control-plane -ojsonpath='{.items[*].status.addresses[?(@.type=="InternalIP")].address}') )
CLUSTER_SUFFIX=${CLUSTER_SUFFIX:-cluster.local}
# Comma-separated list of conformance tests to skip (none at the moment).
UNSUPPORTED_TESTS=""
# gateway-api CRD must be installed before Istio.
kubectl apply -k "github.com/kubernetes-sigs/gateway-api/config/crd?ref=${GATEWAY_API_VERSION}"
echo ">> Bringing up Istio"
# NodePort ingress so tests can reach the gateway via node internal IPs.
curl -sL https://istio.io/downloadIstioctl | sh -
$HOME/.istioctl/bin/istioctl install -y --set values.gateways.istio-ingressgateway.type=NodePort --set values.global.proxy.clusterDomain="${CLUSTER_SUFFIX}"
echo ">> Deploy Gateway API resources"
kubectl apply -f ./third_party/istio/gateway/
echo ">> Running conformance tests"
# Tests hit the first worker node's InternalIP as the ingress endpoint.
go test -race -count=1 -short -timeout=20m -tags=e2e ./test/conformance/ingressv2 \
--enable-alpha --enable-beta \
--skip-tests="${UNSUPPORTED_TESTS}" \
--ingressendpoint="${IPS[0]}" \
--cluster-suffix=$CLUSTER_SUFFIX
|
knative-sandbox/net-gateway-api
|
test/kind-conformance-istio.sh
|
Shell
|
apache-2.0
| 1,687 |
#!/bin/bash
# Show the complete firewalld configuration (services, ports, rich rules)
# for the default zone on this host.
sudo firewall-cmd --list-all
|
zhuwbigdata/hadoop-admin-utils
|
firewall-utils/listAllFW.sh
|
Shell
|
apache-2.0
| 41 |
# Build x.cpp into ./x, linking against ICU's I/O library (icuio).
g++ x.cpp -o x -licuio
|
rfree/misc
|
icu_windows1250/build.sh
|
Shell
|
bsd-2-clause
| 24 |
#!/bin/sh
# setup.sh
# Cumulus
#
# Created by John Clayton on 10/16/11.
# Copyright (c) 2011 Fivesquare Software, LLC. All rights reserved.
# FIX: the original used `dirname __FILE__`, which takes the dirname of the
# literal string "__FILE__" and therefore always yielded "."; `__FILE__` is
# a C/Ruby idiom, not shell. Use the script's own path ($0) instead.
ROOT=$(dirname "$0")
# NOTE(review): the canonical gem providing the `bundle` command is
# "bundler"; installing "bundle" may not be what was intended -- confirm.
sudo gem install bundle
bundle install --path "$ROOT/vendor/bundle"
|
FivesquareSoftware/Cumulus
|
Tests/Server/setup.sh
|
Shell
|
bsd-3-clause
| 238 |
#!/bin/sh
#
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# This script can change key (usually developer keys) and kernel config
# of kernels on an disk image (usually for SSD but also works for USB).
SCRIPT_BASE="$(dirname "$0")"
# Shared helpers: make_temp_file, err_die, debug_msg, partoffset, etc.
. "$SCRIPT_BASE/common_minimal.sh"
load_shflags || exit 1
# Constants used by DEFINE_*
VBOOT_BASE='/usr/share/vboot'
DEFAULT_KEYS_FOLDER="$VBOOT_BASE/devkeys"
DEFAULT_BACKUP_FOLDER='/mnt/stateful_partition/backups'
DEFAULT_PARTITIONS='2 4'
# TODO(hungte) The default image selection is no longer a SSD, so the script
# works more like "make_dev_image". We may change the file name in future.
# Current root device (e.g. /dev/sda3) and the disk it lives on (/dev/sda).
ROOTDEV="$(rootdev -s 2>/dev/null)"
ROOTDEV_PARTITION="$(echo $ROOTDEV | sed -n 's/.*\([0-9][0-9]*\)$/\1/p')"
ROOTDEV_DISK="$(rootdev -s -d 2>/dev/null)"
# ChromeOS layout pairs each rootfs with the kernel partition just before it.
ROOTDEV_KERNEL="$((ROOTDEV_PARTITION - 1))"
# DEFINE_string name default_value description flag
DEFINE_string image "$ROOTDEV_DISK" "Path to device or image file" "i"
DEFINE_string keys "$DEFAULT_KEYS_FOLDER" "Path to folder of dev keys" "k"
DEFINE_boolean remove_rootfs_verification \
$FLAGS_FALSE "Modify kernel boot config to disable rootfs verification" ""
DEFINE_string backup_dir \
"$DEFAULT_BACKUP_FOLDER" "Path of directory to store kernel backups" ""
DEFINE_string save_config "" \
"Base filename to store kernel configs to, instead of resigning." ""
DEFINE_string set_config "" \
"Base filename to load kernel configs from" ""
DEFINE_string partitions "" \
"List of partitions to examine (default: $DEFAULT_PARTITIONS)" ""
DEFINE_boolean recovery_key "$FLAGS_FALSE" \
"Use recovery key to sign image (to boot from USB" ""
DEFINE_boolean force "$FLAGS_FALSE" "Skip sanity checks and make the change" "f"
# Parse command line
FLAGS "$@" || exit 1
ORIGINAL_PARAMS="$@"
eval set -- "$FLAGS_ARGV"
# Remember whether the user explicitly chose partitions before defaulting.
ORIGINAL_PARTITIONS="$FLAGS_partitions"
: ${FLAGS_partitions:=$DEFAULT_PARTITIONS}
# Globals
# ----------------------------------------------------------------------------
set -e
# a log file to keep the output results of executed command
EXEC_LOG="$(make_temp_file)"
# Functions
# ----------------------------------------------------------------------------
# Rewrites a kernel command line so the rootfs is mounted read-write and
# without dm-verity: points root at the partition following the kernel,
# drops all dm_verity.* parameters and the dm="..." table, and flips
# " ro " to " rw ". Reads the config from "$*", writes result to stdout.
remove_rootfs_verification() {
  local new_root="PARTUUID=%U/PARTNROFF=1"
  echo "$*" | sed \
    -e 's| root=/dev/dm-[0-9] | root='"$new_root"' |' \
    -e 's| dm_verity[^=]*=[-0-9]*||g' \
    -e 's| dm="[^"]*"||' \
    -e 's| ro | rw |'
}
# Disables rootfs verification in the legacy (BIOS) boot configs stored on
# the EFI partition (#12) of the given image: selects the non-verified
# entry in grub.cfg and rewrites syslinux labels from the "-v*" variants.
# Best-effort: missing config files are silently skipped.
remove_legacy_boot_rootfs_verification() {
  # See src/scripts/create_legacy_bootloader_templates
  local image="$1"
  local mount_point="$(make_temp_dir)"
  local config_file
  debug_msg "Removing rootfs verification for legacy boot configuration."
  mount_image_partition "$image" 12 "$mount_point" || return $FLAGS_FALSE
  config_file="$mount_point/efi/boot/grub.cfg"
  [ ! -f "$config_file" ] ||
    sudo sed -i 's/^ *set default=2 *$/set default=0/g' "$config_file"
  config_file="$mount_point/syslinux/default.cfg"
  [ ! -f "$config_file" ] ||
    sudo sed -i 's/-vusb/-usb/g; s/-vhd/-hd/g' "$config_file"
  sudo umount "$mount_point"
}
# Wrapped version of dd: runs dd with the given arguments, captures all
# output in EXEC_LOG, and aborts the script (err_die) on failure.
mydd() {
  # oflag=sync is safer, but since we need bs=512, syncing every block would be
  # very slow.
  dd "$@" >"$EXEC_LOG" 2>&1 ||
    err_die "Failed in [dd $@], Message: $(cat "$EXEC_LOG")"
}
# Maps a ChromeOS kernel partition index to its human-readable name
# (2/4/6 -> Kernel A/B/C); anything else prints "Partition <n>".
cros_kernel_name() {
  local index="$1"
  if [ "$index" = "2" ]; then
    echo "Kernel A"
  elif [ "$index" = "4" ]; then
    echo "Kernel B"
  elif [ "$index" = "6" ]; then
    echo "Kernel C"
  else
    echo "Partition $index"
  fi
}
# Filters the given partition indexes down to those whose partition on
# FLAGS_image contains a readable kernel boot config. Prints the resulting
# space-separated list on stdout; diagnostics go to stderr.
find_valid_kernel_partitions() {
  local part_id
  local valid_partitions=""
  for part_id in $*; do
    local name="$(cros_kernel_name $part_id)"
    local kernel_part="$(make_partition_dev "$FLAGS_image" "$part_id")"
    if [ -z "$(dump_kernel_config "$kernel_part" 2>"$EXEC_LOG")" ]; then
      echo "INFO: $name: no kernel boot information, ignored." >&2
    else
      # Append with a space separator (skipped for the first entry).
      [ -z "$valid_partitions" ] &&
        valid_partitions="$part_id" ||
        valid_partitions="$valid_partitions $part_id"
      continue
    fi
  done
  debug_msg "find_valid_kernel_partitions: [$*] -> [$valid_partitions]"
  echo "$valid_partitions"
}
# Resigns a kernel on SSD or image.
# For each partition index in FLAGS_partitions: reads the kernel blob,
# optionally saves/replaces its boot config (FLAGS_save_config /
# FLAGS_set_config), optionally strips rootfs verification, re-signs it
# with KERNEL_KEYBLOCK/KERNEL_DATAKEY, verifies against KERNEL_PUBKEY,
# backs up the old blob, and writes the new blob back in place.
# NOTE: returns the COUNT of resigned kernels as its exit status (the
# caller captures it via `|| num_signed=$?`), so "success" is non-zero.
resign_ssd_kernel() {
  # bs=512 is the fixed block size for dd and cgpt
  local bs=512
  local ssd_device="$1"
  # reasonable size for current kernel partition
  local min_kernel_size=32000
  local max_kernel_size=65536
  local resigned_kernels=0
  for kernel_index in $FLAGS_partitions; do
    local old_blob="$(make_temp_file)"
    local new_blob="$(make_temp_file)"
    local name="$(cros_kernel_name $kernel_index)"
    # The rootfs paired with this kernel sits in the next partition.
    local rootfs_index="$(($kernel_index + 1))"
    debug_msg "Probing $name information"
    local offset size
    offset="$(partoffset "$ssd_device" "$kernel_index")" ||
      err_die "Failed to get partition $kernel_index offset from $ssd_device"
    size="$(partsize "$ssd_device" "$kernel_index")" ||
      err_die "Failed to get partition $kernel_index size from $ssd_device"
    if [ ! $size -gt $min_kernel_size ]; then
      echo "INFO: $name seems too small ($size), ignored."
      continue
    fi
    if [ ! $size -le $max_kernel_size ]; then
      echo "INFO: $name seems too large ($size), ignored."
      continue
    fi
    debug_msg "Reading $name from partition $kernel_index"
    mydd if="$ssd_device" of="$old_blob" bs=$bs skip=$offset count=$size
    debug_msg "Checking if $name is valid"
    local kernel_config
    if ! kernel_config="$(dump_kernel_config "$old_blob" 2>"$EXEC_LOG")"; then
      debug_msg "dump_kernel_config error message: $(cat "$EXEC_LOG")"
      echo "INFO: $name: no kernel boot information, ignored."
      continue
    fi
    if [ -n "${FLAGS_save_config}" ]; then
      # Save current kernel config
      local old_config_file
      old_config_file="${FLAGS_save_config}.$kernel_index"
      echo "Saving $name config to $old_config_file"
      echo "$kernel_config" > "$old_config_file"
      # Just save; don't resign
      continue
    fi
    if [ -n "${FLAGS_set_config}" ]; then
      # Set new kernel config from file
      local new_config_file
      new_config_file="${FLAGS_set_config}.$kernel_index"
      kernel_config="$(cat "$new_config_file")" ||
        err_die "Failed to read new kernel config from $new_config_file"
      debug_msg "New kernel config: $kernel_config)"
      echo "$name: Replaced config from $new_config_file"
    fi
    if [ ${FLAGS_remove_rootfs_verification} = $FLAGS_FALSE ]; then
      debug_msg "Bypassing rootfs verification check"
    else
      debug_msg "Changing boot parameter to remove rootfs verification"
      kernel_config="$(remove_rootfs_verification "$kernel_config")"
      debug_msg "New kernel config: $kernel_config"
      echo "$name: Disabled rootfs verification."
      remove_legacy_boot_rootfs_verification "$ssd_device"
    fi
    local new_kernel_config_file="$(make_temp_file)"
    echo -n "$kernel_config" >"$new_kernel_config_file"
    debug_msg "Re-signing $name from $old_blob to $new_blob"
    debug_msg "Using key: $KERNEL_DATAKEY"
    vbutil_kernel \
      --repack "$new_blob" \
      --keyblock "$KERNEL_KEYBLOCK" \
      --config "$new_kernel_config_file" \
      --signprivate "$KERNEL_DATAKEY" \
      --oldblob "$old_blob" >"$EXEC_LOG" 2>&1 ||
      err_die "Failed to resign $name. Message: $(cat "$EXEC_LOG")"
    debug_msg "Creating new kernel image (vboot+code+config)"
    # Overlay the repacked blob on a copy of the old partition contents so
    # any trailing data beyond the blob is preserved.
    local new_kern="$(make_temp_file)"
    cp "$old_blob" "$new_kern"
    mydd if="$new_blob" of="$new_kern" conv=notrunc
    if is_debug_mode; then
      debug_msg "for debug purposes, check *.dbgbin"
      cp "$old_blob" old_blob.dbgbin
      cp "$new_blob" new_blob.dbgbin
      cp "$new_kern" new_kern.dbgbin
    fi
    debug_msg "Verifying new kernel and keys"
    vbutil_kernel \
      --verify "$new_kern" \
      --signpubkey "$KERNEL_PUBKEY" --verbose >"$EXEC_LOG" 2>&1 ||
      err_die "Failed to verify new $name. Message: $(cat "$EXEC_LOG")"
    debug_msg "Backup old kernel blob"
    local backup_date_time="$(date +'%Y%m%d_%H%M%S')"
    local backup_name="$(echo "$name" | sed 's/ /_/g; s/^K/k/')"
    local backup_file_name="${backup_name}_${backup_date_time}.bin"
    local backup_file_path="$FLAGS_backup_dir/$backup_file_name"
    if mkdir -p "$FLAGS_backup_dir" &&
      cp -f "$old_blob" "$backup_file_path"; then
      echo "Backup of $name is stored in: $backup_file_path"
    else
      echo "WARNING: Cannot create file in $FLAGS_backup_dir... Ignore backups."
    fi
    debug_msg "Writing $name to partition $kernel_index"
    mydd \
      if="$new_kern" \
      of="$ssd_device" \
      seek=$offset \
      bs=$bs \
      count=$size \
      conv=notrunc
    resigned_kernels=$(($resigned_kernels + 1))
    debug_msg "Make the root file system writable if needed."
    # TODO(hungte) for safety concern, a more robust way would be to:
    # (1) change kernel config to ro
    # (2) check if we can enable rw mount
    # (3) change kernel config to rw
    if [ ${FLAGS_remove_rootfs_verification} = $FLAGS_TRUE ]; then
      local root_offset_sector=$(partoffset "$ssd_device" $rootfs_index)
      local root_offset_bytes=$((root_offset_sector * 512))
      if ! is_ext2 "$ssd_device" "$root_offset_bytes"; then
        debug_msg "Non-ext2 partition: $ssd_device$rootfs_index, skip."
      elif ! rw_mount_disabled "$ssd_device" "$root_offset_bytes"; then
        debug_msg "Root file system is writable. No need to modify."
      else
        # disable the RO ext2 hack
        debug_msg "Disabling rootfs ext2 RO bit hack"
        enable_rw_mount "$ssd_device" "$root_offset_bytes" >"$EXEC_LOG" 2>&1 ||
          err_die "Failed turning off rootfs RO bit. OS may be corrupted. " \
            "Message: $(cat "$EXEC_LOG")"
      fi
    fi
    # Sometimes doing "dump_kernel_config" or other I/O now (or after return to
    # shell) will get the data before modification. Not a problem now, but for
    # safety, let's try to sync more.
    sync; sync; sync
    echo "$name: Re-signed with developer keys successfully."
  done
  # If we saved the kernel config, exit now so we don't print an error
  if [ -n "${FLAGS_save_config}" ]; then
    echo "(Kernels have not been resigned.)"
    exit 0
  fi
  return $resigned_kernels
}
# Safety gate when operating on the live boot disk: allow the operation
# only if the target is the currently-booted kernel partition, or if the
# user explicitly listed partitions on the command line; otherwise print
# guidance and fail ($FLAGS_FALSE).
sanity_check_live_partitions() {
  debug_msg "Partition sanity check"
  if [ "$FLAGS_partitions" = "$ROOTDEV_KERNEL" ]; then
    debug_msg "only for current active partition - safe."
    return
  fi
  if [ "$ORIGINAL_PARTITIONS" != "" ]; then
    debug_msg "user has assigned partitions - provide more info."
    echo "INFO: Making change to $FLAGS_partitions on $FLAGS_image."
    return
  fi
  echo "
ERROR: YOU ARE TRYING TO MODIFY THE LIVE SYSTEM IMAGE $FLAGS_image.
The system may become unusable after that change, especially when you have
some auto updates in progress. To make it safer, we suggest you to only
change the partition you have booted with. To do that, re-execute this command
as:
sudo ./make_dev_ssd.sh $ORIGINAL_PARAMS --partitions $ROOTDEV_KERNEL
If you are sure to modify other partition, please invoke the command again and
explicitly assign only one target partition for each time (--partitions N )
"
  return $FLAGS_FALSE
}
# Verifies the machine runs developer-type firmware (or the dev root key)
# before touching the live disk; otherwise prints recovery instructions
# and fails ($FLAGS_FALSE). Reads the GBB region via flashrom.
sanity_check_live_firmware() {
  debug_msg "Firmware compatibility sanity check"
  if [ "$(crossystem mainfw_type)" = "developer" ]; then
    debug_msg "developer type firmware in active."
    return
  fi
  debug_msg "Loading firmware to check root key..."
  local bios_image="$(make_temp_file)"
  local rootkey_file="$(make_temp_file)"
  echo "INFO: checking system firmware..."
  sudo flashrom -p host -i GBB -r "$bios_image" >/dev/null 2>&1
  gbb_utility -g --rootkey="$rootkey_file" "$bios_image" >/dev/null 2>&1
  if [ ! -s "$rootkey_file" ]; then
    debug_msg "failed to read root key from system firmware..."
  else
    # The magic 130 is counted by "od dev-rootkey" for the lines until the body
    # of key is reached. Trailing bytes (0x00 or 0xFF - both may appear, and
    # that's why we need to skip them) are started at line 131.
    # TODO(hungte) compare with rootkey in $VBOOT_BASE directly.
    local rootkey_hash="$(od "$rootkey_file" |
      head -130 | md5sum |
      sed 's/ .*$//' )"
    # Known md5 of the well-known developer root key.
    if [ "$rootkey_hash" = "a13642246ef93daaf75bd791446fec9b" ]; then
      debug_msg "detected DEV root key in firmware."
      return
    else
      debug_msg "non-devkey hash: $rootkey_hash"
    fi
  fi
  echo "
ERROR: YOU ARE NOT USING DEVELOPER FIRMWARE, AND RUNNING THIS COMMAND MAY
THROW YOUR CHROMEOS DEVICE INTO UN-BOOTABLE STATE.
You need to either install developer firmware, or change system root key.
- To install developer firmware: type command
sudo chromeos-firmwareupdate --mode=todev
- To change system rootkey: disable firmware write protection (a hardware
switch) and then type command:
sudo ./make_dev_firmware.sh
If you are sure that you want to make such image without developer
firmware or you've already changed system root keys, please run this
command again with --force paramemeter:
sudo ./make_dev_ssd.sh --force $ORIGINAL_PARAMS
"
  return $FLAGS_FALSE
}
# Main
# ----------------------------------------------------------------------------
# Entry point: selects the signing keys (normal vs recovery), verifies the
# key files and image exist, runs the live-system safety checks (skippable
# with --force after a 5 second countdown), then resigns the kernels.
main() {
  local num_signed=0
  local num_given=$(echo "$FLAGS_partitions" | wc -w)
  # Check parameters
  if [ "$FLAGS_recovery_key" = "$FLAGS_TRUE" ]; then
    KERNEL_KEYBLOCK="$FLAGS_keys/recovery_kernel.keyblock"
    KERNEL_DATAKEY="$FLAGS_keys/recovery_kernel_data_key.vbprivk"
    KERNEL_PUBKEY="$FLAGS_keys/recovery_key.vbpubk"
  else
    KERNEL_KEYBLOCK="$FLAGS_keys/kernel.keyblock"
    KERNEL_DATAKEY="$FLAGS_keys/kernel_data_key.vbprivk"
    KERNEL_PUBKEY="$FLAGS_keys/kernel_subkey.vbpubk"
  fi
  debug_msg "Prerequisite check"
  ensure_files_exist \
    "$KERNEL_KEYBLOCK" \
    "$KERNEL_DATAKEY" \
    "$KERNEL_PUBKEY" \
    "$FLAGS_image" ||
    exit 1
  # checks for running on a live system image.
  if [ "$FLAGS_image" = "$ROOTDEV_DISK" ]; then
    debug_msg "check valid kernel partitions for live system"
    local valid_partitions="$(find_valid_kernel_partitions $FLAGS_partitions)"
    [ -n "$valid_partitions" ] ||
      err_die "No valid kernel partitions on $FLAGS_image ($FLAGS_partitions)."
    FLAGS_partitions="$valid_partitions"
    # Sanity checks
    if [ "$FLAGS_force" = "$FLAGS_TRUE" ]; then
      echo "
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
! INFO: ALL SANITY CHECKS WERE BYPASSED. YOU ARE ON YOUR OWN. !
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
" >&2
      local i
      for i in $(seq 5 -1 1); do
        # NOTE(review): relies on `echo -n "\r..."` expanding \r, which is
        # true for many /bin/sh builtins but not bash's echo -- confirm.
        echo -n "\rStart in $i second(s) (^C to abort)... " >&2
        sleep 1
      done
      echo ""
    elif ! sanity_check_live_firmware ||
      ! sanity_check_live_partitions; then
      err_die "IMAGE $FLAGS_image IS NOT MODIFIED."
    fi
  fi
  # resign_ssd_kernel "returns" the number of resigned kernels as status.
  resign_ssd_kernel "$FLAGS_image" || num_signed=$?
  debug_msg "Complete."
  if [ $num_signed -gt 0 -a $num_signed -le $num_given ]; then
    # signed something at least
    echo "Successfully re-signed $num_signed of $num_given kernel(s)" \
      " on device $FLAGS_image".
  else
    err_die "Failed re-signing kernels."
  fi
}
# People using this to process images may forget to add "-i",
# so adding parameter check is safer.
# (After `eval set -- "$FLAGS_ARGV"` above, any leftover positional
# argument means the user passed something that was not a flag.)
if [ "$#" -gt 0 ]; then
  flags_help
  err_die "Unknown parameters: $@"
fi
main
|
ccaapton/vboot_reference
|
scripts/image_signing/make_dev_ssd.sh
|
Shell
|
bsd-3-clause
| 15,671 |
#!/bin/sh
# Incremental CVMFS build driver: prepares (or resets) the build workspace
# and runs build_incremental_multi.sh either inside a docker container or
# directly on the host, depending on is_docker_host.
set -e
BUILD_SCRIPT_LOCATION=$(cd "$(dirname "$0")"; pwd)
. ${BUILD_SCRIPT_LOCATION}/../jenkins/common.sh
. ${BUILD_SCRIPT_LOCATION}/common.sh
# sanity checks
[ ! -z $WORKSPACE ] || die "WORKSPACE missing"
[ ! -z $CVMFS_BUILD_LOCATION ] || die "CVMFS_BUILD_LOCATION missing"
[ ! -z $CVMFS_SOURCE_LOCATION ] || die "CVMFS_SOURCE_LOCATION missing"
[ ! -z $CVMFS_BUILD_CLEAN ] || die "CVMFS_BUILD_CLEAN missing"
[ ! -z $CERNVM_CI_SCRIPT_LOCATION ] || die "CERNVM_CI_SCRIPT_LOCATION missing"
# setup a fresh build workspace on first execution or on request
if [ ! -d "$CVMFS_BUILD_LOCATION" ] || [ x"$CVMFS_BUILD_CLEAN" = x"true" ]; then
  rm -fR "$CVMFS_BUILD_LOCATION"
  mkdir -p "$CVMFS_BUILD_LOCATION"
fi
# run the build
# NOTE: command_tmpl is later expanded unquoted on purpose, so it
# word-splits into the command and its arguments; paths must not contain
# whitespace.
command_tmpl=""
desired_architecture="$(extract_arch $CVMFS_BUILD_ARCH)"
if is_docker_host; then
  echo "incremental build on docker for ${desired_architecture}..."
  docker_image_name="${CVMFS_BUILD_PLATFORM}_${desired_architecture}"
  command_tmpl="${CERNVM_CI_SCRIPT_LOCATION}/docker/run_on_docker.sh \
${WORKSPACE} \
${docker_image_name} \
${CVMFS_SOURCE_LOCATION}/ci/build_incremental_multi.sh \
${CVMFS_SOURCE_LOCATION} \
${CVMFS_BUILD_LOCATION} \
$(get_number_of_cpu_cores)"
else
  echo "incremental build (bare metal) for ${desired_architecture}..."
  # FIX: the original embedded double quotes inside this double-quoted
  # assignment ("$CVMFS_SOURCE_LOCATION" etc.), which actually closed and
  # reopened the string -- it only worked because the paths contain no
  # spaces. Use the same plain style as the docker branch above.
  command_tmpl="${CVMFS_SOURCE_LOCATION}/ci/build_incremental_multi.sh \
${CVMFS_SOURCE_LOCATION} \
${CVMFS_BUILD_LOCATION} \
$(get_number_of_cpu_cores)"
fi
echo "++ $command_tmpl"
$command_tmpl
|
cernvm/ci-scripts
|
cvmfs/incremental_build.sh
|
Shell
|
bsd-3-clause
| 1,747 |
#!/bin/bash
# Creates the flutter-engine-ssh-key secret.
# FIX: the original shebang was "#/bin/bash" (missing "!"), so the line was
# just a comment and the script ran under whatever shell invoked it.
set -e -x
source ../../kube/config.sh
if [ "$#" -ne 1 ]; then
  echo "The argument must point to the id_rsa file."
  echo ""
  # Usage line now shows the expected argument (was missing before).
  echo "./create-flutter-engine-ssh-key-secret.sh <path-to-id_rsa>"
  exit 1
fi
# Quoted so a key path containing spaces survives intact.
SECRET_LOCATION="$1"
SECRET_NAME="flutter-engine-ssh-key"
kubectl create secret generic "${SECRET_NAME}" --from-file="${SECRET_LOCATION}"
# FIX: removed the trailing `cd -`; nothing ever changed directory, so with
# `set -e` it aborted the script with an error whenever OLDPWD was unset.
|
google/skia-buildbot
|
autoroll/bin/create-flutter-engine-ssh-key-secret.sh
|
Shell
|
bsd-3-clause
| 390 |
#!/bin/sh
# Block until the process with PID $1 exits, printing "-" as a heartbeat
# every 5 seconds (keeps CI watchdogs such as travis-ci from timing out).
PROCESS="$1"
while :
do
  # $(...) instead of backticks; "$PROCESS" quoted so an empty or odd
  # argument cannot word-split. `ps -p PID -o comm=` prints nothing once
  # the process is gone.
  RESULT=$(ps -p "${PROCESS}" -o comm=)
  if [ "${RESULT:-null}" = null ]; then
    break
  else
    echo "-"
    sleep 5
  fi
done
# see https://github.com/travis-ci/travis-build/blob/master/lib/travis/build/templates/header.sh
# for what travis-ci does
|
maverickg/utils
|
shell/wait4.sh
|
Shell
|
bsd-3-clause
| 290 |
#!/bin/bash
# Script to set up a Django project on Vagrant.
# Runs as root during provisioning; $1 is the project name, which also
# names the database and the virtualenv.

# Installation settings

PROJECT_NAME=$1

DB_NAME=$PROJECT_NAME
VIRTUALENV_NAME=$PROJECT_NAME

PROJECT_DIR=/home/vagrant/$PROJECT_NAME
VIRTUALENV_DIR=/home/vagrant/.virtualenvs/$PROJECT_NAME
LOCAL_SETTINGS_PATH="/$PROJECT_NAME/settings/local.py"
PGSQL_VERSION=9.3

# Need to fix locale so that Postgres creates databases in UTF-8
cp -p $PROJECT_DIR/etc/install/etc-bash.bashrc /etc/bash.bashrc
locale-gen en_GB.UTF-8
dpkg-reconfigure locales

export LANGUAGE=en_GB.UTF-8
export LANG=en_GB.UTF-8
export LC_ALL=en_GB.UTF-8

# Install essential packages from Apt
apt-get update -y
# Python dev packages
apt-get install -y build-essential python python-dev
# python-setuptools being installed manually
# NOTE(review): this bitbucket bootstrap URL has long been retired —
# verify it still resolves before relying on this step.
wget https://bitbucket.org/pypa/setuptools/raw/bootstrap/ez_setup.py -O - | python
# Dependencies for image processing with Pillow (drop-in replacement for PIL)
# supporting: jpeg, tiff, png, freetype, littlecms
# (pip install pillow to get pillow itself, it is not in requirements.txt)
apt-get install -y libjpeg-dev libtiff-dev zlib1g-dev libfreetype6-dev liblcms2-dev
# Git (we'd rather avoid people keeping credentials for git commits in the repo, but sometimes we need it for pip requirements that aren't in PyPI)
apt-get install -y git

# Postgresql
# Only install and configure on first provision (psql absent).
if ! command -v psql; then
    apt-get install -y postgresql-$PGSQL_VERSION libpq-dev
    # Project-supplied auth config (trust settings for the vagrant user).
    cp $PROJECT_DIR/etc/install/pg_hba.conf /etc/postgresql/$PGSQL_VERSION/main/
    /etc/init.d/postgresql reload
fi

# virtualenv global setup
if ! command -v pip; then
    easy_install -U pip
fi
if [[ ! -f /usr/local/bin/virtualenv ]]; then
    pip install virtualenv virtualenvwrapper stevedore virtualenv-clone
fi

# bash environment global setup
cp -p $PROJECT_DIR/etc/install/bashrc /home/vagrant/.bashrc
su - vagrant -c "mkdir -p /home/vagrant/.pip_download_cache"

# Node.js, CoffeeScript and LESS
# Builds Node 4.1.1 from source on first provision (slow).
if ! command -v npm; then
    wget http://nodejs.org/dist/v4.1.1/node-v4.1.1.tar.gz
    tar xzf node-v4.1.1.tar.gz
    cd node-v4.1.1/
    ./configure && make && make install
    cd ..
    rm -rf node-v4.1.1/ node-v4.1.1.tar.gz
fi
if ! command -v coffee; then
    npm install -g coffee-script
fi
if ! command -v lessc; then
    npm install -g less
fi

# ---

# postgresql setup for project
createdb -Upostgres $DB_NAME

# virtualenv setup for project
# Created as the vagrant user; .project links the env to the source dir.
su - vagrant -c "/usr/local/bin/virtualenv $VIRTUALENV_DIR && \
    echo $PROJECT_DIR > $VIRTUALENV_DIR/.project && \
    PIP_DOWNLOAD_CACHE=/home/vagrant/.pip_download_cache $VIRTUALENV_DIR/bin/pip install -r $PROJECT_DIR/requirements/dev.txt"
echo "workon $VIRTUALENV_NAME" >> /home/vagrant/.bashrc

# Set execute permissions on manage.py, as they get lost if we build from a zip file
chmod a+x $PROJECT_DIR/manage.py

# Django project setup
su - vagrant -c "source $VIRTUALENV_DIR/bin/activate && cd $PROJECT_DIR && ./manage.py syncdb --noinput && ./manage.py migrate"

# Add settings/local.py to gitignore
if ! grep -Fqx $LOCAL_SETTINGS_PATH $PROJECT_DIR/.gitignore
then
    echo $LOCAL_SETTINGS_PATH >> $PROJECT_DIR/.gitignore
fi
|
Rawtechio/vagrant-django-template
|
etc/install/install.sh
|
Shell
|
bsd-3-clause
| 3,097 |
# $FreeBSD$
# m4 source for the printf(1) regression suite; the REGRESSION_* macros
# are expanded by the harness into TAP-style shell test cases.

REGRESSION_START($1)

echo '1..23'

REGRESSION_TEST(`b', `printf "abc%b%b" "def\n" "\cghi"')
REGRESSION_TEST(`d', `printf "%d,%5d,%.5d,%0*d,%.*d\n" 123 123 123 5 123 5 123')
REGRESSION_TEST(`f', `printf "%f,%-8.3f,%f,%f\n" +42.25 -42.25 inf nan')
REGRESSION_TEST(`l1', `LC_ALL=en_US.ISO8859-1 printf "%d\n" $(printf \"\\344)')
REGRESSION_TEST(`l2', `LC_ALL=en_US.UTF-8 printf "%d\n" $(printf \"\\303\\244)')
REGRESSION_TEST(`m1', `printf "%c%%%d\0\045\n" abc \"abc')
REGRESSION_TEST(`m2', `printf "abc\n\cdef"')
REGRESSION_TEST(`m3', `printf "%%%s\n" abc def ghi jkl')
REGRESSION_TEST(`m4', `printf "%d,%f,%c,%s\n"')
REGRESSION_TEST(`m5', `printf -- "-d\n"')
REGRESSION_TEST(`s', `printf "%.3s,%-5s\n" abcd abc')
# NOTE(review): the next four cases reuse the name 'zero' and use straight
# quotes rather than m4 `' quoting — presumably copy/paste; confirm the
# harness distinguishes them by index.
REGRESSION_TEST('zero', `printf "%u%u\n" 15')
REGRESSION_TEST('zero', `printf "%d%d\n" 15')
REGRESSION_TEST('zero', `printf "%d%u\n" 15')
REGRESSION_TEST('zero', `printf "%u%d\n" 15')
REGRESSION_TEST(`missingpos1', `printf "%1\$*s" 1 1 2>&1')
REGRESSION_TEST(`missingpos1', `printf "%*1\$s" 1 1 2>&1')
REGRESSION_TEST(`missingpos1', `printf "%1\$*.*s" 1 1 1 2>&1')
REGRESSION_TEST(`missingpos1', `printf "%*1\$.*s" 1 1 1 2>&1')
REGRESSION_TEST(`missingpos1', `printf "%*.*1\$s" 1 1 1 2>&1')
REGRESSION_TEST(`missingpos1', `printf "%1\$*2\$.*s" 1 1 1 2>&1')
REGRESSION_TEST(`missingpos1', `printf "%*1\$.*2\$s" 1 1 1 2>&1')
REGRESSION_TEST(`missingpos1', `printf "%1\$*.*2\$s" 1 1 1 2>&1')

REGRESSION_END()
|
TigerBSD/TigerBSD
|
FreeBSD/usr.bin/printf/tests/regress.sh
|
Shell
|
isc
| 1,433 |
#!/bin/bash
# Install the JDK, Maven and git needed to build the test apps.
set -eux

# Operate from the directory containing this script.
cd "$(dirname "$0")"

packages=(default-jdk maven git)
sudo apt-get update
sudo apt-get install -y "${packages[@]}"
|
selvasingh/azure-sdk-for-java
|
sdk/spring/azure-spring-boot-test-keyvault/install_apps.sh
|
Shell
|
mit
| 111 |
#!/bin/bash
# Train the single-layer variable-length LSTM model and mirror the run's
# output into a matching .out log file.
model="model-1_layer-variable"
th "${model}.lua" -hidden 16 -batch 16 -rate 0.10 -iter 12 \
  | tee "${model}.out"
|
kbullaughey/lstm-play
|
toys/lstm/model-1_layer-variable.sh
|
Shell
|
mit
| 121 |
#!/bin/bash
# Regenerate the protractor docs for the version recorded in package.json
# and commit them to the local gh-pages branch.
#
# Bash is required (not POSIX sh): the script uses ${BASH_SOURCE[0]} and
# bash's `echo -e`. The original "#!/bin/sh" shebang only worked where
# /bin/sh happened to be bash.
cd "$( dirname "${BASH_SOURCE[0]}" )/.."
npm run install_testapp
cd website

# Check that directory is clean
if [ "$(git status --porcelain | wc -l)" != "0" ]; then
  echo -e "\033[0;31m" 1>&2 # Red
  echo "We cannot push the generated docs unless the working directory is" 1>&2
  echo "clean. Either commit your changes, stash them, or generate the" 1>&2
  echo "docs manually by running gulp in /website/ and push them to" 1>&2
  echo "gh-pages at a later date" 1>&2
  echo -e "\033[0m" 1>&2 # Normal color
  exit 1
fi

# Switch to the branch where the version was bumped
VERSION=$(node ../scripts/get-version.js)
EXEC_BRANCH=$(git rev-parse --abbrev-ref HEAD)
git checkout "${VERSION}"
if [ $? -ne 0 ]; then
  echo -e "\033[0;31m" 1>&2 # Red
  echo "The package.json file indicates that the current version is" 1>&2
  echo "\"${VERSION}\", but there is no corresponding git tag" 1>&2
  echo -e "\033[0m" 1>&2 # Normal Color
  exit 1
fi

# Generate files
npm run build
if [ $? -ne 0 ]; then
  echo -e "\033[0;31m" 1>&2 # Red
  echo "Build failed. Try running 'npm install' in /website/." 1>&2
  echo -e "\033[0m" 1>&2 # Normal Color
  exit 1
fi

# Transfer files to gh-pages
cd ".."
git branch -D gh-pages   # NOTE(review): fails harmlessly when the branch does not exist
git branch gh-pages
git checkout gh-pages
git pull https://github.com/angular/protractor.git gh-pages:gh-pages -f
git reset --hard
cp -r website/build/* .
git add -A
git commit -m "chore(website): automatic docs update for ${VERSION}"
echo -e "\033[0;32m" # Green
echo "Created update commit in gh-pages branch"
echo -e "\033[0m" 1>&2 # Normal Color
# Return to the branch the script was started from.
git checkout "${EXEC_BRANCH}"
|
manoj9788/protractor
|
scripts/generate-docs.sh
|
Shell
|
mit
| 1,598 |
#!/bin/bash
# Provision Node.js 6.x (with npm) from NodeSource; no-op when npm is
# already installed.
if command -V npm >/dev/null 2>&1; then
  exit 0
fi

# Register the NodeSource apt repo, install nodejs, and expose
# project-local npm binaries on the PATH for future login shells.
curl -sL https://deb.nodesource.com/setup_6.x | sudo -E bash -
sudo apt-get install -y nodejs
echo "export PATH=\$PATH:node_modules/.bin" >> "$VHOME/.profile"
|
Staffjoy/v2
|
vagrant/npm.sh
|
Shell
|
mit
| 228 |
#!/bin/bash
# conda post-link hook: download the TENxBrainData 1.8.0 data package from
# one of several mirrors, verify its md5 checksum, and install it into the
# conda R library. Exits non-zero with a clear message if no mirror works.
FN="TENxBrainData_1.8.0.tar.gz"
URLS=(
  "https://bioconductor.org/packages/3.11/data/experiment/src/contrib/TENxBrainData_1.8.0.tar.gz"
  "https://bioarchive.galaxyproject.org/TENxBrainData_1.8.0.tar.gz"
  "https://depot.galaxyproject.org/software/bioconductor-tenxbraindata/bioconductor-tenxbraindata_1.8.0_src_all.tar.gz"
)
MD5="2565b4297c72e40bdcfa9ab9be5da1e6"

# Use a staging area in the conda dir rather than temp dirs, both to avoid
# permission issues as well as to have things downloaded in a predictable
# manner.
STAGING="$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM"
mkdir -p "$STAGING"
TARBALL="$STAGING/$FN"

SUCCESS=0
for URL in "${URLS[@]}"; do
  # -f: fail on HTTP errors instead of saving the error page (the original
  #     exit-code check passed even for 404s); -L: follow redirects.
  curl -fL "$URL" > "$TARBALL" || continue
  # Platform-specific md5sum checks.
  if [[ $(uname -s) == "Linux" ]]; then
    # Canonical two-space "HASH  FILE" line expected by md5sum -c.
    if md5sum -c <<<"$MD5  $TARBALL"; then
      SUCCESS=1
      break
    fi
  elif [[ $(uname -s) == "Darwin" ]]; then
    if [[ $(md5 "$TARBALL" | cut -f4 -d " ") == "$MD5" ]]; then
      SUCCESS=1
      break
    fi
  fi
done

if [[ $SUCCESS != 1 ]]; then
  echo "ERROR: post-link.sh was unable to download any of the following URLs with the md5sum $MD5:"
  printf '%s\n' "${URLS[@]}"
  exit 1
fi

# Install and clean up
R CMD INSTALL --library="$PREFIX/lib/R/library" "$TARBALL"
rm "$TARBALL"
rmdir "$STAGING"
|
roryk/recipes
|
recipes/bioconductor-tenxbraindata/post-link.sh
|
Shell
|
mit
| 1,313 |
#!/bin/bash
# conda post-link hook: download the MeSH.db 1.13.0 annotation package from
# one of several mirrors, verify its md5 checksum, and install it into the
# conda R library. Exits non-zero with a clear message if no mirror works.
FN="MeSH.db_1.13.0.tar.gz"
URLS=(
  "https://bioconductor.org/packages/3.11/data/annotation/src/contrib/MeSH.db_1.13.0.tar.gz"
  "https://bioarchive.galaxyproject.org/MeSH.db_1.13.0.tar.gz"
  "https://depot.galaxyproject.org/software/bioconductor-mesh.db/bioconductor-mesh.db_1.13.0_src_all.tar.gz"
  # NOTE(review): duplicate of the previous mirror, kept from the original
  # (it only costs one extra retry).
  "https://depot.galaxyproject.org/software/bioconductor-mesh.db/bioconductor-mesh.db_1.13.0_src_all.tar.gz"
)
MD5="ad8dd6852e665591565dfe43bee774e7"

# Use a staging area in the conda dir rather than temp dirs, both to avoid
# permission issues as well as to have things downloaded in a predictable
# manner.
STAGING="$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM"
mkdir -p "$STAGING"
TARBALL="$STAGING/$FN"

SUCCESS=0
for URL in "${URLS[@]}"; do
  # -f: fail on HTTP errors instead of saving the error page (the original
  #     exit-code check passed even for 404s); -L: follow redirects.
  curl -fL "$URL" > "$TARBALL" || continue
  # Platform-specific md5sum checks.
  if [[ $(uname -s) == "Linux" ]]; then
    # Canonical two-space "HASH  FILE" line expected by md5sum -c.
    if md5sum -c <<<"$MD5  $TARBALL"; then
      SUCCESS=1
      break
    fi
  elif [[ $(uname -s) == "Darwin" ]]; then
    if [[ $(md5 "$TARBALL" | cut -f4 -d " ") == "$MD5" ]]; then
      SUCCESS=1
      break
    fi
  fi
done

if [[ $SUCCESS != 1 ]]; then
  echo "ERROR: post-link.sh was unable to download any of the following URLs with the md5sum $MD5:"
  printf '%s\n' "${URLS[@]}"
  exit 1
fi

# Install and clean up
R CMD INSTALL --library="$PREFIX/lib/R/library" "$TARBALL"
rm "$TARBALL"
rmdir "$STAGING"
|
roryk/recipes
|
recipes/bioconductor-mesh.db/post-link.sh
|
Shell
|
mit
| 1,396 |
#!/bin/bash
#################################
######### VIMPRESSIONIST ########
#################################
# Installer for a Vim-centric desktop stack (i3, vim, vifm, zathura,
# termite, mutt, cmus, ...). Aborts on the first failing command.
set -e
# WIP guard: the installer is not finished, so bail out immediately.
# Everything below this exit is currently unreachable.
echo "Coming soon..."
exit 1
# =============================
# LOG
# =============================
# log LEVEL MESSAGE — print MESSAGE colorized by severity:
#   err  -> red, on stderr
#   warn -> yellow
#   info -> cyan
#   ok   -> green
log() {
  local level="$1" msg="$2"
  case "$level" in
    err)  echo -e "\033[0;31m$msg\033[0m" 1>&2 ;;
    warn) echo -e "\033[0;33m$msg\033[0m" ;;
    info) echo -e "\033[0;36m$msg\033[0m" ;;
    ok)   echo -e "\033[0;32m$msg\033[0m" ;;
  esac
}
# =============================
# INTERACTIVE CMD
# =============================
# Read a yes/no answer from stdin into the global $choice, re-prompting
# until it is exactly 'y' or 'n'. On 'n' the whole script exits with 1.
ask() {
  read -r choice
  until [[ "$choice" =~ ^[yn]$ ]]; do
    log err "Invalid choice. Please enter 'y' (yes) or 'n' (no)."
    read -r choice
  done
  if [ "$choice" = "n" ]; then
    log ok "Bye"
    exit 1
  fi
}
# =============================
# DISCLAIMER
# =============================
# Warn that existing configurations for every Vim-like app will be
# replaced, then require an explicit 'y' (ask exits the script on 'n').
log warn "You are going to install \033[1;33mVimpressionist\033[0m."
log warn "This will replace your configuration for each Vim-like app."
log warn "You will also need a working Internet connection.\n"
log warn "Do you really want to continue? (y/n)"
ask
# =============================
# PACKAGE MANAGEMENT
# =============================
# /!\ Be careful here!
# For instance, pacman is not a package manager on Debian systems (it is the game...).
pkg_manager=""
pkg_managers=( "pacman" "apt" "aptitude" "yum" "dnf" "zypper" "emerge" "equo" )

# Record $1 as the detected package manager when the command exists.
# The caller probes every candidate, so the LAST match wins.
test_pkg() {
  if hash "$1" 2> /dev/null; then
    pkg_manager="$1"
  fi
}

for candidate in "${pkg_managers[@]}"; do
  test_pkg "$candidate"
done
pkg_update() {
# Arch & Co.
if [ "$pkg_manager" = "pacman" ] && [ -f /etc/arch-release ]; then
sudo pacman -Syu --noconfirm
# Debian & Co.
elif [ "$pkg_manager" = "apt" ] && [ -f /etc/debian_version ]; then
sudo apt-get update
sudo apt-get --assume-yes upgrade
elif [ "$pkg_manager" = "aptitude" ] && [ -f /etc/debian_version ]; then
sudo aptitude update
sudo aptitude --assume-yes upgrade
# Red Hat & Co.
elif [ "$pkg_manager" = "yum" ] && [ -f /etc/redhat-release ]; then
sudo yum update
elif [ "$pkg_manager" = "dnf" ] && [ -f /etc/redhat-release ]; then
sudo dnf update
# SuSE & Co.
elif [ "$pkg_manager" = "zypper" ] && [ -f /etc/SuSE-release ]; then
sudo zypper update
# Gentoo & Co.
# elif [ "$pkg_manager" = "emerge" ] && [ -f /etc/gentoo-release ]; then
# sudo emerge install "$@"
# elif [ "$pkg_manager" = "equo" ] && [ -f /etc/gentoo-release ]; then
# sudo equo install "$@"
else
log err "Your package manager has not been recognized."
exit 2
fi
}
pkg_install() {
# Arch & Co.
if [ "$pkg_manager" = "pacman" ] && [ -f /etc/arch-release ]; then
if [ "$#" -eq 0 ]; then
# vte3-ng is a dependency of the Termite package (conflict with vte3)
sudo pacman -Rdd --noconfirm vte3
sudo pacman -S --noconfirm i3 dmenu gvim vifm zathura zathura-pdf-mupdf termite mutt cmus feh lynx conky
else
sudo pacman -S --noconfirm "$@"
fi
# Debian & Co.
elif [ "$pkg_manager" = "apt" ] && [ -f /etc/debian_version ]; then
if [ "$#" -eq 0 ]; then
# Termite is not in official repos
git clone https://github.com/Corwind/termite-install.git
sh termite-install/termite-install.sh
sudo apt-get --assume-yes install i3 suckless-tools vim vim-gnome vifm zathura mutt cmus feh lynx conky
else
sudo apt-get --assume-yes install "$@"
fi
elif [ "$pkg_manager" = "aptitude" ] && [ -f /etc/debian_version ]; then
if [ "$#" -eq 0 ]; then
# Termite is not in official repos
git clone https://github.com/Corwind/termite-install.git
sh termite-install/termite-install.sh
sudo aptitude --assume-yes install i3 suckless-tools vim vim-gnome vifm zathura mutt cmus feh lynx conky
else
sudo aptitude --assume-yes install "$@"
fi
# Red Hat & Co.
elif [ "$pkg_manager" = "yum" ] && [ -f /etc/redhat-release ]; then
if [ "$#" -eq 0 ]; then
# Cmus and Termite are not in official repos
# Termite install???
su -lc 'yum install --nogpgcheck http://download1.rpmfusion.org/free/fedora/rpmfusion-free-release-stable.noarch.rpm'
sudo yum --assumeyes install i3 dmenu vim-enhanced vim-X11 vifm zathura mutt cmus feh lynx conky
else
sudo yum --assumeyes install "$@"
fi
elif [ "$pkg_manager" = "dnf" ] && [ -f /etc/redhat-release ]; then
if [ "$#" -eq 0 ]; then
# Cmus and Termite are not in official repos
su -lc 'dnf install --nogpgcheck http://download1.rpmfusion.org/free/fedora/rpmfusion-free-release-stable.noarch.rpm'
sudo dnf --assumeyes install i3 dmenu vim-enhanced vim-X11 vifm zathura mutt cmus feh lynx conky
else
sudo dnf --assumeyes install "$@"
fi
# SuSE & Co.
elif [ "$pkg_manager" = "zypper" ] && [ -f /etc/SuSE-release ]; then
if [ "$#" -eq 0 ]; then
# Zathura, Cmus and Termite are not in official repos
sudo zypper addrepo http://download.opensuse.org/repositories/home:msvec/openSUSE_Tumbleweed/home:msvec.repo
sudo zypper addrepo http://download.opensuse.org/repositories/home:cschneemann/openSUSE_Tumbleweed/home:cschneemann.repo
sudo zypper addrepo http://download.opensuse.org/repositories/home:cyphar/openSUSE_Tumbleweed/home:cyphar.repo
sudo zypper refresh
sudo zypper --non-interactive install i3 dmenu vim gvim vifm zathura termite mutt cmus feh lynx conky
else
sudo zypper --non-interactive install "$@"
fi
# Gentoo & Co.
# elif [ "$pkg_manager" = "emerge" ] && [ -f /etc/gentoo-release ]; then
# if [ "$#" -eq 0 ]; then
# sudo emerge install ...
# else
# sudo emerge install "$@"
# fi
# elif [ "$pkg_manager" = "equo" ] && [ -f /etc/gentoo-release ]; then
# if [ "$#" -eq 0 ]; then
# sudo emerge install ...
# else
# sudo equo install "$@"
# fi
else
log err "Your package manager has not been recognized."
exit 2
fi
}
# =============================
# DEPENDENCIES
# =============================
# Tests based on exit codes (0 = OK)
firefox=$(hash firefox 2> /dev/null; echo $?)
chromium=$(hash chromium 2> /dev/null || hash chromium-browser 2> /dev/null; echo $?)
git=$(hash git 2> /dev/null; echo $?)
if [ "$firefox" -ne 0 ]; then
log warn "Firefox is necessary to use Vimperator. Installing Firefox..."
pkg_install "firefox"
fi
if [ "$chromium" -ne 0 ]; then
log warn "Chromium is necessary to use Vimium. Installing Chromium..."
if [ "$pkg_manager" = "apt" ]; then
pkg_install "chromium-browser"
else
pkg_install "chromium"
fi
fi
if [ "$git" -ne 0 ]; then
log warn "Git is necessary to clone vim-anywhere. Installing Git..."
pkg_install "git"
fi
# =============================
# CORE
# =============================
# Main packages
pkg_install
# log warn "vim-anywhere requires GNOME desktop environment (DE)"
# log warn "Do you want to install GNOME? (y/n)"
# log warn "(If you answer 'no', you will keep your DE and vim-anywhere will not be installed.)"
# ask "optional"
# if [ "$choice" = "y" ]; then
# git clone https://github.com/cknadler/vim-anywhere.git
# pkg_install xclip
# bash vim-anywhere/install
# rm -rf vim-anywhere
# fi
log info "So far so good! Now you have to install Vimperator manually."
log info "Firefox will open a new window at the right URL."
log info "Please close it after the installation of Vimperator to continue with the wizard."
read -n1 -rsp $'Press any key to continue or Ctrl+C to exit...\n'
firefox https://addons.mozilla.org/fr/firefox/addon/vimperator/
log info "Alright! Now you need to do the same thing for Vimium."
log info "Chromium will open a new window at the right URL."
log info "Please close it after the installation of Vimium to continue with the wizard."
read -n1 -rsp $'Press any key to continue or Ctrl+C to exit...\n'
if [ "$pkg_manager" = "apt" ]; then
chromium-browser https://chrome.google.com/webstore/detail/vimium/dbepggeogbaibhgnhhndojpepiihcmeb
else
chromium https://chrome.google.com/webstore/detail/vimium/dbepggeogbaibhgnhhndojpepiihcmeb
fi
cp readline/inputrc ~/.inputrc
# -----------------------------
# Handle simple directories
# -----------------------------
# Ensure $1 exists as an EMPTY directory: create it when missing, wipe its
# contents when it already exists.
handle_simple_dir() {
  local dir="$1"
  if [ -d "$dir" ]; then
    # BUG FIX: the glob must stay outside the quotes — the original
    # rm -rf "${dir:?}/*" asked rm to delete a literal file named '*',
    # so existing directories were never actually emptied.
    # ${dir:?} still aborts if $dir is empty, guarding against rm -rf /*.
    rm -rf -- "${dir:?}"/*
  else
    mkdir "$dir"
  fi
}
# -----------------------------
# (G)Vim
# -----------------------------
# vimrc & gvimrc files are generally created by the user
# They are not created automatically after an install
handle_simple_dir ~/.vim
cp -r vim/* ~/.vim
mv ~/.vim/vimrc ~/.vimrc
# Plugins
git clone https://github.com/VundleVim/Vundle.vim.git ~/.vim/bundle/Vundle.vim
vim +PluginInstall +qall
# Patched fonts
git clone https://github.com/powerline/fonts.git
bash fonts/install.sh
rm -rf fonts
# -----------------------------
# i3
# -----------------------------
# i3 comes with a config directory
# It can be ~/.config/i3 or ~/.i3
if [ -d ~/.config/i3 ]; then
rm -rf ~/.config/i3/*
cp -r i3/* ~/.config/i3
mv ~/.config/i3/conkyrc ~/.conkyrc
sudo mv ~/.config/i3/scripts/* /usr/local/bin
else
rm -rf ~/.i3/*
cp -r i3/* ~/.i3
mv ~/.i3/conkyrc ~/.conkyrc
sudo mv ~/.i3/scripts/* /usr/local/bin
fi
# -----------------------------
# Vifm
# -----------------------------
# Vifm comes with a config directory
# It can be ~/.config/vifm or ~/.vifm
if [ -d ~/.config/vifm ]; then
rm -rf ~/.config/vifm/*
cp -r vifm/* ~/.config/vifm
else
rm -rf ~/.vifm/*
cp -r vifm/* ~/.vifm
fi
# -----------------------------
# Zathura
# -----------------------------
# Zathura normally comes with an empty config directory
# Here is the path: ~/.config/zathura
handle_simple_dir ~/.config/zathura
cp -r zathura/* ~/.config/zathura
# -----------------------------
# Termite
# -----------------------------
# There is usually no Termite directory after install
handle_simple_dir ~/.config/termite
cp -r termite/* ~/.config/termite
echo -e "\n# Termite hack" >> ~/.bashrc
echo "source /etc/profile.d/vte.sh" >> ~/.bashrc
# -----------------------------
# Mutt
# -----------------------------
# Mutt does not create directories or files in $HOME after install
# The user is responsible for its entire configuration (through ~/.muttrc and ~/.mutt)
handle_simple_dir ~/.mutt
cp -r mutt/* ~/.mutt
mv ~/.mutt/muttrc ~/.muttrc
# -----------------------------
# Vimperator
# -----------------------------
# Vimperator should create its config directory (~/.vimperator) automatically
handle_simple_dir ~/.vimperator
cp -r vimperator/* ~/.vimperator
mv ~/.vimperator/vimperatorrc ~/.vimperatorrc
# -----------------------------
# Vimium
# -----------------------------
log info "Almost done! Now you have to configure Vimium manually."
log info "Check the vimiumrc in the repo and copy/paste every part of the config.\n"
if [ "$pkg_manager" = "apt" ]; then
chromium-browser chrome-extension://dbepggeogbaibhgnhhndojpepiihcmeb/pages/options.html
else
chromium chrome-extension://dbepggeogbaibhgnhhndojpepiihcmeb/pages/options.html
fi
# =============================
# END
# =============================
log ok "OK"
exit 0
|
Badacadabra/Vimpressionist
|
install.sh
|
Shell
|
mit
| 11,822 |
#!/bin/bash
# Build the multi-resolution Windows .ico for the Qt client from the
# master PNG, going through 16/32/48 px intermediates.
ICON_SRC=../../src/qt/res/icons/rotocoin.png
ICON_DST=../../src/qt/res/icons/rotocoin.ico

for px in 16 32 48; do
  convert "${ICON_SRC}" -resize "${px}x${px}" "rotocoin-${px}.png"
done
convert rotocoin-16.png rotocoin-32.png rotocoin-48.png "${ICON_DST}"
|
rapecoin/rapecoin
|
share/qt/make_windows_icon.sh
|
Shell
|
mit
| 359 |
#!/bin/bash
# Script Name: AtoMiC NZBHydra systemd update
# Rewrites the stock NZBHydra unit file in place so it points at this
# install's python, app path, config and database, and runs as the
# invoking user/group.
# Assumes APPPATH, APPSETTINGS, APPSYSTEMD, UNAME, UGROUP, RED and
# ENDCOLOR are exported by the AtoMiC toolkit caller — TODO confirm.
sudo sed -i "s@ExecStart=/path-to/python2 /path-to/nzbhydra/nzbhydra.py --nobrowser@ExecStart=/usr/bin/python2 $APPPATH/nzbhydra.py --nobrowser --config $APPSETTINGS --database $APPPATH/nzbhydra.db@g" /etc/systemd/system/$APPSYSTEMD || { echo -e $RED'Modifying ExecStart in SYSTEMD file failed.'$ENDCOLOR; exit 1; }
sudo sed -i "s@User=nzbhydra@User=$UNAME@g" /etc/systemd/system/$APPSYSTEMD || { echo -e $RED'Modifying USER in SYSTEMD file failed.'$ENDCOLOR; exit 1; }
sudo sed -i "s@Group=nzbhydra@Group=$UGROUP@g" /etc/systemd/system/$APPSYSTEMD || { echo -e $RED'Modifying GROUP in SYSTEMD file failed.'$ENDCOLOR; exit 1; }
# Pick up the edited unit and start the service at boot.
sudo systemctl daemon-reload
sudo systemctl enable $APPSYSTEMD
|
htpcBeginner/AtoMiC-ToolKit
|
nzbhydra/nzbhydra-systemd-update.sh
|
Shell
|
mit
| 751 |
#!/bin/sh
set -eu
# No args.
# Example usage: sh mediadevices-disable.sh

# Disable webcam.
# Unload the UVC kernel module; fails (and aborts, via set -e) if some
# process still holds the camera device.
sudo modprobe -r uvcvideo

# Disable microphone.
# Mute and zero the ALSA capture and mic controls.
amixer set Capture nocap
amixer set Capture 0%
amixer set Mic mute
amixer set Mic 0%
|
iterami/Scripts
|
ubuntu/mediadevices-disable.sh
|
Shell
|
cc0-1.0
| 227 |
#!/bin/sh
# German speech-dialog demo: tear down any leftover RTC components, launch
# Julius (ASR), MARY (TTS), a console input and a PulseAudio output in
# terminals, wire them together, run for 30 seconds, then shut down.
ctx="/localhost/$(hostname).host_cxt"
components="JuliusRTC0 MARYRTC0 ConsoleIn0 PulseAudioOutput0"

echo "exiting existing components..."
for comp in $components; do
    rtexit "$ctx/$comp.rtc"
done
sleep 3

echo "launching components..."
gnome-terminal -x python ConsoleIn-de.py
gnome-terminal -x maryrtc
gnome-terminal -x juliusrtc sample-de.grxml
gnome-terminal -x pulseaudiooutput
sleep 3

echo "connecting components..."
rtcon "$ctx/ConsoleIn0.rtc:out" "$ctx/MARYRTC0.rtc:text"
rtcon "$ctx/MARYRTC0.rtc:result" "$ctx/PulseAudioOutput0.rtc:AudioDataIn"
rtcon "$ctx/PulseAudioOutput0.rtc:AudioDataOut" "$ctx/JuliusRTC0.rtc:data"
sleep 1

echo "activating components..."
for comp in $components; do
    rtact "$ctx/$comp.rtc"
done

echo "inloop simulation for 30 seconds..."
sleep 30

echo "existing components..."
for comp in $components; do
    rtexit "$ctx/$comp.rtc"
done
|
yosuke/OpenHRIVoice
|
examples/juliusrtc/run-de.sh
|
Shell
|
epl-1.0
| 1,399 |
#!/bin/sh

test_description='Test diff indent heuristic.
'

# Pin the initial branch name so the tests are independent of the
# builder's init.defaultBranch configuration.
GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=main
export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME

. ./test-lib.sh
. "$TEST_DIRECTORY"/diff-lib.sh
compare_diff () {
sed -e "/^index /d" <"$1" >.tmp-1
sed -e "/^index /d" <"$2" >.tmp-2
test_cmp .tmp-1 .tmp-2 && rm -f .tmp-1 .tmp-2
}
# Compare blame output using the expectation for a diff as reference:
# strip the diff header (first 4 lines) and the '+' markers from the
# expected diff, strip the commit/author annotations from the blame
# output, then require the remaining line streams to match.
compare_blame () {
	sed -n '1,4d; s/^+//p' <"$1" >.tmp-1
	sed -n 's/^[^^][^)]*) *//p' <"$2" >.tmp-2
	test_cmp .tmp-1 .tmp-2 && rm -f .tmp-1 .tmp-2
}
test_expect_success 'prepare' '
cat <<-\EOF >spaces.txt &&
1
2
a
b
3
4
EOF
cat <<-\EOF >functions.c &&
1
2
/* function */
foo() {
foo
}
3
4
EOF
git add spaces.txt functions.c &&
test_tick &&
git commit -m initial &&
git branch old &&
cat <<-\EOF >spaces.txt &&
1
2
a
b
a
b
3
4
EOF
cat <<-\EOF >functions.c &&
1
2
/* function */
bar() {
foo
}
/* function */
foo() {
foo
}
3
4
EOF
git add spaces.txt functions.c &&
test_tick &&
git commit -m initial &&
git branch new &&
tr "_" " " <<-\EOF >spaces-expect &&
diff --git a/spaces.txt b/spaces.txt
--- a/spaces.txt
+++ b/spaces.txt
@@ -3,5 +3,8 @@
a
_
b
+a
+
+b
3
4
EOF
tr "_" " " <<-\EOF >spaces-compacted-expect &&
diff --git a/spaces.txt b/spaces.txt
--- a/spaces.txt
+++ b/spaces.txt
@@ -2,6 +2,9 @@
2
a
_
+b
+a
+
b
3
4
EOF
tr "_" " " <<-\EOF >functions-expect &&
diff --git a/functions.c b/functions.c
--- a/functions.c
+++ b/functions.c
@@ -1,6 +1,11 @@
1
2
/* function */
+bar() {
+ foo
+}
+
+/* function */
foo() {
foo
}
EOF
tr "_" " " <<-\EOF >functions-compacted-expect
diff --git a/functions.c b/functions.c
--- a/functions.c
+++ b/functions.c
@@ -1,5 +1,10 @@
1
2
+/* function */
+bar() {
+ foo
+}
+
/* function */
foo() {
foo
EOF
'
# --- diff tests ----------------------------------------------------------
test_expect_success 'diff: ugly spaces' '
git diff --no-indent-heuristic old new -- spaces.txt >out &&
compare_diff spaces-expect out
'
test_expect_success 'diff: --no-indent-heuristic overrides config' '
git -c diff.indentHeuristic=true diff --no-indent-heuristic old new -- spaces.txt >out2 &&
compare_diff spaces-expect out2
'
test_expect_success 'diff: nice spaces with --indent-heuristic' '
git -c diff.indentHeuristic=false diff --indent-heuristic old new -- spaces.txt >out-compacted &&
compare_diff spaces-compacted-expect out-compacted
'
test_expect_success 'diff: nice spaces with diff.indentHeuristic=true' '
git -c diff.indentHeuristic=true diff old new -- spaces.txt >out-compacted2 &&
compare_diff spaces-compacted-expect out-compacted2
'
test_expect_success 'diff: --indent-heuristic with --patience' '
git diff --indent-heuristic --patience old new -- spaces.txt >out-compacted3 &&
compare_diff spaces-compacted-expect out-compacted3
'
test_expect_success 'diff: --indent-heuristic with --histogram' '
git diff --indent-heuristic --histogram old new -- spaces.txt >out-compacted4 &&
compare_diff spaces-compacted-expect out-compacted4
'
test_expect_success 'diff: ugly functions' '
git diff --no-indent-heuristic old new -- functions.c >out &&
compare_diff functions-expect out
'
test_expect_success 'diff: nice functions with --indent-heuristic' '
git diff --indent-heuristic old new -- functions.c >out-compacted &&
compare_diff functions-compacted-expect out-compacted
'
# --- blame tests ---------------------------------------------------------
test_expect_success 'blame: nice spaces with --indent-heuristic' '
git blame --indent-heuristic old..new -- spaces.txt >out-blame-compacted &&
compare_blame spaces-compacted-expect out-blame-compacted
'
test_expect_success 'blame: nice spaces with diff.indentHeuristic=true' '
git -c diff.indentHeuristic=true blame old..new -- spaces.txt >out-blame-compacted2 &&
compare_blame spaces-compacted-expect out-blame-compacted2
'
test_expect_success 'blame: ugly spaces with --no-indent-heuristic' '
git blame --no-indent-heuristic old..new -- spaces.txt >out-blame &&
compare_blame spaces-expect out-blame
'
test_expect_success 'blame: ugly spaces with diff.indentHeuristic=false' '
git -c diff.indentHeuristic=false blame old..new -- spaces.txt >out-blame2 &&
compare_blame spaces-expect out-blame2
'
test_expect_success 'blame: --no-indent-heuristic overrides config' '
git -c diff.indentHeuristic=true blame --no-indent-heuristic old..new -- spaces.txt >out-blame3 &&
git blame old..new -- spaces.txt >out-blame &&
compare_blame spaces-expect out-blame3
'
test_expect_success 'blame: --indent-heuristic overrides config' '
git -c diff.indentHeuristic=false blame --indent-heuristic old..new -- spaces.txt >out-blame-compacted3 &&
compare_blame spaces-compacted-expect out-blame-compacted2
'
# --- diff-tree tests -----------------------------------------------------
test_expect_success 'diff-tree: nice spaces with --indent-heuristic' '
git diff-tree --indent-heuristic -p old new -- spaces.txt >out-diff-tree-compacted &&
compare_diff spaces-compacted-expect out-diff-tree-compacted
'
test_expect_success 'diff-tree: nice spaces with diff.indentHeuristic=true' '
git -c diff.indentHeuristic=true diff-tree -p old new -- spaces.txt >out-diff-tree-compacted2 &&
compare_diff spaces-compacted-expect out-diff-tree-compacted2
'
test_expect_success 'diff-tree: ugly spaces with --no-indent-heuristic' '
git diff-tree --no-indent-heuristic -p old new -- spaces.txt >out-diff-tree &&
compare_diff spaces-expect out-diff-tree
'
test_expect_success 'diff-tree: ugly spaces with diff.indentHeuristic=false' '
git -c diff.indentHeuristic=false diff-tree -p old new -- spaces.txt >out-diff-tree2 &&
compare_diff spaces-expect out-diff-tree2
'
test_expect_success 'diff-tree: --indent-heuristic overrides config' '
git -c diff.indentHeuristic=false diff-tree --indent-heuristic -p old new -- spaces.txt >out-diff-tree-compacted3 &&
compare_diff spaces-compacted-expect out-diff-tree-compacted3
'
test_expect_success 'diff-tree: --no-indent-heuristic overrides config' '
git -c diff.indentHeuristic=true diff-tree --no-indent-heuristic -p old new -- spaces.txt >out-diff-tree3 &&
compare_diff spaces-expect out-diff-tree3
'
# --- diff-index tests ----------------------------------------------------
test_expect_success 'diff-index: nice spaces with --indent-heuristic' '
git checkout -B diff-index &&
git reset --soft HEAD~ &&
git diff-index --indent-heuristic -p old -- spaces.txt >out-diff-index-compacted &&
compare_diff spaces-compacted-expect out-diff-index-compacted &&
git checkout -f main
'
test_expect_success 'diff-index: nice spaces with diff.indentHeuristic=true' '
git checkout -B diff-index &&
git reset --soft HEAD~ &&
git -c diff.indentHeuristic=true diff-index -p old -- spaces.txt >out-diff-index-compacted2 &&
compare_diff spaces-compacted-expect out-diff-index-compacted2 &&
git checkout -f main
'
test_expect_success 'diff-index: ugly spaces with --no-indent-heuristic' '
git checkout -B diff-index &&
git reset --soft HEAD~ &&
git diff-index --no-indent-heuristic -p old -- spaces.txt >out-diff-index &&
compare_diff spaces-expect out-diff-index &&
git checkout -f main
'
test_expect_success 'diff-index: ugly spaces with diff.indentHeuristic=false' '
git checkout -B diff-index &&
git reset --soft HEAD~ &&
git -c diff.indentHeuristic=false diff-index -p old -- spaces.txt >out-diff-index2 &&
compare_diff spaces-expect out-diff-index2 &&
git checkout -f main
'
test_expect_success 'diff-index: --indent-heuristic overrides config' '
git checkout -B diff-index &&
git reset --soft HEAD~ &&
git -c diff.indentHeuristic=false diff-index --indent-heuristic -p old -- spaces.txt >out-diff-index-compacted3 &&
compare_diff spaces-compacted-expect out-diff-index-compacted3 &&
git checkout -f main
'
test_expect_success 'diff-index: --no-indent-heuristic overrides config' '
git checkout -B diff-index &&
git reset --soft HEAD~ &&
git -c diff.indentHeuristic=true diff-index --no-indent-heuristic -p old -- spaces.txt >out-diff-index3 &&
compare_diff spaces-expect out-diff-index3 &&
git checkout -f main
'
# --- diff-files tests ----------------------------------------------------
test_expect_success 'diff-files: nice spaces with --indent-heuristic' '
git checkout -B diff-files &&
git reset HEAD~ &&
git diff-files --indent-heuristic -p spaces.txt >out-diff-files-raw &&
grep -v index out-diff-files-raw >out-diff-files-compacted &&
compare_diff spaces-compacted-expect out-diff-files-compacted &&
git checkout -f main
'
test_expect_success 'diff-files: nice spaces with diff.indentHeuristic=true' '
git checkout -B diff-files &&
git reset HEAD~ &&
git -c diff.indentHeuristic=true diff-files -p spaces.txt >out-diff-files-raw2 &&
grep -v index out-diff-files-raw2 >out-diff-files-compacted2 &&
compare_diff spaces-compacted-expect out-diff-files-compacted2 &&
git checkout -f main
'
test_expect_success 'diff-files: ugly spaces with --no-indent-heuristic' '
git checkout -B diff-files &&
git reset HEAD~ &&
git diff-files --no-indent-heuristic -p spaces.txt >out-diff-files-raw &&
grep -v index out-diff-files-raw >out-diff-files &&
compare_diff spaces-expect out-diff-files &&
git checkout -f main
'
test_expect_success 'diff-files: ugly spaces with diff.indentHeuristic=false' '
git checkout -B diff-files &&
git reset HEAD~ &&
git -c diff.indentHeuristic=false diff-files -p spaces.txt >out-diff-files-raw2 &&
grep -v index out-diff-files-raw2 >out-diff-files &&
compare_diff spaces-expect out-diff-files &&
git checkout -f main
'
test_expect_success 'diff-files: --indent-heuristic overrides config' '
git checkout -B diff-files &&
git reset HEAD~ &&
git -c diff.indentHeuristic=false diff-files --indent-heuristic -p spaces.txt >out-diff-files-raw3 &&
grep -v index out-diff-files-raw3 >out-diff-files-compacted &&
compare_diff spaces-compacted-expect out-diff-files-compacted &&
git checkout -f main
'
test_expect_success 'diff-files: --no-indent-heuristic overrides config' '
git checkout -B diff-files &&
git reset HEAD~ &&
git -c diff.indentHeuristic=true diff-files --no-indent-heuristic -p spaces.txt >out-diff-files-raw4 &&
grep -v index out-diff-files-raw4 >out-diff-files &&
compare_diff spaces-expect out-diff-files &&
git checkout -f main
'
test_done
|
tacker66/git
|
t/t4061-diff-indent.sh
|
Shell
|
gpl-2.0
| 10,651 |
#!/bin/bash
#
# Should be run with a root user or with a tap-creator with root owner and sticky bit setted.
#
#
#  +----------+
#  | external |
#  |  Linux   |
#  |   Host   |
#  |          |
#  | "thetap" |
#  +----------+
#  | 10.0.0.1 |
#  +----------+
#       |           node0         node1
#       |       +----------+    +----------+
#       +-------|  tap     |    |          |
#               | bridge   |    |          |
#               +----------+    +----------+
#               |  CSMA    |    |  CSMA    |
#               +----------+    +----------+
#               | 10.0.0.1 |    | 10.0.0.2 |  udp-echo-server listening on port 2000
#               +----------+    +----------+
#                     |               |
#                     |               |
#                     |               |
#                     =================
#                      CSMA LAN 10.0.0
#
# Demo driver: start the NS-3/DCE simulation that bridges the host onto
# the simulated 10.0.0.0 LAN, run a real udp-echo-client from the host
# against the simulated server, then show the server's captured stdout.
cd `dirname $BASH_SOURCE`/../../
BASEDCE=$PWD
cd example/ccnx
# Locate the prebuilt simulation binary; abort when the tree isn't built.
if [ -x $BASEDCE/build/bin/dce-tap-udp-echo ]
then
NS3SCRIPT=$BASEDCE/build/bin/dce-tap-udp-echo
else
echo dce-tap-udp-echo not found !
exit 1
fi
if [ -x $BASEDCE/build/bin_dce/udp-echo-client ]
then
THECLIENT=$BASEDCE/build/bin_dce/udp-echo-client
else
echo udp-echo-client not found
exit 2
fi
echo Running DCE/NS-3 Script : dce-tap-udp-echo
# Run the simulation in the background; the tap bridge makes 10.0.0.2
# reachable from the host while it runs.
$NS3SCRIPT &
echo sleep one second
sleep 1
echo
echo About to run udp client
echo
$THECLIENT 10.0.0.2 "Hello NS3"
echo
echo Client exit code : $?
echo
# NOTE(review): 39770 appears to be the fixed id DCE assigns to the echo
# server's log directory in this scenario — confirm against the NS-3 script.
if [ -f files-1/var/log/39770/stdout ]
then
echo NS-3 stdout of udp-echo-server '>>>'
cat files-1/var/log/39770/stdout
echo '<<<'
exit 0
else
echo NS-3 stdout of udp-echo-server not found should be in file 'files-1/var/log/39770/stdout'
exit 3
fi
|
mingit/mstcp
|
arch/sim/test/buildtop/source/ns-3-dce/example/ccnx/run-tap-udp-echo.sh
|
Shell
|
gpl-2.0
| 1,744 |
# Shared test-framework helpers (run_cmd, vlog, $MYSQL, ... — presumably
# defined here; verify in inc/common.sh).
. inc/common.sh

#The result to return: 0 = success; the check_* helpers below set it to
#-1 on the first failed comparison.
RESULT=0
#######################################
# Run a redo-generating command until InnoDB writes a new archived log file
# whose start LSN exceeds the LSN observed on entry — i.e. until everything
# done before this call is guaranteed to be flushed into the archived logs.
# Globals:   MYSQL, MYSQL_ARGS (read)
# Arguments: $1 - directory holding ib_log_archive_<start_lsn> files
#            $2 - command executed on every iteration to generate more redo
#######################################
function repeat_until_new_arch_log_created
{
    local arch_log_dir=$1
    local command=$2
    # LSN at the moment of the call: archiving has caught up once a file
    # named with a greater start LSN appears.
    local stop_lsn=`run_cmd $MYSQL $MYSQL_ARGS test -e 'SHOW ENGINE INNODB STATUS\G'|grep 'Log sequence number'|awk '{print $4}'`
    local old_arch_logs_count=`ls -al $arch_log_dir/| wc -l`
    # Debug output: number of entries currently in the archive directory.
    # (The unused `new_arch_logs_count` variable was removed.)
    echo $old_arch_logs_count
    # To be sure the data was flushed to archived log wait until new file
    # with START_LSN > CURRENT_LSN is created
    local max_lsn=0;
    while [ $max_lsn -le $stop_lsn ];
    do
        $command
        for i in $arch_log_dir/*;
        do
            # File names are ib_log_archive_<start_lsn>; strip the prefix.
            local lsn=${i#$arch_log_dir/ib_log_archive_}
            if [ $lsn -gt $max_lsn ];
            then
                max_lsn=$lsn
            fi
        done
    done
}
#######################################
# Assert that two integers are equal. Logs "<name> (a == b): passed" on a
# match; otherwise logs "... failed" and marks the whole run as failed by
# setting the global RESULT to -1.
# Globals:   RESULT (written on mismatch)
# Arguments: $1 - check name, $2/$3 - values expected to be equal
#######################################
check_if_equal()
{
    local label=$1
    local lhs=$2
    local rhs=$3
    local msg="$label ($lhs == $rhs):"

    if [ "$lhs" -eq "$rhs" ]
    then
        vlog "$msg passed"
    else
        vlog "$msg failed"
        RESULT=-1
    fi
}
#######################################
# Assert that two integers differ. Logs "<name> (a != b): passed" when they
# do; otherwise logs "... failed" and marks the whole run as failed by
# setting the global RESULT to -1.
# Globals:   RESULT (written on match)
# Arguments: $1 - check name, $2/$3 - values expected to differ
#######################################
check_if_not_equal()
{
    local label=$1
    local lhs=$2
    local rhs=$3
    local msg="$label ($lhs != $rhs):"

    if [ "$lhs" -ne "$rhs" ]
    then
        vlog "$msg passed"
    else
        vlog "$msg failed"
        RESULT=-1
    fi
}
#######################################
# Generate redo-log traffic by bulk-inserting rows into every test table.
# Each table is cross-joined with itself three times so the
# INSERT ... SELECT can always yield the 10000 requested rows even when the
# table is still small.
# Globals:   MYSQL, MYSQL_ARGS (read)
# Arguments: $1 - number of tables, $2 - table name prefix (tables are $2<i>)
#######################################
function fill_tables
{
    local TABLES_COUNT=$1
    local TABLE_NAME=$2
    for i in `seq 1 $TABLES_COUNT`; do
        local TN=$TABLE_NAME$i
        run_cmd $MYSQL $MYSQL_ARGS test \
<<EOF
INSERT INTO $TN (B)
SELECT ${TN}_1.B FROM
$TN ${TN}_1,
$TN ${TN}_2,
$TN ${TN}_3
LIMIT 10000;
EOF
    done
}
#######################################
# Create $1 InnoDB tables named $2<i>, seed each with 100 rows, then keep
# inserting until a new archived log file appears so that the seed data is
# guaranteed to be present in the archived logs.
# Arguments: $1 - table count, $2 - table name prefix,
#            $3 - archived-log directory,
#            $4 - extra CREATE TABLE options (optional, e.g. row format)
#######################################
function create_and_fill_db
{
    local TABLES_COUNT=$1
    local TABLE_NAME=$2
    local ARCH_LOG_DIR=$3
    local CREATE_TABLE_OPTIONS=${4:-''}
    for i in `seq 1 $TABLES_COUNT`; do
        local TN=$TABLE_NAME$i
        run_cmd $MYSQL $MYSQL_ARGS test \
<<EOF
CREATE TABLE $TN
(A INT UNSIGNED AUTO_INCREMENT PRIMARY KEY,
B INT(10) UNSIGNED NOT NULL DEFAULT 0) ENGINE=INNODB $CREATE_TABLE_OPTIONS;
INSERT INTO $TN (B) VALUES (1),(1),(1),(1),(1),(1),(1),(1),(1),(1),
(1),(1),(1),(1),(1),(1),(1),(1),(1),(1),
(1),(1),(1),(1),(1),(1),(1),(1),(1),(1),
(1),(1),(1),(1),(1),(1),(1),(1),(1),(1),
(1),(1),(1),(1),(1),(1),(1),(1),(1),(1),
(1),(1),(1),(1),(1),(1),(1),(1),(1),(1),
(1),(1),(1),(1),(1),(1),(1),(1),(1),(1),
(1),(1),(1),(1),(1),(1),(1),(1),(1),(1),
(1),(1),(1),(1),(1),(1),(1),(1),(1),(1),
(1),(1),(1),(1),(1),(1),(1),(1),(1),(1);
EOF
    done
    # Make sure everything above has reached the archived logs.
    repeat_until_new_arch_log_created \
        $ARCH_LOG_DIR \
        "fill_tables $TABLES_COUNT $TABLE_NAME"
}
#######################################
# Perform one INSERT, DELETE and UPDATE batch per table, and record the
# affected ranges and row counts in the global arrays
# {I,D,U}_{FROM,TO,COUNT}[...], indexed starting at $1. get_changes /
# check_changes later compare these against the restored data.
# Ranges are half-open: [FROM, TO).
# Arguments: $1 - first global-array index to use
#            $2 - table count, $3 - table name prefix
#######################################
function make_changes
{
    local START_I=$1
    local TABLES_COUNT=$2
    local TABLE_NAME=$3
    local CLIENT_CMD="${MYSQL} ${MYSQL_ARGS} -Ns test -e"
    for i in `seq $START_I $(($START_I+$TABLES_COUNT-1))`; do
        local TN=$TABLE_NAME$(($i-$START_I+1))
        #INSERT
        I_FROM[$i]=`$CLIENT_CMD "select max(a) from $TN"`
        $CLIENT_CMD "insert into $TN (b) select b from $TN limit 1000"
        I_TO[$i]=`$CLIENT_CMD "select max(a) from $TN"`
        vlog "Inserted rows for $i are in the range [${I_FROM[$i]}, ${I_TO[$i]})"
        I_COUNT[$i]=`$CLIENT_CMD "select count(*) from $TN where a >= ${I_FROM[$i]} and a < ${I_TO[$i]} "`
        #DELETE
        # On a second pass over the same table (i > TABLES_COUNT), start
        # deleting after the range updated by the first pass so the two
        # passes touch disjoint rows.
        if [ $i -gt $TABLES_COUNT ];
        then
            local START_ROW=${U_TO[$(($i-$TABLES_COUNT))]}
        else
            local START_ROW=1000
        fi
        D_FROM[$i]=`$CLIENT_CMD "select min(a) from (select a from $TN where a >= $START_ROW limit 1000) as temp_table"`
        D_TO[$i]=`$CLIENT_CMD "select max(a) from (select a from $TN where a >= $START_ROW limit 1000) as temp_table"`
        D_COUNT[$i]=`$CLIENT_CMD "select count(*) from $TN where a>=${D_FROM[$i]} and a<${D_TO[$i]}"`
        $CLIENT_CMD "delete from $TN where a >= ${D_FROM[$i]} and a < ${D_TO[$i]}"
        vlog "Deleted rows for $i are in the range [${D_FROM[$i]}, ${D_TO[$i]}), total ${D_COUNT[$i]} are deleted"
        #UPDATE
        # Update the next 1000 rows immediately following the deleted range.
        U_FROM[$i]=${D_TO[$i]}
        U_TO[$i]=`$CLIENT_CMD "select max(a) from (select a from $TN where a >= ${U_FROM[$i]} limit 1000) as temp_table"`
        U_COUNT[$i]=`$CLIENT_CMD "select count(*) from $TN where a>=${U_FROM[$i]} and a<${U_TO[$i]}"`
        $CLIENT_CMD "update $TN set b=2 where a >= ${U_FROM[$i]} and a < ${U_TO[$i]}"
        vlog "Updated rows for $i are in the range [${U_FROM[$i]}, ${U_TO[$i]}), total ${U_COUNT[$i]} are updated"
    done
}
#######################################
# Re-count the rows in the ranges recorded by make_changes, this time from
# the (restored) database, storing results in the parallel global arrays
# I_COUNT1 / D_COUNT1 / U_COUNT1 for later comparison by check_changes.
# Arguments: $1 - first global-array index (must match the make_changes call)
#            $2 - table count, $3 - table name prefix
#######################################
function get_changes
{
    local START_I=$1
    local TABLES_COUNT=$2
    local TABLE_NAME=$3
    local CLIENT_CMD="${MYSQL} ${MYSQL_ARGS} -Ns test -e"
    for i in `seq $START_I $(($START_I+$TABLES_COUNT-1))`; do
        local TN=$TABLE_NAME$(($i-$START_I+1))
        #INSERT
        I_COUNT1[$i]=`$CLIENT_CMD "select count(*) from $TN where a >= ${I_FROM[$i]} and a < ${I_TO[$i]} "`
        #DELETE
        D_COUNT1[$i]=`$CLIENT_CMD "select count(*) from $TN where a >= ${D_FROM[$i]} and a < ${D_TO[$i]} "`
        #UPDATE
        U_COUNT1[$i]=`$CLIENT_CMD "select count(*) from $TN where a >= ${U_FROM[$i]} and a < ${U_TO[$i]} and b = 2"`
    done
}
#######################################
# Compare recorded and re-read change counts for a range of table indices by
# invoking the supplied comparison command (check_if_equal or
# check_if_not_equal) on each INSERT/DELETE/UPDATE pair.
# Globals:   I_COUNT, I_COUNT1, D_COUNT1, U_COUNT, U_COUNT1 (read)
# Arguments: $1 - first index, $2 - table count, $3 - comparison command
#######################################
check_changes()
{
    local first=$1
    local num_tables=$2
    local verify=$3
    local t
    for (( t = first; t < first + num_tables; t++ )); do
        $verify "INSERT TEST for $t" "${I_COUNT[$t]}" "${I_COUNT1[$t]}"
        $verify "DELETE TEST for $t" 0 "${D_COUNT1[$t]}"
        $verify "UPDATE TEST for $t" "${U_COUNT[$t]}" "${U_COUNT1[$t]}"
    done
}
#######################################
# Clear all of the global bookkeeping arrays used by make_changes /
# get_changes, i.e. every {I,D,U}_{FROM,TO,COUNT} and
# {I,D,U}_{FROM1,TO1,COUNT1} variable, so the next test run starts clean.
#######################################
unset_global_variables()
{
    local prefix suffix
    for prefix in I D U; do
        for suffix in FROM TO COUNT FROM1 TO1 COUNT1; do
            unset "${prefix}_${suffix}"
        done
    done
}
#######################################
# End-to-end test of preparing an xtrabackup backup from InnoDB archived
# redo logs. Exercises three scenarios:
#   1. --to-archived-lsn: apply archived logs only up to a remembered LSN
#      and verify that later changes are absent;
#   2. applying the remaining archived logs on top of (1) and verifying that
#      all changes are then present;
#   3. applying the archived logs in two separate sets (split around the
#      remembered LSN) and verifying the fully restored data.
# Failures are accumulated in the global RESULT via the check_* helpers.
# Arguments: $1 - extra server/xtrabackup options (optional)
#            $2 - extra CREATE TABLE options (optional)
#######################################
function test_archived_logs
{
    local TABLE_NAME=T
    local TABLES_COUNT=4
    local EXTRA_OPTIONS=${1:-''}
    local CREATE_TABLE_OPTIONS=${2:-''}
    #Setup server environment to get access to some variables
    init_server_variables 1
    switch_server 1
    local BASE_BACKUP_DIR=$topdir/backup_base
    local BACKUP_DIR=$topdir/backup
    local BASE_DATA_DIR=$topdir/base_data
    local ARCHIVED_LOGS_DIR=$topdir/archived_logs
    local XTRABACKUP_OPTIONS="--innodb_log_file_size=2M $EXTRA_OPTIONS"
    #Setup ROW binlog format to suppress warnings in result file
    local SERVER_OPTIONS="$XTRABACKUP_OPTIONS --innodb_log_archive=ON --innodb_log_arch_dir=$ARCHIVED_LOGS_DIR --binlog-format=ROW"
    mkdir -p $BASE_BACKUP_DIR $BACKUP_DIR
    mkdir -p $ARCHIVED_LOGS_DIR
    reset_server_variables 1
    ###################################################################
    # --to-lsn test. It checks the availability to apply logs only to #
    # the certain LSN.                                                #
    ###################################################################
    start_server $SERVER_OPTIONS
    #Create and fill tables to generate log files
    create_and_fill_db $TABLES_COUNT $TABLE_NAME $ARCHIVED_LOGS_DIR $CREATE_TABLE_OPTIONS
    #Backup the data
    xtrabackup --backup --datadir=$mysql_datadir --target-dir=$BASE_BACKUP_DIR $XTRABACKUP_OPTIONS
    #Make some changes in tables after backup is done
    make_changes 1 $TABLES_COUNT $TABLE_NAME
    #Make sure that changes are flushed to archived log
    repeat_until_new_arch_log_created \
        $ARCHIVED_LOGS_DIR \
        "fill_tables $TABLES_COUNT $TABLE_NAME"
    #Remember current LSN
    local LSN=`run_cmd $MYSQL $MYSQL_ARGS test -e 'SHOW ENGINE INNODB STATUS\G'|grep 'Log sequence number'|awk '{print $4}'`
    #Make more changes over remembered LSN
    make_changes $(($TABLES_COUNT+1)) $TABLES_COUNT $TABLE_NAME
    #Make sure the above changes are flushed to archived log
    repeat_until_new_arch_log_created \
        $ARCHIVED_LOGS_DIR \
        "fill_tables $TABLES_COUNT $TABLE_NAME"
    stop_server
    cp -R $mysql_datadir $BASE_DATA_DIR
    #########################################
    # Apply logs only to the remembered lsn #
    #########################################
    # --apply-log-only is set implicitly because unfinished transactions
    # can be finished on further logs applying but using this option with
    # --innodb-log-arch-dir is tested here to prove this bug
    # https://bugs.launchpad.net/percona-xtrabackup/+bug/1199555 is not
    # concerned with this case
    cp -R $BASE_BACKUP_DIR/* $BACKUP_DIR
    xtrabackup --prepare \
        --target-dir=$BACKUP_DIR \
        --innodb-log-arch-dir=$ARCHIVED_LOGS_DIR \
        --to-archived-lsn=$LSN \
        --apply-log-only \
        $XTRABACKUP_OPTIONS
    #Copy prepared data to server data dir
    cp -R $BACKUP_DIR/* $mysql_datadir
    rm $mysql_datadir/ib_*
    #Start server with prepared data
    start_server "--binlog-format=ROW $EXTRA_OPTIONS"
    #Get values from restored data files before remembered LSN
    get_changes 1 $TABLES_COUNT $TABLE_NAME
    #Get values from restored data files after remembered LSN
    get_changes $(($TABLES_COUNT+1)) $TABLES_COUNT $TABLE_NAME
    #We don't need server already
    stop_server
    #Check if the changes which was made before remembered LSN are in the
    #restored database
    check_changes 1 $TABLES_COUNT 'check_if_equal'
    #Check if the changes which was made after remembered LSN are NOT in the
    #restored database
    check_changes $(($TABLES_COUNT+1)) $TABLES_COUNT 'check_if_not_equal'
    # Apply the rest of archived logs
    xtrabackup --prepare \
        --target-dir=$BACKUP_DIR \
        --innodb-log-arch-dir=$ARCHIVED_LOGS_DIR \
        --apply-log-only \
        $XTRABACKUP_OPTIONS
    #Copy prepared data to server data dir
    cp -R $BACKUP_DIR/* $mysql_datadir
    rm $mysql_datadir/ib_*
    #Start server with prepared data
    start_server "--binlog-format=ROW $EXTRA_OPTIONS"
    #Get values from restored data files before remembered LSN
    get_changes 1 $TABLES_COUNT $TABLE_NAME
    #Get values from restored data files after remembered LSN
    get_changes $(($TABLES_COUNT+1)) $TABLES_COUNT $TABLE_NAME
    stop_server
    #Check if the changes which was made before remembered LSN are in the
    #restored database
    check_changes 1 $TABLES_COUNT 'check_if_equal'
    #Check if the changes which was made after remembered LSN are in the
    #restored database
    check_changes $(($TABLES_COUNT+1)) $TABLES_COUNT 'check_if_equal'
    rm -rf $mysql_datadir
    rm -rf $BACKUP_DIR
    ##################################################
    # Check the possibility of applying logs by sets #
    ##################################################
    cp -R $BASE_DATA_DIR $mysql_datadir
    cp -R $BASE_BACKUP_DIR $BACKUP_DIR
    mkdir -p $ARCHIVED_LOGS_DIR/1 $ARCHIVED_LOGS_DIR/2
    #Make two log files sets. The first set contains the first two files,
    #the second set contains the rest and the last log file from the first
    #set.
    pushd .
    cd $ARCHIVED_LOGS_DIR
    # Partition archive files by their start LSN relative to $LSN.
    for i in *;
    do
        test -f $i || continue
        local n=${i#ib_log_archive_}
        if [ $n -le $LSN ];
        then
            mv $i 1/;
        else
            mv $i 2/;
        fi;
    done
    cd 1
    # Copy the newest file of set 1 into set 2 so the two sets overlap (log
    # application needs a contiguous LSN range).
    find . -type f -printf "%T+ %p\n" | cut -d' ' -f2 | sort -n | tail -1 | \
        xargs -I{} cp {} ../2/
    popd
    #Prepare the first set
    xtrabackup --prepare \
        --target-dir=$BACKUP_DIR \
        --innodb-log-arch-dir=$ARCHIVED_LOGS_DIR/1 \
        $XTRABACKUP_OPTIONS
    #Prepare the second set
    xtrabackup --prepare \
        --target-dir=$BACKUP_DIR \
        --innodb-log-arch-dir=$ARCHIVED_LOGS_DIR/2 \
        $XTRABACKUP_OPTIONS
    #Copy prepared data to server data dir
    cp -R $BACKUP_DIR/* $mysql_datadir
    rm $mysql_datadir/ib_*
    #Start server with prepared data
    start_server "--binlog-format=ROW $EXTRA_OPTIONS"
    #Get values from restored data files before remembered LSN
    get_changes 1 $TABLES_COUNT $TABLE_NAME
    #Get values from restored data files after remembered LSN
    get_changes $(($TABLES_COUNT+1)) $TABLES_COUNT $TABLE_NAME
    stop_server
    #Check all made changes
    check_changes 1 $TABLES_COUNT 'check_if_equal'
    check_changes $(($TABLES_COUNT+1)) $TABLES_COUNT 'check_if_equal'
    #Clean up dir for the next procedure launch
    rm -rf $BACKUP_DIR/* $BASE_BACKUP_DIR/*
    rm -rf $mysql_datadir $BASE_DATA_DIR
    rm -rf $ARCHIVED_LOGS_DIR/*
    #Clean up variables
    unset_global_variables
}
# Entry point: innodb_log_archive requires Percona Server (XtraDB) >= 5.6.10.
require_xtradb
require_server_version_higher_than '5.6.10'

# Run once with defaults and once with a compressed row format.
test_archived_logs
test_archived_logs '' 'ROW_FORMAT=COMPRESSED'

# Non-zero if any check_if_* comparison failed.
exit $RESULT
|
janlindstrom/percona-xtrabackup
|
storage/innobase/xtrabackup/test/t/xb_apply_archived_logs.sh
|
Shell
|
gpl-2.0
| 11,623 |
#!/bin/bash -
# libguestfs
# Copyright (C) 2009 Red Hat Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

# Test if we can handle qemu death synchronously.

set -e

# Only meaningful with the 'direct' (qemu subprocess) backend; exit 77 is
# the automake "test skipped" status.
if [ "$(guestfish get-backend)" != "direct" ]; then
    echo "$0: test skipped because default backend is not 'direct'"
    exit 77
fi

rm -f qemudie-synch.pid

# Inside one guestfish session: launch an appliance, kill its qemu
# subprocess, then verify a fresh appliance can be launched afterwards.
guestfish <<'EOF'
scratch 100M
run
# Kill subprocess.
pid | cat > qemudie-synch.pid
! kill $(cat qemudie-synch.pid) ; sleep 2
# XXX The following sleep should NOT be necessary.
-sleep 1
# We should now be able to rerun the subprocess.
scratch 100M
run
ping-daemon
EOF

rm qemudie-synch.pid
|
pombredanne/libguestfs
|
tests/protocol/test-qemudie-synch.sh
|
Shell
|
gpl-2.0
| 1,280 |
#!/usr/bin/env bash

# buddybuild pre-build hook: copy the WordPress.com credential files from the
# secure-files area into the build user's home directory as dotfiles, where
# the build expects to find them. The "warning:" prefix makes the messages
# stand out in the buddybuild log.

echo "warning: Detected Prebuild Step"

for credentials_file in \
    wpcom_app_credentials \
    wpcom_internal_app_credentials \
    wpcom_alpha_app_credentials \
    wpcom_test_credentials; do
    cp "${BUDDYBUILD_SECURE_FILES}/${credentials_file}" ~/".${credentials_file}"
done

echo "warning: Copied files over"
|
ScoutHarris/WordPress-iOS
|
WordPress/buddybuild_prebuild.sh
|
Shell
|
gpl-2.0
| 434 |
#!/usr/bin/env bash
# Install miniconda and this project's dependencies into a chosen prefix.
# Usage: install.sh [install-path]    (default: $PWD/miniconda)

# Log installation
exec > >(tee install.log) 2>&1

# Allow user to specify install path
# or default to current directory
INSTALL_PATH="$PWD/miniconda"
if [ ! -z "$1" ]
then
    # BUG FIX: resolve "$1" to an absolute path. The old code appended the
    # whole "$1" to the absolute path of its parent directory, which
    # duplicated any directory components (e.g. "/opt/mc" became
    # "/opt//opt/mc"); append only the final path component instead. The
    # dead "INSTALL_PATH=$1" assignment that preceded it was removed.
    INSTALL_PATH="$(cd "$(dirname "$1")" && pwd)/$(basename "$1")"
fi

# Fail if any command fails
set -e
set -v

# We do this conditionally because it saves us some downloading if the
# version is the same.
if [ -z "$TRAVIS_PYTHON_VERSION" ]
then
    PYTHON_VERSION="2.7"
else
    PYTHON_VERSION="$TRAVIS_PYTHON_VERSION"
fi

# Don't redownload miniconda.sh
if [ ! -e miniconda.sh ]
then
    if [[ "$PYTHON_VERSION" == "2.7" ]]; then
        wget https://repo.continuum.io/miniconda/Miniconda-latest-Linux-x86_64.sh -O miniconda.sh;
    else
        wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh;
    fi
else
    echo "Reusing existing miniconda.sh"
fi

# Install miniconda here (or reuse a previous install at the same prefix)
if [ ! -d "$INSTALL_PATH" ]
then
    bash miniconda.sh -b -p "$INSTALL_PATH"
else
    if [ -e "${INSTALL_PATH}/bin/conda" ]
    then
        echo "Will attempt to reuse existing miniconda install in $INSTALL_PATH"
    else
        echo "$INSTALL_PATH already exists but is not a miniconda installation?"
        exit 1
    fi
fi

# Set path
export PATH="${INSTALL_PATH}/bin:$PATH"
# Ensure bash path search updated
hash -r

# Always say yes and don't set ps1
conda config --set always_yes yes --set changeps1 no
# Make sure conda is updated
conda update -q conda
# Useful for debugging any issues with conda
conda info -a

# Add bioconda channels(r is required for bioconda)
# these need to be in reverse order of preference
# Aka, last one listed becomes highest priority when same
# package exists in multiple repos
conda config --add channels r
conda config --add channels bioconda
conda config --add channels BioBuilds
conda config --add channels vdbwrair

# Install dependencies
## Conda deps first
conda install --file requirements-conda.txt
## Pip specific deps next
pip install -r requirements-pip.txt

# Install package
python setup.py install

# Tell user how to setup PATH
echo "Make sure to setup your path to include $INSTALL_PATH/bin"
echo "export PATH=$INSTALL_PATH/bin:$PATH"
|
VDBWRAIR/bio_bits
|
install.sh
|
Shell
|
gpl-2.0
| 2,221 |
#!/bin/bash
# Run a program under valgrind with the project's standard leak/fd checks,
# piping the stderr-merged output through the colorizing filter next to this
# script.
#
# Usage: run-bin.sh <program> [args...]

if [ -z "$1" ]; then
    # BUG FIX: the original line was
    #   echo "Usage: ..." >&2 return 1
    # which passed the words "return 1" as extra arguments to echo and never
    # aborted (and `return` is invalid outside a function anyway).
    echo "Usage: $0 valgrind-arguments" >&2
    exit 1
fi

dir="`dirname "$0"`"
valgrind \
  --leak-check=full \
  --track-fds=yes \
  --show-reachable=yes \
  --suppressions="$dir"/valgrind-ignore-ssl-snappy \
  "$@" \
  |& "$dir"/valgrind-output-color
|
speidy/redemption
|
tools/valgrind-filter/run-bin.sh
|
Shell
|
gpl-2.0
| 281 |
# Only run if this is an interactive bash session.
# NOTE: this lives in /etc/profile.d and may be sourced by non-bash shells,
# hence only POSIX constructs are used for the guard itself.
if [ -n "$PS1" ] && [ -n "$BASH_VERSION" ]; then
    echo "Press enter to activate this console"
    read answer

    # The user should have chosen their preferred keyboard layout
    # in tails-greeter by now.
    . /etc/default/locale
    . /etc/default/keyboard
    # Apply the configured locale/keyboard to this virtual console.
    sudo setupcon
fi
|
azumi-/amnesia
|
config/chroot_local-includes/etc/profile.d/setup_console.sh
|
Shell
|
gpl-3.0
| 328 |
# Set up the iLCSoft v02-02-01 environment (gcc 8.2 / CentOS 7 build) from
# the DESY CVMFS repository; intended to be sourced, not executed.
source /cvmfs/ilc.desy.de/sw/x86_64_gcc82_centos7/v02-02-01/init_ilcsoft.sh
|
akiyamiyamoto/Tutorial
|
PartTwo/init_ilcsoft.sh
|
Shell
|
gpl-3.0
| 76 |
#!/bin/bash
# Regenerate SuperTux's gettext templates: a combined data/locale/messages.pot
# for engine + data strings, plus one messages.pot per levelset for the
# translatable level/worldmap/script content.
# NOTE: bash (not POSIX sh) is required because of the ${name/.nut/}
# parameter expansion below; the shebang was /bin/sh before.

package_name="SuperTux"
package_version="$(git describe --tags --match "?[0-9]*.[0-9]*.[0-9]*")"

# Engine strings: _() and the plural form __(singular, plural) in C++ sources.
xgettext --keyword='_' --keyword='__:1,2' -C -o data/locale/main.pot \
  $(find src -name "*.cpp" -or -name "*.hpp") \
  --add-comments=l10n \
  --package-name="${package_name}" --package-version="${package_version}" \
  --msgid-bugs-address=https://github.com/SuperTux/supertux/issues

# Credits, object definitions and tilesets are Lisp-style data files.
find data/ -name "credits.stxt" -print0 | xargs -0 xgettext --keyword='_:1' \
  --language=Lisp --from-code=UTF-8 --sort-by-file \
  --output data/locale/credits.pot --add-comments=l10n \
  --package-name="${package_name}" --package-version="${package_version}" \
  --msgid-bugs-address=https://github.com/SuperTux/supertux/issues

find data/ -name "objects.stoi" -print0 | xargs -0 xgettext --keyword='_:1' \
  --language=Lisp --from-code=UTF-8 --sort-by-file \
  --output data/locale/objects.pot --add-comments=l10n \
  --package-name="${package_name}" --package-version="${package_version}" \
  --msgid-bugs-address=https://github.com/SuperTux/supertux/issues

find data/ -name "*.strf" -print0 | xargs -0 xgettext --keyword='_:1' \
  --language=Lisp --from-code=UTF-8 --sort-by-file \
  --output data/locale/tilesets.pot --add-comments=l10n \
  --package-name="${package_name}" --package-version="${package_version}" \
  --msgid-bugs-address=https://github.com/SuperTux/supertux/issues

# Merge the partial templates into one and drop the intermediates.
msgcat data/locale/main.pot data/locale/credits.pot data/locale/objects.pot data/locale/tilesets.pot > data/locale/messages.pot
rm -f data/locale/main.pot data/locale/credits.pot data/locale/objects.pot data/locale/tilesets.pot

# Prepare script files for inclusion in tinygettext
for LEVELSET in $(ls data/levels); do
  SCRIPT_FILES=$(find data/levels/$LEVELSET -name "*.nut")
  for SCRIPT_FILE in $SCRIPT_FILES; do
    name=$(basename ${SCRIPT_FILE})
    name=${name/.nut/}
    python tools/extract_strings.py ${SCRIPT_FILE} data/levels/$LEVELSET/scripts_${name}.txt
  done
done

for LEVELSET in $(ls data/levels); do
  find "data/levels/${LEVELSET}" "(" -name "*.stl" -or -name "*.stl.in" -or -name "*.stwm" -or -name "*.txt" ")" -print0 | xargs -0 xgettext --keyword='_:1' \
    --language=Lisp --from-code=UTF-8 --sort-by-file \
    --output "data/levels/${LEVELSET}/messages.pot" --add-comments=l10n \
    --package-name="${package_name}" --package-version="${package_version}" \
    --msgid-bugs-address=https://github.com/SuperTux/supertux/issues
  # Normalize literal "\r\n" escape sequences in extracted strings to "\n".
  # BUG FIX: the previous `sed -n -e 's/...'` (no -i, no p) printed nothing
  # and modified nothing — a complete no-op; -i performs the intended
  # in-place edit.
  sed -i -e 's/\\r\\n/\\n/g' "data/levels/${LEVELSET}/messages.pot"
  rm -f data/levels/$LEVELSET/scripts_*.txt
done
|
tobbi/supertux
|
makepot.sh
|
Shell
|
gpl-3.0
| 2,559 |
#!/bin/bash -f
# Compile the testbench project with Vivado's xvlog, teeing all output to
# compile.log.

xv_path="/opt/Xilinx/Vivado/2016.2"

# Run the given command; abort with its exit status if it fails.
ExecStep()
{
    "$@" || exit $?
}

echo "xvlog -m64 --relax -prj tb_FPU_PIPELINED_FPADDSUB_vlog.prj"
ExecStep $xv_path/bin/xvlog -m64 --relax -prj tb_FPU_PIPELINED_FPADDSUB_vlog.prj 2>&1 | tee compile.log
|
GSejas/Karatsuba_FPU
|
Resultados/FPSUB/FPADDFPSUB_Pipelined/FPADDFPSUB_Pipelined.sim/sim_1/synth/func/compile.sh
|
Shell
|
gpl-3.0
| 293 |
#!/bin/bash
#
# mirrorcreator.com module
# Copyright (c) 2011-2014 Plowshare team
#
# This file is part of Plowshare.
#
# Plowshare is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Plowshare is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Plowshare.  If not, see <http://www.gnu.org/licenses/>.

# URLs this module handles: mirrorcreator.com and the mir.cr shortener.
MODULE_MIRRORCREATOR_REGEXP_URL='https\?://\(www\.\)\?\(mirrorcreator\.com\|mir\.cr\)/'

# Options accepted by mirrorcreator_upload, in plowshare's option-table
# format: VARIABLE,short,long,type,description (one per line).
MODULE_MIRRORCREATOR_UPLOAD_OPTIONS="
AUTH_FREE,b,auth-free,a=USER:PASSWORD,Free account
LINK_PASSWORD,p,link-password,S=PASSWORD,Protect a link with a password
INCLUDE,,include,l=LIST,Provide list of host site (comma separated)
SECURE,,secure,,Use HTTPS site version
FULL_LINK,,full-link,,Final link includes filename
COUNT,,count,n=COUNT,Take COUNT mirrors (hosters) from the available list. Default is 3, maximum is 12."
MODULE_MIRRORCREATOR_UPLOAD_REMOTE_SUPPORT=no

# mirrorcreator_list options: none, and listings have no subfolders.
MODULE_MIRRORCREATOR_LIST_OPTIONS=""
MODULE_MIRRORCREATOR_LIST_HAS_SUBFOLDERS=no
# Upload a file to mirrorcreator.com
# Flow: optional free-account login -> fetch upload form (cookies + token) ->
# pick target hosting sites -> multipart upload -> submit base64-encoded
# mirror request -> extract the final download link from the result page.
# Module option globals read: SECURE, AUTH_FREE, COUNT, INCLUDE,
# LINK_PASSWORD, FULL_LINK.
# $1: cookie file (for account only)
# $2: input file (with full path)
# $3: remote filename
# stdout: mirrorcreator.com download link
mirrorcreator_upload() {
    local COOKIEFILE=$1
    local FILE=$2
    local DESTFILE=$3
    local SZ=$(get_filesize "$FILE")

    if [ -n "$SECURE" ]; then
        local BASE_URL='https://www.mirrorcreator.com'
    else
        local BASE_URL='http://www.mirrorcreator.com'
    fi

    local PAGE FORM SITES_SEL SITES_ALL SITE DATA

    # base64 is needed below to encode the mirror-submission payload.
    if ! check_exec 'base64'; then
        log_error "'base64' is required but was not found in path."
        return $ERR_SYSTEM
    fi

    # File size limit check (warning message only)
    if [ "$SZ" -gt 419430400 ]; then
        log_debug 'file is bigger than 400MB, some site may not support it'
    fi

    if [ -n "$AUTH_FREE" ]; then
        local LOGIN_DATA LOGIN_RESULT
        # NOTE(review): single quotes are deliberate — post_login substitutes
        # $USER/$PASSWORD itself from the AUTH_FREE credentials.
        LOGIN_DATA='username=$USER&password=$PASSWORD'
        LOGIN_RESULT=$(post_login "$AUTH_FREE" "$COOKIEFILE" "$LOGIN_DATA" \
            "$BASE_URL/members/login_.php" \
            -H 'X-Requested-With: XMLHttpRequest') || return

        # The endpoint answers 0 on bad credentials.
        if [ "$LOGIN_RESULT" -eq 0 ]; then
            return $ERR_LOGIN_FAILED
        fi
        # get PHPSESSID entry in cookie file
    fi

    # Fetch the landing page: establishes session cookies and carries both
    # the upload form and the upload token.
    PAGE=$(curl "$BASE_URL" -b "$COOKIEFILE" -c "$COOKIEFILE") || return
    FORM=$(grep_form_by_id "$PAGE" 'uu_upload' | break_html_lines)
    # NOTE(review): TOKEN is not in the `local` declaration above, so it
    # leaks into the caller's scope.
    TOKEN=$(parse "token" ": '\([^']\+\)" <<< "$PAGE") || return

    # Retrieve complete hosting site list
    SITES_ALL=$(echo "$FORM" | grep 'checkbox' | parse_all_attr 'id=' value)

    if [ -z "$SITES_ALL" ]; then
        log_error 'Empty list, site updated?'
        return $ERR_FATAL
    else
        log_debug "Available sites:" $SITES_ALL
    fi

    # Site selection priority: --count N > --include LIST > site defaults.
    if [ -n "$COUNT" ]; then
        if (( COUNT > 12 )); then
            COUNT=12
            log_error "Too big integer value for --count, set it to $COUNT"
        fi
        for SITE in $SITES_ALL; do
            (( COUNT-- > 0 )) || break
            SITES_SEL="$SITES_SEL $SITE"
        done
    elif [ "${#INCLUDE[@]}" -gt 0 ]; then
        for SITE in "${INCLUDE[@]}"; do
            # FIXME: Should match word boundary (\< & \> are GNU grep extensions)
            if match "$SITE" "$SITES_ALL"; then
                SITES_SEL="$SITES_SEL $SITE"
            else
                log_error "Host not supported: $SITE, ignoring"
            fi
        done
    else
        # Default hosting sites selection
        SITES_SEL=$(echo "$FORM" | parse_all_attr 'checked=' 'value')
    fi

    if [ -z "$SITES_SEL" ]; then
        log_debug 'Empty site selection. Nowhere to upload!'
        return $ERR_FATAL
    fi

    log_debug "Selected sites:" $SITES_SEL

    # Do not seem needed..
    #PAGE=$(curl "$BASE_URL/fnvalidator.php?fn=${DESTFILE};&fid=upfile_123;")

    # -b "$COOKIEFILE" not needed here
    #PAGE=$(curl_with_log \
    #    --user-agent "Shockwave Flash" \
    #    -F "Filename=$DESTFILE" \
    #    -F "Filedata=@$FILE;filename=$DESTFILE" \
    #    -F 'folder=/uploads' -F 'Upload=Submit Query' \
    #    "$BASE_URL/uploadify/uploadify.php") || return

    # The actual multipart file upload.
    PAGE=$(curl_with_log -b "$COOKIEFILE" \
        -F "Filedata=@$FILE;filename=$DESTFILE" \
        -F 'timestamp=' \
        -F "token=$TOKEN" \
        "$BASE_URL/uploadify/uploadifive.php") || return

    # Filename can be renamed if "slot" already taken!
    # {"fileName": "RFC-all.tar.gz"}
    DESTFILE=$(echo "$PAGE" | parse 'fileName' ':[[:space:]]*"\([^"]\+\)"')
    log_debug "filename=$DESTFILE"

    # Some basic base64 encoding:
    # > FilesNames +=value + '#0#' + filesCompletedSize[key]+ ';0;';
    # > submitData = filesNames + '@e@' + email + '#H#' + selectedHost +'#P#' + pass + '#SC#' + scanvirus;
    # Example: RFC-all.tar.gz#0#225280;0;@e@#H#turbobit;hotfile;#P#
    DATA=$(echo "$SITES_SEL" | replace_all ' ' ';' | replace_all $'\r' '' | replace_all $'\n' ';')
    log_debug "sites=$DATA"
    DATA=$(echo "${DESTFILE}#0#${SZ};0;@e@#H#${DATA};#P#${LINK_PASSWORD}#SC#" | base64 --wrap=0)

    # Submit the mirror request; the response page carries the final links.
    PAGE=$(curl -b "$COOKIEFILE" --referer "$BASE_URL" \
        "$BASE_URL/process.php?data=$DATA") || return

    # link1 = link including the filename, link2 = short link.
    if [ -n "$FULL_LINK" ]; then
        echo "$PAGE" | parse_attr 'getElementById("link1")' 'href' || return
    else
        echo "$PAGE" | parse_attr 'getElementById("link2")' 'href' || return
    fi

    return 0
}
# List links from a mirrorcreator link
# Flow: fetch the landing page -> locate the mirror-status URL (mstat.php) ->
# fetch it -> for every successful mirror, resolve the per-mirror redirect
# page and print the final URL.
# $1: mirrorcreator link
# $2: recurse subfolders (ignored here)
# stdout: list of links (each followed by the file name)
mirrorcreator_list() {
    local URL=$1
    local PAGE STATUS LINKS NAME REL_URL

    # Mirror the scheme of the input link for all follow-up requests.
    if match '^https' "$URL"; then
        local BASE_URL='https://www.mirrorcreator.com'
    else
        local BASE_URL='http://www.mirrorcreator.com'
    fi

    PAGE=$(curl -L "$URL") || return

    if match '<h2.*Links Unavailable' "$PAGE"; then
        return $ERR_LINK_DEAD
    fi

    #NAMES=( $(echo "$PAGE" | parse_all 'Success' '\.gif"[[:space:]]alt="\([^"]*\)') )
    # The uploaded file's name sits in the page's <h3> tag.
    NAME=$(parse_tag 'h3' <<< "$PAGE") || return

    # mstat.php
    STATUS=$(echo "$PAGE" | parse 'mstat\.php' ',[[:space:]]"\([^"]*\)",') || return
    PAGE=$(curl -L "$BASE_URL$STATUS") || return

    # Relative URLs of the mirrors whose status is "Success".
    LINKS=$(echo "$PAGE" | parse_all_attr_quiet 'Success' href) || return
    if [ -z "$LINKS" ]; then
        return $ERR_LINK_DEAD
    fi

    while read REL_URL; do
        test "$REL_URL" || continue
        PAGE=$(curl "$BASE_URL$REL_URL") || return
        URL=$(echo "$PAGE" | parse_tag 'redirecturl' div) || return
        # Error : Selected hosting site is no longer available.
        if ! match '^Error' "$URL"; then
            echo "$URL"
            echo "$NAME"
        else
            log_debug "$URL ($NAME)"
        fi
    done <<< "$LINKS"
}
|
dr3mro/plowshare
|
src/modules/mirrorcreator.sh
|
Shell
|
gpl-3.0
| 7,132 |
#!/bin/sh
# Compile the application's gettext catalogs (.po -> .mo) for the given
# domain via the setuptools/babel compile_catalog command.

domain='me.vorotnikov.Obozrenie'
locale_dir='locale'

python3 setup.py compile_catalog -D "$domain" -d "$locale_dir"
|
obozrenie/obozrenie
|
compile_translations.sh
|
Shell
|
gpl-3.0
| 120 |
#! /bin/bash

# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

# arg $1 package URL
# arg $2 cookbook template name
# arg $3 no-qos or qos

# Clone the named cookbook template under $HOME/chef-solo/cookbooks as a new
# cookbook named $3 (the generated instance UUID); $1 is unused here.
copy_template() {
    cd "$HOME/chef-solo"
    cp -ra "cookbooks/$2" "cookbooks/$3"
    echo "$2 template copied to folder cookbooks/$3"
}
#######################################
# Customize the freshly copied cookbook in place: substitute the
# $PACKAGE_URL and $NAME placeholders and optionally switch the tomcat
# recipe to its QoS variant. Runs relative to $HOME/chef-solo (the cwd left
# by copy_template).
# Arguments: $1 - package URL; $2 - template name (unused here);
#            $3 - "qos" enables the QoS tomcat recipe; $4 - instance name
#######################################
function edit_recipe() {
    # edit recipe replacing: $NAME, $PACKAGE_URL
    cd cookbooks/$4
    # Address/empty-pattern trick: on lines matching the placeholder, `s##X#g`
    # reuses that same regex and replaces it with the value.
    sed -i '/\$PACKAGE_URL/ s##'"$1"'#g' attributes/default.rb
    sed -i '/\$NAME/ s##'"$4"'#g' attributes/default.rb
    sed -i '/\$NAME/ s##'"$4"'#g' recipes/default.rb
    sed -i '/\$NAME/ s##'"$4"'#g' recipes/remove.rb
    if [ "$3" == "qos" ]
    then
        echo "Setting up tomcat recipe with QoS"
        sed -i 's/tomcat::choreos/tomcat::choreos-qos/' recipes/default.rb
    fi
    echo "Cookbook $4 edited"
}
#######################################
# Register the new instance cookbook in the chef-solo node configuration by
# sourcing the helper script with the instance name.
# Arguments: $1, $2 - unused; $3 - instance cookbook name
#######################################
function edit_json() {
    cd $HOME/chef-solo
    . add_recipe_to_node.sh $3
}
#######################################
# Orchestrate one deployment preparation: generate a fresh instance UUID,
# clone the cookbook template under that name, customize it and register it
# in the node JSON.
# Globals:   instance_uuid (written; intentionally not local — read by the
#            top-level code after this function returns)
# Arguments: $1 - package URL, $2 - template cookbook name, $3 - "qos"|"no-qos"
#######################################
function prepare() {
    instance_uuid=`uuidgen`
    echo '==========================='
    echo "Preparing $2 deployment of $instance_uuid at `date`"
    copy_template $1 $2 $instance_uuid
    edit_recipe $1 $2 $3 $instance_uuid
    edit_json $1 $2 $instance_uuid
    echo "Instance $instance_uuid is prepared"
}
# Run the preparation with all output appended to the log file, then emit
# the generated UUID (without a trailing newline) on stdout so the caller
# can capture it. $instance_uuid is set inside prepare() (not local).
prepare $1 $2 $3 >> /tmp/chef-solo-prepare.log 2>&1
echo $instance_uuid | tr -d '\n'
|
choreos/enactment_engine
|
EnactmentEngine/src/main/resources/chef-solo/prepare_deployment.sh
|
Shell
|
mpl-2.0
| 1,420 |
#!/bin/bash

# Generate an HTML coverage report from gcov data in the current tree:
# capture the counters, strip system headers, Qt moc artifacts and the test
# code from the report, then render it with genhtml into the current dir.
lcov --capture --directory . --output-file coverage.info
lcov --directory . --output-file coverage.info --remove coverage.info "/usr/*" "*.moc" "test/*"
genhtml coverage.info
|
ppekala/injeqt
|
test/coverage.sh
|
Shell
|
lgpl-2.1
| 188 |
#!/bin/bash
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Load the per-system configuration (GPU count etc.), defaulting to DGX1.
DGXSYSTEM=${DGXSYSTEM:-"DGX1"}
if [[ -f config_${DGXSYSTEM}.sh ]]; then
    source config_${DGXSYSTEM}.sh
else
    source config_DGX1.sh
    echo "Unknown system, assuming DGX1"
fi
# Fall back to sane values when not running under SLURM.
SLURM_NTASKS_PER_NODE=${SLURM_NTASKS_PER_NODE:-$DGXNGPU}
SLURM_JOB_ID=${SLURM_JOB_ID:-$RANDOM}
MULTI_NODE=${MULTI_NODE:-''}
echo "Run vars: id $SLURM_JOB_ID gpus $SLURM_NTASKS_PER_NODE mparams $MULTI_NODE"

# runs benchmark and reports time to convergence
# to use the script:
#   run_and_time.sh

set -e

cd minigo

# get data and target model
python ml_perf/get_data.py

# Per-run results directory, unique per host and start time.
BASE_DIR=$(pwd)/results/$(hostname)-$(date +%Y-%m-%d-%H-%M)

# start timing
start=$(date +%s)
start_fmt=$(date +%Y-%m-%d\ %r)
echo "STARTING TIMING RUN AT $start_fmt"

# run benchmark
set -x
echo "running benchmark"

# run training
python ml_perf/reference_implementation.py \
  --base_dir=$BASE_DIR \
  --flagfile=ml_perf/flags/9/rl_loop.flags

# end timing
end=$(date +%s)
end_fmt=$(date +%Y-%m-%d\ %r)
echo "ENDING TIMING RUN AT $end_fmt"

# run eval (outside the timed window); preserve its exit status.
python ml_perf/eval_models.py \
  --base_dir=$BASE_DIR \
  --flags_dir=ml_perf/flags/9/ ; ret_code=$?

set +x

sleep 3
if [[ $ret_code != 0 ]]; then exit $ret_code; fi

# report result
result=$(( $end - $start ))
result_name="REINFORCEMENT"

echo "RESULT,$result_name,$result,nvidia,$start_fmt"
|
lablup/sorna-repl
|
vendor/benchmark/run_and_time.sh
|
Shell
|
lgpl-3.0
| 1,922 |
#!/bin/bash

# Provision a Debian/Ubuntu host for the weldr benchmark: install the build
# toolchain, install Rust via rustup (non-interactive), then clone and build
# weldr in release mode. Must run as root (installs packages, works in /root).
set -e

apt-get update
apt-get install gcc libssl-dev pkg-config capnproto git -y

cd /root
# -y makes the rustup installer non-interactive; then load cargo's env into
# this shell so `cargo` is on PATH.
curl https://sh.rustup.rs -sSf | sh -s -- -y
. /root/.cargo/env

git clone https://github.com/hjr3/weldr.git
cd /root/weldr
cargo build --release
|
hjr3/alacrity
|
benchmark/install.sh
|
Shell
|
apache-2.0
| 249 |
. ../lib.sh
# expand_conf (from ../lib.sh) should render every template in ./gen_conf
# into the destination directory, expanding variables (e.g. $y) and embedded
# commands. Setup runs under `set -e` so any failure aborts; the shunit
# assertions then run with error-exit disabled.
testSimpleExpansion() {
    set -e
    d=`mktemp -d`
    [ -n $d ]
    [ -d $d ]
    export y=67890
    expand_conf ./gen_conf $d
    set +e

    grep "HELLO" $d/test1.conf >/dev/null
    assertEquals 0 $?

    grep "file.*test2.conf" $d/test2.conf >/dev/null
    assertEquals 0 $?

    grep "here testme here" $d/test3.conf >/dev/null
    assertEquals 0 $?

    grep "here2 67890 here" $d/test4.conf >/dev/null
    assertEquals 0 $?

    grep "partial_content" $d/test5.conf >/dev/null
    assertEquals 0 $?

    rm -rf $d
}
t_func() {
  # Marker command invoked through the ";!" comment-directive in
  # testCommentStr. Spelling is intentional as-is: the assertion there
  # expects the literal string SUCESS.
  printf '%s\n' SUCESS
}
# Verify the COMMENT_STR feature: a line of the form ";! <cmd>" in a
# source config must cause expand_conf to execute <cmd> (here t_func)
# and capture its stdout as the rendered file's content.
testCommentStr() {
set -e
d=`mktemp -d`
d2=`mktemp -d`
# NOTE(review): unquoted -n tests are always true; quote to be effective.
[ -n $d ]
[ -d $d ]
[ -n $d2 ]
[ -d $d2 ]
# Directive line: with COMMENT_STR=';', ";!" marks an executable hook.
echo ";! t_func" >$d2/test.conf
COMMENT_STR=\; expand_conf $d2 $d
set +e
val=`cat $d/test.conf`
# t_func prints the (intentionally misspelled) marker SUCESS.
assertEquals SUCESS "$val"
rm -rf $d $d2
}
mytest_hdl() {
  # Failure handler wired up via FAILED_FILE_HANDLER in testFail2:
  # writes a marker plus the failing source path ($2) into the output
  # file ($3). $1 is ignored here — presumably extra error context from
  # expand_conf; confirm against lib.sh.
  printf 'handler_cmd %s\n' "$2" > "$3"
}
# Verify both fallback mechanisms for templates that fail to expand
# (gen_conf/test5.conf): FAILED_FILE_CONTENT substitutes a fixed string,
# FAILED_FILE_HANDLER invokes a shell function (mytest_hdl above).
testFail2() {
set -e
d=`mktemp -d`
d2=`mktemp -d`
# NOTE(review): unquoted -n tests are always true; quote to be effective.
[ -n $d ]
[ -d $d ]
[ -n $d2 ]
[ -d $d2 ]
cp gen_conf/test5.conf $d2/test5.conf
# Fallback 1: literal replacement content for the failing file.
FAILED_FILE_CONTENT=subst_content expand_conf $d2 $d
set +e
grep "subst_content" $d/test5.conf >/dev/null
assertEquals 0 $?
# Fallback 2: handler function receives the failing source path and
# writes the output file itself (see mytest_hdl).
FAILED_FILE_HANDLER=mytest_hdl expand_conf $d2 $d
grep "handler_cmd $d2/test5.conf" $d/test5.conf >/dev/null
assertEquals 0 $?
rm -rf $d $d2
}
. ${SHUNIT_DIR:-../../shunit2-2.0.3}/src/shell/shunit2
|
aschults/bluebox_squid
|
integration_test/base_squid.sh
|
Shell
|
apache-2.0
| 1,360 |
#!/bin/bash
# Build and tag images
# Remove any stale local image first so the build starts from a clean
# tag. NOTE(review): without `set -e` a failing `docker rmi` (image not
# present yet) is non-fatal and the build below still runs.
docker rmi redfish-simulator_0.99.0a
docker build -t "redfish-simulator_0.99.0a" .
|
bcornec/python-redfish
|
dmtf/mockup_0.99.0a/buildImage.sh
|
Shell
|
apache-2.0
| 119 |
#!/usr/bin/env bash
# Builds a "cleanroom" Docker container to run BATS tests in, and then
# executes the tests in that container, mounting the tests and Habitat
# binaries as needed.
#
# Usage: run-bats.sh [test ...]
#   With no arguments, every test under test/integration is run.

# Keep the requested tests in an array so each path reaches bats as its
# own intact argument. The previous `TESTS="$*"` joined all arguments
# into one word, breaking multiple test paths and paths with spaces.
if [ $# -eq 0 ] ; then
  TESTS=(".")
else
  TESTS=("$@")
fi

docker build -t hab-bats-cleanroom "$(pwd)"/test/integration

# Bind-mount the test suite and the locally built Habitat binaries into
# the cleanroom image, then run bats against the requested tests.
docker run -it --rm \
  --mount type=bind,source="$(pwd)/test/integration",target=/test \
  --mount type=bind,source="$(pwd)/target/debug/hab-launch",target=/bin/hab-launch \
  --mount type=bind,source="$(pwd)/target/debug/hab-sup",target=/bin/hab-sup \
  --mount type=bind,source="$(pwd)/target/debug/hab",target=/bin/hab \
  --env HAB_BIN_DIR=/bin \
  --workdir=/test \
  --name hab-bats \
  hab-bats-cleanroom \
  bats "${TESTS[@]}"
|
rsertelon/habitat
|
run-bats.sh
|
Shell
|
apache-2.0
| 786 |
#!/bin/bash
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# base.sh presumably defines the `run` helper used at the bottom — TODO
# confirm its semantics (appears to launch a command, logging to ./log).
source base.sh
USER_BUILD=data/user
# Kill whatever currently holds TCP port 8080 so the server can bind it.
fuser -k -n tcp 8080
# Drop all cached GHCJS build artifacts from user projects so they are
# rebuilt fresh by the new server instance.
rm -rf $USER_BUILD/base.jsexe
rm -rf $USER_BUILD/LinkMain.js_hi
rm -rf $USER_BUILD/LinkMain.js_o
rm -rf $USER_BUILD/P??/*.jsexe
rm -rf $USER_BUILD/P??/*.js_hi
rm -rf $USER_BUILD/P??/*.js_o
rm -rf $USER_BUILD/P??/*.err.txt
mkdir -p log
run . codeworld-server -p 8080
|
Ye-Yong-Chi/codeworld
|
run.sh
|
Shell
|
apache-2.0
| 937 |
#!/bin/bash
#
# Integration smoke tests for `oc new-app` / `oc new-build` against a
# running OpenShift cluster: image/template matching, search, build
# strategies, labels, installable images, and error reporting. Each
# os::cmd helper (from hack/cmd_util.sh) asserts on the command's exit
# status and, where given, on its output text.
set -o errexit
set -o nounset
set -o pipefail
OS_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "${OS_ROOT}/hack/util.sh"
source "${OS_ROOT}/hack/cmd_util.sh"
os::log::install_errexit
# Cleanup cluster resources created by this test
(
set +e
oc delete all,templates --all
exit 0
) &>/dev/null
# This test validates the new-app command
os::cmd::expect_success_and_text 'oc new-app library/php mysql -o yaml' '3306'
os::cmd::expect_success_and_text 'oc new-app library/php mysql --dry-run' "Image \"php\" runs as the 'root' user which may not be permitted by your cluster administrator"
os::cmd::expect_failure 'oc new-app unknownhubimage -o yaml'
# verify we can generate a Docker image based component "mongodb" directly
os::cmd::expect_success_and_text 'oc new-app mongo -o yaml' 'image:\s*mongo'
# the local image repository takes precedence over the Docker Hub "mysql" image
os::cmd::expect_success 'oc create -f examples/image-streams/image-streams-centos7.json'
os::cmd::try_until_success 'oc get imagestreamtags mysql:latest'
os::cmd::try_until_success 'oc get imagestreamtags mysql:5.5'
os::cmd::try_until_success 'oc get imagestreamtags mysql:5.6'
os::cmd::expect_success_and_not_text 'oc new-app mysql -o yaml' 'image:\s*mysql'
os::cmd::expect_success_and_not_text 'oc new-app mysql --dry-run' "runs as the 'root' user which may not be permitted by your cluster administrator"
# trigger and output should say 5.6
os::cmd::expect_success_and_text 'oc new-app mysql -o yaml' 'mysql:5.6'
os::cmd::expect_success_and_text 'oc new-app mysql --dry-run' 'tag "5.6" for "mysql"'
# test deployments are created with the boolean flag and printed in the UI
os::cmd::expect_success_and_text 'oc new-app mysql --dry-run --as-test' 'This image will be test deployed'
os::cmd::expect_success_and_text 'oc new-app mysql -o yaml --as-test' 'test: true'
# docker strategy with repo that has no dockerfile
os::cmd::expect_failure_and_text 'oc new-app https://github.com/openshift/nodejs-ex --strategy=docker' 'No Dockerfile was found'
# check label creation
os::cmd::try_until_success 'oc get imagestreamtags php:latest'
os::cmd::try_until_success 'oc get imagestreamtags php:5.5'
os::cmd::try_until_success 'oc get imagestreamtags php:5.6'
os::cmd::expect_success 'oc new-app php mysql -l no-source=php-mysql'
os::cmd::expect_success 'oc delete all -l no-source=php-mysql'
os::cmd::expect_success 'oc new-app php mysql'
os::cmd::expect_success 'oc delete all -l app=php'
os::cmd::expect_failure 'oc get dc/mysql'
os::cmd::expect_failure 'oc get dc/php'
# check if we can create from a stored template
os::cmd::expect_success 'oc create -f examples/sample-app/application-template-stibuild.json'
os::cmd::expect_success 'oc get template ruby-helloworld-sample'
os::cmd::expect_success_and_text 'oc new-app ruby-helloworld-sample -o yaml' 'MYSQL_USER'
os::cmd::expect_success_and_text 'oc new-app ruby-helloworld-sample -o yaml' 'MYSQL_PASSWORD'
os::cmd::expect_success_and_text 'oc new-app ruby-helloworld-sample -o yaml' 'ADMIN_USERNAME'
os::cmd::expect_success_and_text 'oc new-app ruby-helloworld-sample -o yaml' 'ADMIN_PASSWORD'
# verify we can create from a template when some objects in the template declare an app label
# the app label should still be applied to the other objects in the template.
os::cmd::expect_success_and_text 'oc new-app -f test/fixtures/template-with-app-label.json -o yaml' 'app: ruby-sample-build'
os::cmd::expect_success_and_text 'oc new-app -f test/fixtures/template-with-app-label.json -o yaml' 'app: myapp'
# check search
os::cmd::expect_success_and_text 'oc new-app --search mysql' "Tags:\s+5.5, 5.6, latest"
os::cmd::expect_success_and_text 'oc new-app --search ruby-helloworld-sample' 'ruby-helloworld-sample'
# check search - partial matches
os::cmd::expect_success_and_text 'oc new-app --search ruby-hellow' 'ruby-helloworld-sample'
os::cmd::expect_success_and_text 'oc new-app --search --template=ruby-hel' 'ruby-helloworld-sample'
os::cmd::expect_success_and_text 'oc new-app --search --template=ruby-helloworld-sam -o yaml' 'ruby-helloworld-sample'
os::cmd::expect_success_and_text 'oc new-app --search rub' "Tags:\s+2.0, 2.2, latest"
os::cmd::expect_success_and_text 'oc new-app --search --image-stream=rub' "Tags:\s+2.0, 2.2, latest"
# check search - check correct usage of filters
os::cmd::expect_failure_and_not_text 'oc new-app --search --image-stream=ruby-heloworld-sample' 'application-template-stibuild'
os::cmd::expect_failure 'oc new-app --search --template=php'
os::cmd::expect_failure 'oc new-app -S --template=nodejs'
os::cmd::expect_failure 'oc new-app -S --template=perl'
# check search - filtered, exact matches
# make sure the imagestreams are imported first.
os::cmd::try_until_success 'oc get imagestreamtags mongodb:latest'
os::cmd::try_until_success 'oc get imagestreamtags mongodb:2.4'
os::cmd::try_until_success 'oc get imagestreamtags mongodb:2.6'
os::cmd::try_until_success 'oc get imagestreamtags mysql:latest'
os::cmd::try_until_success 'oc get imagestreamtags mysql:5.5'
os::cmd::try_until_success 'oc get imagestreamtags mysql:5.6'
os::cmd::try_until_success 'oc get imagestreamtags nodejs:latest'
os::cmd::try_until_success 'oc get imagestreamtags nodejs:0.10'
os::cmd::try_until_success 'oc get imagestreamtags perl:latest'
os::cmd::try_until_success 'oc get imagestreamtags perl:5.16'
os::cmd::try_until_success 'oc get imagestreamtags perl:5.20'
os::cmd::try_until_success 'oc get imagestreamtags php:latest'
os::cmd::try_until_success 'oc get imagestreamtags php:5.5'
os::cmd::try_until_success 'oc get imagestreamtags php:5.6'
os::cmd::try_until_success 'oc get imagestreamtags postgresql:latest'
os::cmd::try_until_success 'oc get imagestreamtags postgresql:9.2'
os::cmd::try_until_success 'oc get imagestreamtags postgresql:9.4'
os::cmd::try_until_success 'oc get imagestreamtags python:latest'
os::cmd::try_until_success 'oc get imagestreamtags python:2.7'
os::cmd::try_until_success 'oc get imagestreamtags python:3.3'
os::cmd::try_until_success 'oc get imagestreamtags python:3.4'
os::cmd::try_until_success 'oc get imagestreamtags ruby:latest'
os::cmd::try_until_success 'oc get imagestreamtags ruby:2.0'
os::cmd::try_until_success 'oc get imagestreamtags ruby:2.2'
os::cmd::try_until_success 'oc get imagestreamtags wildfly:latest'
os::cmd::try_until_success 'oc get imagestreamtags wildfly:10.0'
os::cmd::expect_success_and_text 'oc new-app --search --image-stream=mongodb' "Tags:\s+2.4, 2.6, latest"
os::cmd::expect_success_and_text 'oc new-app --search --image-stream=mysql' "Tags:\s+5.5, 5.6, latest"
os::cmd::expect_success_and_text 'oc new-app --search --image-stream=nodejs' "Tags:\s+0.10, latest"
os::cmd::expect_success_and_text 'oc new-app --search --image-stream=perl' "Tags:\s+5.16, 5.20, latest"
os::cmd::expect_success_and_text 'oc new-app --search --image-stream=php' "Tags:\s+5.5, 5.6, latest"
os::cmd::expect_success_and_text 'oc new-app --search --image-stream=postgresql' "Tags:\s+9.2, 9.4, latest"
os::cmd::expect_success_and_text 'oc new-app -S --image-stream=python' "Tags:\s+2.7, 3.3, 3.4, latest"
os::cmd::expect_success_and_text 'oc new-app -S --image-stream=ruby' "Tags:\s+2.0, 2.2, latest"
os::cmd::expect_success_and_text 'oc new-app -S --image-stream=wildfly' "Tags:\s+10.0, 8.1, 9.0, latest"
os::cmd::expect_success_and_text 'oc new-app --search --template=ruby-helloworld-sample' 'ruby-helloworld-sample'
# check search - no matches
os::cmd::expect_failure_and_text 'oc new-app -S foo-the-bar' 'no matches found'
os::cmd::expect_failure_and_text 'oc new-app --search winter-is-coming' 'no matches found'
# check search - mutually exclusive flags
os::cmd::expect_failure_and_text 'oc new-app -S mysql --env=FOO=BAR' "can't be used"
os::cmd::expect_failure_and_text 'oc new-app --search mysql --code=https://github.com/openshift/ruby-hello-world' "can't be used"
os::cmd::expect_failure_and_text 'oc new-app --search mysql --param=FOO=BAR' "can't be used"
# set context-dir
os::cmd::expect_success_and_text 'oc new-app https://github.com/openshift/sti-ruby.git --context-dir="2.0/test/puma-test-app" -o yaml' 'contextDir: 2.0/test/puma-test-app'
os::cmd::expect_success_and_text 'oc new-app ruby~https://github.com/openshift/sti-ruby.git --context-dir="2.0/test/puma-test-app" -o yaml' 'contextDir: 2.0/test/puma-test-app'
# set strategy
os::cmd::expect_success_and_text 'oc new-app ruby~https://github.com/openshift/ruby-hello-world.git --strategy=docker -o yaml' 'dockerStrategy'
os::cmd::expect_success_and_text 'oc new-app https://github.com/openshift/ruby-hello-world.git --strategy=source -o yaml' 'sourceStrategy'
# prints volume and root user info
os::cmd::expect_success_and_text 'oc new-app --dry-run mysql' 'This image declares volumes'
os::cmd::expect_success_and_not_text 'oc new-app --dry-run mysql' "runs as the 'root' user"
os::cmd::expect_success_and_text 'oc new-app --dry-run --docker-image=mysql' 'This image declares volumes'
os::cmd::expect_success_and_text 'oc new-app --dry-run --docker-image=mysql' "WARNING: Image \"mysql\" runs as the 'root' user"
# verify multiple errors are displayed together, a nested error is returned, and that the usage message is displayed
os::cmd::expect_failure_and_text 'oc new-app --dry-run __template_fail __templatefile_fail' 'error: no match for "__template_fail"'
os::cmd::expect_failure_and_text 'oc new-app --dry-run __template_fail __templatefile_fail' 'error: no match for "__templatefile_fail"'
os::cmd::expect_failure_and_text 'oc new-app --dry-run __template_fail __templatefile_fail' 'error: unable to find the specified template file'
os::cmd::expect_failure_and_text 'oc new-app --dry-run __template_fail __templatefile_fail' "The 'oc new-app' command will match arguments"
# verify partial match error
os::cmd::expect_failure_and_text 'oc new-app --dry-run mysq' 'error: only a partial match was found for "mysq"'
os::cmd::expect_failure_and_text 'oc new-app --dry-run mysq' 'The argument "mysq" only partially matched'
os::cmd::expect_failure_and_text 'oc new-app --dry-run mysq' "Image stream \"mysql\" \\(tag \"5.6\"\\) in project"
# verify image streams with no tags are reported correctly and that --allow-missing-imagestream-tags works
# new-app
os::cmd::expect_success 'printf "apiVersion: v1\nkind: ImageStream\nmetadata:\n name: emptystream\n" | oc create -f -'
os::cmd::expect_failure_and_text 'oc new-app --dry-run emptystream' 'error: no tags found on matching image stream'
os::cmd::expect_success 'oc new-app --dry-run emptystream --allow-missing-imagestream-tags'
# new-build
os::cmd::expect_failure_and_text 'oc new-build --dry-run emptystream~https://github.com/openshift/ruby-ex' 'error: no tags found on matching image stream'
os::cmd::expect_success 'oc new-build --dry-run emptystream~https://github.com/openshift/ruby-ex --allow-missing-imagestream-tags --strategy=source'
# Allow setting --name when specifying grouping
os::cmd::expect_success "oc new-app mysql+ruby~https://github.com/openshift/ruby-ex --name foo -o yaml"
# but not with multiple components
os::cmd::expect_failure_and_text "oc new-app mysql ruby~https://github.com/openshift/ruby-ex --name foo -o yaml" "error: only one component or source repository can be used when specifying a name"
# do not allow specifying output image when specifying multiple input repos
os::cmd::expect_failure_and_text 'oc new-build https://github.com/openshift/nodejs-ex https://github.com/openshift/ruby-ex --to foo' 'error: only one component with source can be used when specifying an output image reference'
# but succeed with multiple intput repos and no output image specified
os::cmd::expect_success 'oc new-build https://github.com/openshift/nodejs-ex https://github.com/openshift/ruby-ex -o yaml'
# check that binary build with a builder image results in a source type build
os::cmd::expect_success_and_text 'oc new-build --binary --image-stream=ruby -o yaml' 'type: Source'
# check that binary build with a specific strategy uses that strategy regardless of the image type
os::cmd::expect_success_and_text 'oc new-build --binary --image=ruby --strategy=docker -o yaml' 'type: Docker'
os::cmd::expect_success 'oc delete imageStreams --all'
# check that we can create from the template without errors
os::cmd::expect_success_and_text 'oc new-app ruby-helloworld-sample -l app=helloworld' 'service "frontend" created'
os::cmd::expect_success 'oc delete all -l app=helloworld'
os::cmd::expect_success_and_text 'oc new-app ruby-helloworld-sample -l app=helloworld -o name' 'service/frontend'
os::cmd::expect_success 'oc delete all -l app=helloworld'
# create from template with code explicitly set is not supported
os::cmd::expect_failure 'oc new-app [email protected]:mfojtik/sinatra-app-example'
os::cmd::expect_success 'oc delete template ruby-helloworld-sample'
# override component names
os::cmd::expect_success_and_text 'oc new-app mysql --name=db' 'db'
os::cmd::expect_success 'oc new-app https://github.com/openshift/ruby-hello-world -l app=ruby'
os::cmd::expect_success 'oc delete all -l app=ruby'
# check for error when template JSON file has errors
jsonfile="${OS_ROOT}/test/fixtures/invalid.json"
os::cmd::expect_failure_and_text "oc new-app '${jsonfile}'" "error: unable to load template file \"${jsonfile}\": at offset 8: invalid character '}' after object key"
# check new-build
os::cmd::expect_failure_and_text 'oc new-build mysql -o yaml' 'you must specify at least one source repository URL'
os::cmd::expect_success_and_text 'oc new-build mysql --binary -o yaml --to mysql:bin' 'type: Binary'
os::cmd::expect_success_and_text 'oc new-build mysql https://github.com/openshift/ruby-hello-world --strategy=docker -o yaml' 'type: Docker'
os::cmd::expect_failure_and_text 'oc new-build mysql https://github.com/openshift/ruby-hello-world --binary' 'specifying binary builds and source repositories at the same time is not allowed'
# new-build image source tests
os::cmd::expect_failure_and_text 'oc new-build mysql --source-image centos' 'error: --source-image-path must be specified when --source-image is specified.'
os::cmd::expect_failure_and_text 'oc new-build mysql --source-image-path foo' 'error: --source-image must be specified when --source-image-path is specified.'
# do not allow use of non-existent image (should fail)
os::cmd::expect_failure_and_text 'oc new-app openshift/bogusImage https://github.com/openshift/ruby-hello-world.git -o yaml' "no match for"
# allow use of non-existent image (should succeed)
os::cmd::expect_success 'oc new-app openshift/bogusImage https://github.com/openshift/ruby-hello-world.git -o yaml --allow-missing-images'
# installable-image tests run as a plain project editor rather than the
# admin user the suite started with.
os::cmd::expect_success 'oc create -f test/fixtures/installable-stream.yaml'
project=$(oc project -q)
os::cmd::expect_success 'oc policy add-role-to-user edit test-user'
os::cmd::expect_success 'oc login -u test-user -p anything'
# Single quotes are deliberate: ${project} is expanded later when the
# os::cmd helper evaluates the command string, not at this line.
os::cmd::try_until_success 'oc project ${project}'
os::cmd::try_until_success 'oc get imagestreamtags installable:file'
os::cmd::try_until_success 'oc get imagestreamtags installable:token'
os::cmd::try_until_success 'oc get imagestreamtags installable:serviceaccount'
os::cmd::expect_failure 'oc new-app installable:file'
os::cmd::expect_failure_and_text 'oc new-app installable:file' 'requires that you grant the image access'
os::cmd::expect_failure_and_text 'oc new-app installable:serviceaccount' "requires an 'installer' service account with project editor access"
os::cmd::expect_success_and_text 'oc new-app installable:file --grant-install-rights -o yaml' '/var/run/openshift.secret.token'
os::cmd::expect_success_and_text 'oc new-app installable:file --grant-install-rights -o yaml' 'activeDeadlineSeconds: 14400'
os::cmd::expect_success_and_text 'oc new-app installable:file --grant-install-rights -o yaml' 'openshift.io/generated-job: "true"'
os::cmd::expect_success_and_text 'oc new-app installable:file --grant-install-rights -o yaml' 'openshift.io/generated-job.for: installable:file'
os::cmd::expect_success_and_text 'oc new-app installable:token --grant-install-rights -o yaml' 'name: TOKEN_ENV'
os::cmd::expect_success_and_text 'oc new-app installable:token --grant-install-rights -o yaml' 'openshift/origin@sha256:'
os::cmd::expect_success_and_text 'oc new-app installable:serviceaccount --grant-install-rights -o yaml' 'serviceAccountName: installer'
os::cmd::expect_success_and_text 'oc new-app installable:serviceaccount --grant-install-rights -o yaml' 'fieldPath: metadata.namespace'
os::cmd::expect_success_and_text 'oc new-app installable:serviceaccount --grant-install-rights -o yaml A=B' 'name: A'
# Ensure output is valid JSON
os::cmd::expect_success 'oc new-app mongo -o json | python -m json.tool'
# Ensure custom branch/ref works
os::cmd::expect_success 'oc new-app https://github.com/openshift/ruby-hello-world#beta4'
# Ensure the resulting BuildConfig doesn't have unexpected sources
os::cmd::expect_success_and_not_text 'oc new-app https://github.com/openshift/ruby-hello-world --output-version=v1 -o=jsonpath="{.items[?(@.kind==\"BuildConfig\")].spec.source}"' 'dockerfile|binary'
echo "new-app: ok"
|
spinolacastro/origin
|
test/cmd/newapp.sh
|
Shell
|
apache-2.0
| 17,164 |
#!/bin/bash
#/*
# * Copyright (c) 2012 Adobe Systems Incorporated. All rights reserved.
# *
# * Permission is hereby granted, free of charge, to any person obtaining a
# * copy of this software and associated documentation files (the "Software"),
# * to deal in the Software without restriction, including without limitation
# * the rights to use, copy, modify, merge, publish, distribute, sublicense,
# * and/or sell copies of the Software, and to permit persons to whom the
# * Software is furnished to do so, subject to the following conditions:
# *
# * The above copyright notice and this permission notice shall be included in
# * all copies or substantial portions of the Software.
# *
# * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# * DEALINGS IN THE SOFTWARE.
# *
# */
# Container entrypoint: configures and launches the api-gateway (nginx
# based) process. All knobs come from environment variables.
debug_mode=${DEBUG}
log_level=${LOG_LEVEL:-warn}
# NOTE(review): marathon_host, sleep_duration, remote_config and
# remote_config_sync_interval are read from the environment here but
# never used in this script — presumably consumed by sourced/forked
# tooling elsewhere in the image; confirm before removing.
marathon_host=${MARATHON_HOST}
redis_host=${REDIS_HOST}
redis_port=${REDIS_PORT}
sleep_duration=${MARATHON_POLL_INTERVAL:-5}
# location for a remote /etc/api-gateway folder.
# i.e s3://api-gateway-config
remote_config=${REMOTE_CONFIG}
remote_config_sync_interval=${REMOTE_CONFIG_SYNC_INTERVAL:-10s}
echo "Starting api-gateway ..."
# DEBUG=true swaps the binary for its debug build so all later
# invocations (including -t and the daemon) use the debug variant.
if [ "${debug_mode}" == "true" ]; then
echo " ... in DEBUG mode "
mv /usr/local/sbin/api-gateway /usr/local/sbin/api-gateway-no-debug
ln -sf /usr/local/sbin/api-gateway-debug /usr/local/sbin/api-gateway
fi
/usr/local/sbin/api-gateway -V
echo "------"
# Generate a "resolver <ns1> <ns2> ...;" directive from the container's
# /etc/resolv.conf so nginx can resolve upstream hostnames.
echo resolver $(awk 'BEGIN{ORS=" "} /nameserver/{print $2}' /etc/resolv.conf | sed "s/ $/;/g") > /etc/api-gateway/conf.d/includes/resolvers.conf
echo " ... with dns $(cat /etc/api-gateway/conf.d/includes/resolvers.conf)"
echo " ... testing configuration "
api-gateway -t -p /usr/local/api-gateway/ -c /etc/api-gateway/api-gateway.conf
echo " ... using log level: '${log_level}'. Override it with -e 'LOG_LEVEL=<level>' "
# Run the gateway in the foreground of a background job; errors go to
# the container's stderr at the configured level.
api-gateway -p /usr/local/api-gateway/ -c /etc/api-gateway/api-gateway.conf -g "daemon off; error_log /dev/stderr ${log_level};" &
# When Redis is configured, keep the container alive by tailing the
# gateway logs; otherwise the script falls through and the container
# exits once the background job is orphaned.
if [[ -n "${redis_host}" && -n "${redis_port}" ]]; then
sleep 1 # sleep until api-gateway is set up
tail -f /var/log/api-gateway/access.log -f /var/log/api-gateway/error.log \
-f /var/log/api-gateway/gateway_error.log -f /var/log/api-gateway/management.log
else
echo "REDIS_HOST and/or REDIS_PORT not defined"
fi
|
taylorking/apigateway
|
init.sh
|
Shell
|
apache-2.0
| 2,738 |
#!/usr/bin/env bash
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail

KOPS_ROOT=$(git rev-parse --show-toplevel)

# Run the code generator in verification mode: it diffs the generated
# apimachinery sources instead of rewriting them.
export API_OPTIONS="--verify-only"

# `make` inside an `if` condition does not trip errexit, so a failure
# reaches the else branch instead of aborting the script.
if make apimachinery-codegen; then
  echo "apimachinery is up to date"
else
  # Use printf: plain `echo` does not interpret "\n", so the previous
  # messages printed a literal backslash-n instead of a blank line.
  printf '\n FAIL: - the verify-apimachinery.sh test failed, apimachinery is not up to date\n'
  printf '\n FAIL: - please run the command '\''make apimachinery'\''\n'
  exit 1
fi
|
gambol99/kops
|
hack/verify-apimachinery.sh
|
Shell
|
apache-2.0
| 973 |
#!/bin/sh
# Print the nameserver names registered with the Afilias API, one per
# line. Credentials ($user/$pass) and the endpoint ($urlbase) come from
# the sourced afilias_sync.conf.
. /usr/share/atomiadns/eventlibrary/afilias_sync/afilias_sync.conf
# Split the XML response on '<' so each element starts a new line, keep
# the <name> elements and strip the tag text before the '>'.
# NOTE(review): 2>&1 folds curl diagnostics into the parsed stream —
# presumably so transport failures just yield empty output; confirm.
curl -s -k -u "$user:$pass" \
-H "Content-Type: text/xml" \
-X GET "$urlbase/nameservers/names" 2>&1 | tr "<" "\n" | grep "^name>" | cut -d ">" -f 2
exit 0
|
atomia/atomiadns
|
syncer/eventlibrary/afilias_sync/afilias_get_nameservers.sh
|
Shell
|
isc
| 237 |
#!/bin/bash
# Plot the historical on-disk size of install scripts.
# Reads install-size-history.txt, normalizes it into comma-separated
# records, writes one data file per script under ${BASE_DIR}/data, then
# generates a gnuplot `plot` command with one curve per (non-excluded)
# script and runs the resulting gnuplot script.
set -e

BASE_DIR=./script-size

## Ensure required directories are created.
mkdir -p "${BASE_DIR}/data"

## Clean data
# Add separate(,) after script name.
# Add separate(,) after date. Don't retain time.
# Add separate(,) total size.
# Remove K. at the end.
# Clean up scriptname: Remove up to repository/
# Clean up scriptname: Remove up : install.sh/
# Clean up scriptname: Remove ./
sed 's/\.sh: /.sh, /' install-size-history.txt | \
  sed 's/_........: CLDS: . Disk size = /, /' | \
  sed 's/K. Space Used = /, /' | \
  sed 's/K.$//' | \
  sed 's/.*repository\///' | \
  sed 's/.*: install.sh/install.sh/' | \
  sed 's/^\.\///' > "${BASE_DIR}/lss-clean.txt"

## Get script name.
# Remove everything after comma(,); dedupe the remaining names.
sed 's/,.*//g' "${BASE_DIR}/lss-clean.txt" | \
  sort | uniq > "${BASE_DIR}/lss-uniq-names.txt"

## Exclude scripts listed in list-script-size-ex.lst.
# The `|| [ -n "$LINE" ]` guard still processes a final line that lacks
# a trailing newline. NOTE(review): ${LINE} is interpolated into the sed
# address as a regex, so names act as patterns ('.' matches any char);
# kept as-is to preserve existing matching behavior.
while IFS='' read -r LINE || [ -n "$LINE" ]; do
  if [ -n "${LINE}" ]; then
    echo "Exclude ${LINE}."
    sed -i "/${LINE}/d" "${BASE_DIR}/lss-uniq-names.txt"
  fi
done < list-script-size-ex.lst

## Create data set per script and generate gnuplot script
DATASET_PLOT_CMD=${BASE_DIR}/gnuplot-dataset-cmd.pg
echo -n "plot" > "${DATASET_PLOT_CMD}"
# Fixed: the no-trailing-newline guard previously tested $LINE, but this
# loop reads into $SCRIPT_NAME, so a last line without a newline was
# silently dropped.
while IFS='' read -r SCRIPT_NAME || [ -n "$SCRIPT_NAME" ]; do
  if [ -n "${SCRIPT_NAME}" ]; then
    # Create data set per script.
    grep "${SCRIPT_NAME}" "${BASE_DIR}/lss-clean.txt" > "${BASE_DIR}/data/${SCRIPT_NAME}"
    # Append one gnuplot curve (date vs size) for this script; the
    # trailing ', \' continues the plot command onto the next line.
    echo " \"${BASE_DIR}/data/${SCRIPT_NAME}\" using 2:4 title \"${SCRIPT_NAME}\", \\" >> "${DATASET_PLOT_CMD}"
  fi
done < "${BASE_DIR}/lss-uniq-names.txt"

## Plot the graph
# Remove the last ', \' so the generated plot command is well-formed.
sed -i '$s/, \\//' "${DATASET_PLOT_CMD}"

PLOT_EXE_SCRIPT=list-script-size-exe.pg
cat list-script-size.pg > "${PLOT_EXE_SCRIPT}"
cat "${DATASET_PLOT_CMD}" >> "${PLOT_EXE_SCRIPT}"
chmod +x "${PLOT_EXE_SCRIPT}"
./"${PLOT_EXE_SCRIPT}"
|
bankonmeOS/cust-live-deb
|
list-script-size.sh
|
Shell
|
gpl-2.0
| 1,946 |
#!/bin/sh
# TAP-style test runner: feed each valid/*.json fixture through
# ../JSON.sh and diff its output against the matching *.parsed file.
# Exits with the number of failing fixtures.

# Run relative to this script's directory; abort if that fails rather
# than testing whatever happens to be in the caller's cwd.
cd "${0%/*}" || exit 1

fails=0
i=0
# Count fixtures via the positional parameters instead of parsing `ls`
# output (fragile with unusual filenames).
set -- valid/*.json
echo "1..$#"

for input in "$@"
do
  expected="${input%.json}.parsed"
  i=$((i+1))
  if ! ../JSON.sh < "$input" | diff -u - "$expected"
  then
    echo "not ok $i - $input"
    fails=$((fails+1))
  else
    echo "ok $i - $input"
  fi
done
echo "$fails test(s) failed"
exit $fails
|
vbextreme/TelegramBotBash
|
core/JSON.sh/test/valid-test.sh
|
Shell
|
gpl-2.0
| 368 |
#!/bin/sh
# Build the Sismics Reader Docker image from the Dockerfile in this directory.
docker build -t sismics/reader .
|
Feitianyuan/reader
|
build.sh
|
Shell
|
gpl-2.0
| 43 |
#!/bin/sh
#############################################################################
#
# Purpose: This script will install viking
#
#############################################################################
# Copyright (c) 2010-2018 The Open Source Geospatial Foundation.
# Licensed under the GNU LGPL.
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 2.1 of the License,
# or any later version. This library is distributed in the hope that
# it will be useful, but WITHOUT ANY WARRANTY, without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Lesser General Public License for more details, either
# in the "LICENSE.LGPL.txt" file distributed with this software or at
# web page "http://www.fsf.org/licenses/lgpl.html".
#############################################################################
# Record disk usage before/after the install for OSGeoLive reporting.
./diskspace_probe.sh "`basename $0`" begin
####
BUILD_DIR=`pwd`
# Default desktop user for the OSGeoLive image.
if [ -z "$USER_NAME" ] ; then
USER_NAME="user"
fi
USER_HOME="/home/$USER_NAME"
# viking plus its GPS companion tools.
apt-get --assume-yes install viking gpsbabel gpsd
# copy icon to Desktop
cp /usr/share/applications/viking.desktop "$USER_HOME/Desktop/"
#Temp fix for #1754
# Pre-seed the user's viking config from the bundled app-conf defaults.
mkdir -p "$USER_HOME"/.viking
cp "$BUILD_DIR"/../app-conf/viking/* "$USER_HOME"/.viking/
####
./diskspace_probe.sh "`basename $0`" end
|
astroidex/OSGeoLive
|
bin/retired/install_viking.sh
|
Shell
|
lgpl-2.1
| 1,451 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.