code (stringlengths 2–1.05M) | repo_name (stringlengths 5–110) | path (stringlengths 3–922) | language (stringclasses 1) | license (stringclasses 15) | size (int64 2–1.05M)
---|---|---|---|---|---
#!/bin/bash
echo "Installing modules..."
insmod mysql_sequence_driver.ko
insmod mysql_sequence_handler.ko
echo "Creating devices files..."
if [ -e /dev/mysql_seq_dev ]
then
echo "devices already created"
else
mknod /dev/mysql_seq_dev c 100 0
mknod /dev/mysql_seq_handler c 101 0
fi
echo "Success!"
|
pedrocruzlopez/sequence_generator
|
modules/mysql/insmod.sh
|
Shell
|
gpl-3.0
| 312 |
#!/bin/sh
# Read the documentation from http://www.postfix.org/FILTER_README.html
# Simple shell-based filter. It is meant to be invoked as follows:
# /path/to/script -f sender recipients...
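#
# A minimal master.cf hookup (a sketch adapted from the FILTER_README;
# the service name and paths are assumptions, adjust to your setup):
#   filter    unix  -       n       n       -       10      pipe
#     flags=Rq user=filter null_sender= argv=/path/to/script -f ${sender} -- ${recipient}
#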
# Localize these. The -G option does nothing before Postfix 2.3.
INSPECT_DIR=/var/spool/filter
SENDMAIL="/usr/sbin/sendmail -G -i" # NEVER NEVER NEVER use "-t" here.
# Exit codes from <sysexits.h>
EX_TEMPFAIL=75
EX_UNAVAILABLE=69
# Clean up when done or when aborting.
trap "rm -f in.$$" 0 1 2 3 15
# Start processing.
cd $INSPECT_DIR || {
echo $INSPECT_DIR does not exist; exit $EX_TEMPFAIL; }
cat >in.$$ || {
echo Cannot save mail to file; exit $EX_TEMPFAIL; }
#Specify your content filter here.
/usr/bin/java -jar /home/filter/AttachmentFilter.jar <in.$$ || {
echo Message content rejected; exit 0; }
$SENDMAIL "$@" <in.$$
exit $?
|
danij/PostfixAttachmentFilter
|
filter.sh
|
Shell
|
gpl-3.0
| 851 |
#!/bin/sh
cd ui
npm install -g grunt-cli bower
npm install
bower install
grunt test
cd ..
|
Gab0rB/weplantaforest
|
ui/run-ui-test-on-travis.sh
|
Shell
|
gpl-3.0
| 92 |
#!/bin/bash
echo | sudo add-apt-repository ppa:webupd8team/brackets
sudo apt-get update
sudo apt-get install -y puppet
sudo cp -r ./finaldigi /etc/puppet/modules/pakettimoduuli
sudo puppet apply -e 'class {pakettimoduuli:}'
sudo cp -r template.html /var/www/html/index.html
firefox 'localhost'
|
leksenderi/Digimoduuli
|
startup.sh
|
Shell
|
gpl-3.0
| 294 |
#!/bin/bash
MountDir='/media/Temple of The Binary Data'
CODE='sefho023wt0pva34thwtu'
VM_NAME='Window 7'
# if the external hard disk is confirmed to be mounted
if [ -d "$MountDir" ]; then
# open the folder
caja "$MountDir"
# if it is not mounted
else
# start the virtual machine instead
/usr/lib/virtualbox/VirtualBox --comment "$VM_NAME" --startvm "$CODE"
fi
exit
|
Thestar3Preservation/ScriptPack
|
Bash Shell Script/virtualbox_to_usbc.sh
|
Shell
|
gpl-3.0
| 393 |
#!/bin/bash
wget -o /dev/null -q -O $HOME/wallpaper.html http://www.lostsheep.com/lj.php 2>/dev/null
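# Pull the first "img src" URL out of the downloaded page and fetch that image.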
wget -o /dev/null -q -O $HOME/wallpaper `grep "img src" $HOME/wallpaper.html |head -n1 |sed -e "s/.*img src=.\(.*\). alt.*/\1/"` 2>/dev/null
# testing
#eog /tmp/wallpaper &
# reset background with a non-existent image
# if you don't use this, then next line will not update the wallpaper
gconftool -s -t string /desktop/gnome/background/picture_filename /usr/share/backgrounds/dummy.jpg
# load new desktop background image
gconftool -s -t string /desktop/gnome/background/picture_filename $HOME/wallpaper
|
wwood/bbbin
|
autoupdate_wallpaper.sh
|
Shell
|
gpl-3.0
| 610 |
#!/bin/bash
# Provides: dockerfeliz
# Required-Start:
# Required-Stop:
# Should-Start:
# Should-Stop:
# Short-Description: start and stop oraclefeliz
# Description: for the happy Oracle container
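# 3b1 is the first few characters of the target container's ID on this host
# (host-specific; docker start accepts any unambiguous ID prefix).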
docker start 3b1
|
googolplex/librocomprasv4
|
gitfeliz/docker_arrancar.sh
|
Shell
|
gpl-3.0
| 210 |
#!/system/bin/sh
# Main script - Manualify
# By Luiz Nickel and Pedro Franco @ Shelleprechaun
echo "has your superuser been activated?"
sleep 2
echo "if this works, the superuser was configured correctly!"
sleep 2
echo "if a pile of Java errors shows up, the superuser was not activated or you do not have it!"
sleep 2
echo "trying spotify in 3 seconds"
sleep 3
am start -n com.spotify.music/com.spotify.music.MainActivity
echo "command issued."
sleep 4
echo "this command only opens spotify. I will now open a page with all the scripts available for Manualify!"
am start -a android.intent.action.VIEW -d https://github.com/leprechaunproject/shelleprechaun/tree/master/Manualify
|
leprechaunproject/shelleprechaun
|
Manualify/manualify.sh
|
Shell
|
mpl-2.0
| 711 |
#!/bin/bash
# =============================================================================
# Project: vagrant-atlassian
#
# Purpose: Restore {Confluence, JIRA} backups
#
# The backups can be fetched from:
# 1. A remote server running Confluence/JIRA
# 2. A tarball
# 3. A Clonezilla image
#
# Known issues and limitations:
# Although this script attempts to be as generic as possible,
# there are a few hardcoded values to make it work on the tested use cases.
# Please read and customize the script to fit your specific needs.
# =============================================================================
#set -x
set -e
# --------------------------------------------------------------
# Configurable parameters (others are hardcoded inside the script)
# --------------------------------------------------------------
CONFLUENCE_BKDIR=/var/atlassian/application-data/confluence/backups
CONFLUENCE_BACKUP=backup-2014_07_30.zip
JIRA_BKDIR=/var/atlassian/application-data/jira/export
JIRA_BACKUP=2014-Aug-04--1041.zip
DESTDIR=`pwd`/tmp
# -------------
# Sanity checks
# -------------
if [ `whoami` != root ]; then
echo This script has to be run as root
exit 1
fi
mkdir -p "${DESTDIR}"
[ ! -z "${CONFLUENCE_BACKUP}" ] && if [ -e "${DESTDIR}/${CONFLUENCE_BACKUP}" ]; then
echo "WARNING: Will overwrite existing ${DESTDIR}/${CONFLUENCE_BACKUP}"
fi
[ ! -z "${JIRA_BACKUP}" ] && if [ -e "${DESTDIR}/${JIRA_BACKUP}" ]; then
echo "WARNING: Will overwrite existing ${DESTDIR}/${JIRA_BACKUP}"
fi
# --------------------------------------------
# OPTION 1: RESTORE FILES FROM A REMOTE SERVER
# --------------------------------------------
#[ ! -z "${CONFLUENCE_BACKUP}" ] && if [ ! -e "${DESTDIR}/${CONFLUENCE_BACKUP}" ]; then
# echo "INFO: Downloading ${CONFLUENCE_BACKUP}"
# scp "[email protected]:${CONFLUENCE_BKDIR}/${CONFLUENCE_BACKUP}" "${DESTDIR}"
#fi
# --------------------------------------
# OPTION 2: RESTORE FILES FROM A TARBALL
# --------------------------------------
if [ ! -e /tmp/sdc1/BACKUP ]; then
mkdir -p /tmp/sdc1
mount -o ro /dev/sdc1 /tmp/sdc1
fi
#tar tvfz /tmp/sdc1/BACKUP/20140805-bk-mv-linux-powerhorse-opt.tar.gz
#tar tvfz /tmp/sdc1/BACKUP/20140805-bk-mv-linux-powerhorse-scratch.tar.gz
#tar tvz -C "${DESTDIR}" -f /tmp/sdc1/BACKUP/20140805-bk-mv-linux-powerhorse-opt.tar.gz \
# ./atlassian/confluence/backup \
# ./atlassian/jira/backup
# -----------------------------------------------
# OPTION 3: RESTORE FILES FROM A CLONEZILLA IMAGE
# -----------------------------------------------
# See http://blog.christosoft.de/2012/05/mount-clonezilla-image-to-restore-single-file-browse/
CLONEZILLA_BKDIR=/tmp/sdc1/CLONEZILLA/2014-08-04-15-img_mv-linux-powerhorse
BIGTMPDIR=/tmp/sdb1/tmp
RESTOREIMG=${BIGTMPDIR}/sdd1-restore.img
MOUNTPOINT=/tmp/oldsdd1
if [ ! -e /tmp/sdc1/CLONEZILLA ]; then
mkdir -p /tmp/sdc1
mount -o ro /dev/sdc1 /tmp/sdc1
fi
which clonezilla >/dev/null || sudo apt-get -y install clonezilla
#sudo ls -la ${CLONEZILLA_BKDIR}
# Convert the clonezilla-image into an .img file
if [ ! -e ${RESTOREIMG} ]; then
echo "INFO: Restoring partition to ${RESTOREIMG}"
mkdir -p ${BIGTMPDIR}
sudo cat ${CLONEZILLA_BKDIR}/sdd1.ext4-ptcl-img.gz.* \
| sudo gzip -dc \
| sudo partclone.restore -C -s - -O ${RESTOREIMG}
fi
if [ ! -e ${MOUNTPOINT}/vmlinuz ]; then
echo INFO: Mount image to ${MOUNTPOINT}
mkdir -p ${MOUNTPOINT}
sudo mount -o ro,loop -t ext4 ${RESTOREIMG} ${MOUNTPOINT}
fi
echo "INFO: You can now access the old partition under ${MOUNTPOINT}"
#ls -la ${MOUNTPOINT}/${CONFLUENCE_BKDIR}
if [ ! -e ${CONFLUENCE_BKDIR} ]; then
mkdir -p ${CONFLUENCE_BKDIR}
#chown confluence.root ${CONFLUENCE_BKDIR}
fi
echo "INFO: Restoring Confluence backup ${CONFLUENCE_BACKUP}"
cp ${MOUNTPOINT}/${CONFLUENCE_BKDIR}/${CONFLUENCE_BACKUP} "${DESTDIR}"
#ls -la ${MOUNTPOINT}/${JIRA_BKDIR}
if [ ! -e ${JIRA_BKDIR} ]; then
mkdir -p ${JIRA_BKDIR}
#chown jira.root ${JIRA_BKDIR}
fi
echo "INFO: Restoring JIRA backup ${JIRA_BACKUP}"
cp ${MOUNTPOINT}/${JIRA_BKDIR}/${JIRA_BACKUP} "${DESTDIR}"
# Unmount images
sudo umount ${MOUNTPOINT}
sudo rm -f ${RESTOREIMG}
# EOF
|
gmacario/vagrant-atlassian
|
do-extract-backups.sh
|
Shell
|
mpl-2.0
| 4,190 |
#!/bin/bash
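# Expects these environment variables (names taken from the substitutions
# below): HQ_DOMAIN, REAL_DOMAIN, USER_BASE_DN, FORWARD_HOST, FORWARD_PORT,
# FORWARD_USE_TLS, FORWARD_HOST_USER, FORWARD_HOST_PASSWORD.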
HARAKA_DIR=/usr/src/app
echo "$HQ_DOMAIN" > "$HARAKA_DIR/config/me"
echo "$REAL_DOMAIN" > "$HARAKA_DIR/config/host_list"
echo "dn1=cn=%u,$USER_BASE_DN" >> "$HARAKA_DIR/config/auth_ldap.ini"
cat > "$HARAKA_DIR/config/smtp_forward.ini" <<EOF
host=$FORWARD_HOST
port=$FORWARD_PORT
enable_tls=$FORWARD_USE_TLS
auth_type=plain
auth_user=$FORWARD_HOST_USER
auth_pass=$FORWARD_HOST_PASSWORD
EOF
haraka -c "$HARAKA_DIR"
|
cloudfleet/zeppelin-haraka
|
scripts/start.sh
|
Shell
|
agpl-3.0
| 427 |
#!/bin/bash
#Edit the next two lines as appropriate
time='22:30 oct 2'
file='music/sam.ogg'
#Don't change anything below this line
SCHEDULE='_schedule.sh'
chmod +x $SCHEDULE
./$SCHEDULE $file "$time"
|
papio/papio_nix
|
run.sh
|
Shell
|
agpl-3.0
| 202 |
#!/bin/bash
# xtc - The eXTensible Compiler
# Copyright (C) 2009-2012 New York University
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# version 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
# USA.
# This script generates the performance cdf graph.
if [[ $# -lt 3 ]]; then
echo "USAGE: `basename $0` outfile superc typechef"
exit 1
fi
outfile=$1
superc=$2
typechef=$3
# Generate a multi-line chart.
superc_cdf=$(tempfile -p superc) || exit
typechef_cdf=$(tempfile -p typechef) || exit
superc_max=`cat $superc | grep "^performance " | sort -nk3 | tail -n1 | cut -d' ' -f3 | awk '{printf("%.2f", $0)}'`
superc_max_position=`echo $superc_max | awk '{print $0 + 1}'`
typechef_max=`cat $typechef | grep "^performance " | sort -nk3 | tail -n1 | cut -d' ' -f3 | awk '{printf("%.2f", $0)}'`
superc_total=`performance_sum.sh $superc | awk '{ printf("%.2f", $0/3600)}'`
typechef_total=`performance_sum.sh $typechef | awk '{ printf("%.2f", $0/3600)}'`
performance_cdf.sh $superc > $superc_cdf
performance_cdf.sh $typechef > $typechef_cdf
echo "SuperC summary"
percentile_summary.sh -p ".5 .8 .9 1" $superc performance 3
seconds=`performance_sum.sh $superc`
echo "Total time (seconds): $seconds"
echo "Total time (hours):" `echo "$seconds / 3600" | bc -lq`
echo ""
echo "TypeChef summary"
percentile_summary.sh -p ".5 .8 .9 1" $typechef performance 3
seconds=`performance_sum.sh $typechef`
echo "Total time (seconds): $seconds"
echo "Total time (hours):" `echo "$seconds / 3600" | bc -lq`
echo ""
# The center of the legend.
legendx=30.25
legendy=21
echo "
set term pdfcairo monochrome dashed font \"Helvetica,5pt\" size 3.125, 2.0 lw 2
set output \"$outfile\"
set lmargin 9
set rmargin 3.125
set size ratio .618
set ytics 0,25,100 scale 0
set grid ytics
set xtics scale .5
set xlabel \"Latency in Seconds\"
set ylabel \"Cumulative Percentage\"
set yrange [0:100]
set xrange [0:40]
set datafile separator ' '
set nokey
# set label \"SuperC\" at 6.2, 87 right
set obj rect center 9.5, 62.5 size char(6), 1.5 front fillstyle solid 1.0 border
set label \"SuperC\" at 9.5, 62.5 center front
set label \"TypeChef\" at 24, 62.5 left
# set label \"TypeChef\" at 30, 92 right
# set label \"SuperC's\\nMaximum\\nis $superc_max\s\" at $superc_max_position, 94 left
# set label \"TypeChef's\\nMaximum\\nis $typechef_max\s\" at 39.4, 79 right
set label \"Max: $superc_max\s\" at $superc_max_position, 93 left
set label \"Max: $typechef_max\s\" at 39.4, 84 right
set obj rect center $legendx, $legendy+.32 size 15, 25
set label \"Total Time\" at $legendx, $legendy+8 center
set label \"SuperC\" at $legendx-7, $legendy left
set label \"TypeChef\" at $legendx-7, $legendy-7 left
set label \"$superc_total\hrs\" at $legendx+7, $legendy right
set label \"$typechef_total\hrs\" at $legendx+7, $legendy-7 right
# set key bottom right Right #samplen 1 width -5 reverse
plot \"$superc_cdf\" using 1:(\$2*100) title 'SuperC' with line lc rgb \"black\" lt 1 , \
\"$typechef_cdf\" using 1:(\$2*100) title 'TypeChef' with line lc rgb \"black\" lt 1 , \
\"< echo '$superc_max 100'\" with impulses lt 2
" | gnuplot
|
wandoulabs/xtc-rats
|
xtc-core/src/main/java/xtc/lang/cpp/scripts/graph_performance_cdf.sh
|
Shell
|
lgpl-2.1
| 3,644 |
#!/bin/bash
####################################################################
# script to apply json commands to start the db usage
# @author nicolas malservet
# @version 1.0
####################################################################
mongo qualityformsdb --port 32020 -u qfuseradmin -p 'bbanques2015' ./questionnaire.json.js
mongo qualityformsdb --port 32020 -u qfuseradmin -p 'bbanques2015' ./questionnaire_cession.json.js
mongo qualityformsdb --port 32020 -u qfuseradmin -p 'bbanques2015' ./answer.json.js
|
Biobanques/qualityforms
|
webapp/protected/data/resetdb.sh
|
Shell
|
lgpl-3.0
| 524 |
#! /bin/sh
srcdir=$1
shift
while [ $# -ne 0 ]; do
f=$1
shift
x=`echo $f|sed -e 's,\.\([ch]\),.gir.\1,g'`
echo "$x: $srcdir/$f \$(srcdir)/gengir.sh Makefile"
echo " \$(AM_V_GEN) \$(srcdir)/gengir.sh \$(top_srcdir)/liblangtag \$@"
done
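# Example: for a hypothetical input file "lt-tag.c" with srcdir ".." the loop
# above emits a dependency rule like:
#   lt-tag.gir.c: ../lt-tag.c $(srcdir)/gengir.sh Makefile
#       $(AM_V_GEN) $(srcdir)/gengir.sh $(top_srcdir)/liblangtag $@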
echo "\$(srcdir)/gendir.sh:"
|
tagoh/liblangtag
|
liblangtag-gobject/gendeps.sh
|
Shell
|
lgpl-3.0
| 285 |
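# Assumes a SWIG interface file fib.i alongside fibmodule.c; a minimal sketch
# (the function name and signature are assumptions):
#   %module fib
#   %{ extern int fib(int n); %}
#   extern int fib(int n);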
swig -python fib.i
gcc -fpic -c fibmodule.c fib_wrap.c -I/usr/include/python2.7/
gcc -shared fibmodule.o fib_wrap.o -o _fib.so
echo "Done."
echo "call in python:"
echo "import fib"
echo "fib.fib(10)"
|
haphaeu/yoshimi
|
C_Extension/SWIG/fib/SwigCompileLink.sh
|
Shell
|
lgpl-3.0
| 200 |
#!/bin/sh
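# Minimal chat client for the tlsd example. Usage (inferred): set ALIAS in the
# environment and have a listener on localhost:7000, e.g.
#   ALIAS=alice ./tlsd-chat.sh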
NAME="${ALIAS}[$$]"
quit () {
echo "$(date -u +%R) * ${NAME} quits" | nc localhost 7000
}
trap quit EXIT
JOINMSG="$(date -u +%R) * ${NAME} joins"
echo "${JOINMSG}"
echo "${JOINMSG}" | nc localhost 7000
while read -r LINE
do echo "$(date -u +%R) ${NAME}: ${LINE}"
done | stdbuf -oL tr -d '\000-\011\013-\037' | nc localhost 7000
|
defanor/tlsd
|
examples/chat/tlsd-chat.sh
|
Shell
|
unlicense
| 349 |
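# Note (assumption): ShortTime, XSize, YSize, RRADIR and OutDir must be set by
# the calling environment, e.g.
#   ShortTime=12h XSize=600 YSize=200 RRADIR=/var/lib/rra OutDir=/var/www/graphs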
rrdtool graph - \
--start=end-$ShortTime \
--title=Ylakerta \
--imgformat=PNG \
--width=$XSize \
--base=1000 \
--height=$YSize \
--right-axis 1:0 \
--interlaced \
DEF:b=$RRADIR/ylakerta.rrd:temp:AVERAGE \
DEF:c=$RRADIR/sannanhuone.rrd:temp:AVERAGE \
DEF:d=$RRADIR/jussinhuone.rrd:temp:AVERAGE \
LINE1:b#00BB55:avg \
LINE1:d#002288:jussinhuone \
LINE1:c#AADD00:sannanhuone \
GPRINT:b:MIN:Min%8.2lf%s\
GPRINT:b:AVERAGE:Avg%8.2lf%s\
GPRINT:b:MAX:Max%8.2lf%s\
GPRINT:b:LAST:Last%8.2lf%s > $OutDir/ylakerta.png
|
jussikin/homeAutomation
|
graphScripts/ylakerta.sh
|
Shell
|
unlicense
| 524 |
#!/bin/sh
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Usage ./trick_helper index
#
# You will have to MANUALLY merge the generated adaptation set representation
# from dash_out_trick/stream.mpd into dash_out/stream.mpd and add
# <SupplementalProperty schemeIdUri="http://dashif.org/guidelines/trickmode" value="1"/>
# under the adaptation set that will hold the trick streams. Also, make sure
# that each id is unique among all the generated trick rates.
# TODO: Try to automate this step!
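# Roughly, the merged adaptation set should end up shaped like this
# (illustrative sketch, not generated by this script; the representation id
# is hypothetical):
#   <AdaptationSet ...>
#     <SupplementalProperty schemeIdUri="http://dashif.org/guidelines/trickmode" value="1"/>
#     <Representation id="trick240" maxPlayoutRate="240" .../>
#   </AdaptationSet>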
IDX=$1
RATE=(4 15 60 120 240)
FPS=8
# Extract frames
rm -rf tmp
mkdir tmp
ffmpeg -i 480p-2mbps-test.mp4 -vf fps=${FPS}/${RATE[$IDX]} tmp/frame-%d.png
BPS=(1600k 400k 100k 50k 25k)
# Make video with only i-frames
ffmpeg -r ${FPS}/${RATE[$IDX]} -start_number 1 -i tmp/frame-%d.png -b:v ${BPS[$IDX]} -c:v libx264 -pix_fmt yuv420p -profile:v high -g 0 480p-trick${RATE[$IDX]}.mp4
# Fragment
DUR=(10000 40000 160000 320000 640000)
rm -f 480p-trick${RATE[$IDX]}-frag.mp4
mp4fragment --fragment-duration ${DUR[$IDX]} --timescale 90000 480p-trick${RATE[$IDX]}.mp4 480p-trick${RATE[$IDX]}-frag.mp4
# Make manifest
rm -rf dash_out_trick${RATE[$IDX]}
mp4dash --no-split -o dash_out_trick${RATE[$IDX]} --max-playout-rate=lowest:${RATE[$IDX]} 480p-trick${RATE[$IDX]}-frag.mp4
mv dash_out_trick${RATE[$IDX]}/480p-trick${RATE[$IDX]}-frag.mp4 dash_out/
|
google/ndash
|
tools/streamgen/trick_helper.sh
|
Shell
|
apache-2.0
| 1,868 |
#!/bin/bash
c++ -std=c++17 -O3 -Wall -pedantic -Weffc++ -o popcnt_code_histograms -fopenmp popcnt_code_histograms.cpp
c++ -std=c++17 -O3 -Wall -pedantic -Weffc++ -o pascals_triangle pascals_triangle.cpp
|
tuddbresilience/coding_reliability
|
xor_checksums/build.sh
|
Shell
|
apache-2.0
| 203 |
#!/bin/sh
if [ -z $IFMAPCLI ]; then
echo "set IFMAPCLI environment with 'export IFMAPCLI=/path/to/ifmapcli/jars'"
exit 1
fi
COMMAND="java -jar $IFMAPCLI"
################################################################################
IP_ADDRESS=10.0.0.1
MAC_ADDRESS=ee:ee:ee:ee:ee:ee
USERNAME=joe
echo "delete pdp subgraph"
$COMMAND/pdp.jar delete $USERNAME $IP_ADDRESS $MAC_ADDRESS > /dev/null
|
MReichenbach/visitmeta
|
dataservice/src/main/templates/scripts/08-delete-sub-graph-from-02.sh
|
Shell
|
apache-2.0
| 409 |
#!/usr/bin/env bash
if ! [[ "$0" =~ "tests/semaphore.test.bash" ]]; then
echo "must be run from repository root"
exit 255
fi
TEST_SUFFIX=$(date +%s | base64 | head -c 15)
TEST_OPTS="PASSES='build release e2e' MANUAL_VER=v3.3.7"
if [ "$TEST_ARCH" == "386" ]; then
TEST_OPTS="GOARCH=386 PASSES='build e2e'"
fi
docker run \
--rm \
--volume=`pwd`:/go/src/github.com/coreos/etcd \
gcr.io/etcd-development/etcd-test:go1.10.3 \
/bin/bash -c "${TEST_OPTS} ./test 2>&1 | tee test-${TEST_SUFFIX}.log"
! egrep "(--- FAIL:|panic: test timed out|appears to have leaked)" -B50 -A10 test-${TEST_SUFFIX}.log
|
glevand/coreos--etcd
|
tests/semaphore.test.bash
|
Shell
|
apache-2.0
| 611 |
#!/usr/bin/env bash
APP_PID=../log/app.pid
APP=./app
PKG_FILE=../kibana-5.2.0-darwin-x86_64.tar.gz
WORKSPACE=$(cd $(dirname $0)/; pwd)
cd $WORKSPACE
function create() {
if [ -f $APP_PID ]; then
$APP stop || return $?
fi
tar -xf $PKG_FILE -C ../
if [ $? -ne "0" ]; then
echo {\"error\":\"tar -xf ${PKG_FILE}\"}
fi
}
create
|
ChaosXu/nerv
|
cmd/logui/bin/create.sh
|
Shell
|
apache-2.0
| 369 |
#!/bin/bash
# Copyright 2017, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
source ~/.rvm/scripts/rvm
set -ex
repo=$(dirname $0)/../../..
# First set up all dependences needed for PHP QPS test
cd $repo
cd src/php/tests/qps
composer install
# The proxy worker for PHP is implemented in Ruby
cd ../../../..
ruby src/ruby/qps/proxy-worker.rb $@
|
yongni/grpc
|
tools/run_tests/performance/run_worker_php.sh
|
Shell
|
apache-2.0
| 1,810 |
#!/bin/sh -x
set -e
apt-get update
apt-get install --yes make clang g++ libc++-dev
VERSION='2.18.0'
cd /opt/
wget "https://github.com/stan-dev/cmdstan/releases/download/v${VERSION}/cmdstan-${VERSION}.tar.gz"
tar -xzf "cmdstan-${VERSION}.tar.gz"
mv "cmdstan-${VERSION}" "cmdstan"
cd "cmdstan"
make build -j4
|
thomasathorne/clj-stan
|
install/install_stan.sh
|
Shell
|
apache-2.0
| 313 |
#!/bin/bash
#
# Copyright 2015-2017 Red Hat, Inc. and/or its affiliates
# and other contributors as indicated by the @author tags.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
set -o errexit
set -o nounset
set -o pipefail
STARTTIME=$(date +%s)
source_root=$(dirname "${0}")/..
prefix="jshaughn/"
# set to the alerting version in the target metrics ear
version="1.6.0.Final"
verbose=false
options=""
help=false
for args in "$@"
do
case $args in
--prefix=*)
prefix="${args#*=}"
;;
--version=*)
version="${args#*=}"
;;
--no-cache)
options="${options} --no-cache"
;;
--verbose)
verbose=true
;;
--help)
help=true
;;
esac
done
# allow ENV to take precedent over switches
prefix="${PREFIX:-$prefix}"
version="${OS_TAG:-$version}"
if [ "$help" = true ]; then
echo "Builds the docker images for Hawkular Alerting Tutorial"
echo
echo "Options: "
echo " --prefix=PREFIX"
echo " The prefix to use for the image names."
echo " default: jshaughn/"
echo
echo " --version=VERSION"
echo " The version used to tag the image"
echo " default: 1.6.0.Final"
echo
echo " --no-cache"
echo " If set will perform the build without a cache."
echo
echo " --verbose"
echo " Enables printing of the commands as they run."
echo
echo " --help"
echo " Prints this help message"
echo
exit 0
fi
if [ "$verbose" = true ]; then
set -x
fi
for component in hawkular-alerting-tutorial; do
BUILD_STARTTIME=$(date +%s)
comp_path=.
docker_tag=${prefix}${component}:${version}
echo
echo
echo "--- Building component '$comp_path' with docker tag '$docker_tag' ---"
docker build ${options} -t $docker_tag $comp_path
BUILD_ENDTIME=$(date +%s); echo "--- $docker_tag took $(($BUILD_ENDTIME - $BUILD_STARTTIME)) seconds ---"
echo
echo
done
echo
echo
echo "++ Active images"
docker images | grep ${prefix} | grep ${version} | sort
echo
ret=$?; ENDTIME=$(date +%s); echo "$0 took $(($ENDTIME - $STARTTIME)) seconds"; exit "$ret"
|
jpkrohling/hawkular-alerts
|
examples/tutorial/docker/build-images.sh
|
Shell
|
apache-2.0
| 2,589 |
#!/bin/bash
curl -XPOST '172.17.0.33:8081/bank/account/_bulk?pretty' --data-binary "@data/accounts.json"
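# data/accounts.json must follow the Elasticsearch bulk format: alternating
# action and document lines, newline-terminated, e.g.
#   {"index":{"_id":"1"}}
#   {"account_number":1,"balance":39225,...}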
|
zhaochl/bash-utils
|
es/put_json_data.sh
|
Shell
|
apache-2.0
| 106 |
#!/bin/sh
#
# Script used to generate all the certificate files used in this example.
#
# Requirements:
#
# * openssl
# * keytool (Java JDK 1.8.0+)
# * certstrap (https://github.com/square/certstrap)
#
# See https://github.com/square/keywhiz/wiki/Development-and-test-key-material
#
# Generate a new CA
certstrap init --key-bits 2048 --years 30 --common-name "Keywhiz ACME CA"
keytool -import -file out/Keywhiz_ACME_CA.crt -alias ca -storetype pkcs12 -storepass unicorns -keystore out/Keywhiz_ACME_CA.p12
cp out/Keywhiz_ACME_CA.p12 acme_truststore.p12
# Create client certificates
certstrap request-cert --common-name client
certstrap sign --years 30 --CA "Keywhiz ACME CA" client
certstrap request-cert --common-name noSecretsClient
certstrap sign --years 30 --CA "Keywhiz ACME CA" noSecretsClient
# ...
# Generate PEM files for curl
openssl pkcs12 -export -in out/client.crt -inkey out/client.key -out out/client.p12
openssl pkcs12 -in out/client.p12 -nodes -out out/client.pem
openssl pkcs12 -export -in out/noSecretsClient.crt -inkey out/noSecretsClient.key -out out/noSecretsClient.p12
openssl pkcs12 -in out/noSecretsClient.p12 -nodes -out out/noSecretsClient.pem
# Create a server certificate
certstrap request-cert --domain localhost --ip 127.0.0.1 --organizational-unit server
certstrap sign --years 30 --CA "Keywhiz ACME CA" localhost
keytool -import -file out/localhost.crt -storetype pkcs12 -storepass unicorns -keystore out/localhost.p12
# openssl pkcs12 -aes128 -in out/localhost.crt -inkey out/localhost.key -out out/localhost.p12
cp out/localhost.p12 acme_keystore.p12
|
zuazo/keywhiz-docker
|
examples/keygen/generate_files.sh
|
Shell
|
apache-2.0
| 1,588 |
#!/bin/bash -ex
# This example can be used to manually run a scenario. Some edits and
# customizations may be necessary to match a specific lab.
#
# Typically, the variables set here are representative of the variables
# which would normally be passed from Jenkins jobs to the runner.
export BUNDLE_SCENARIO="openstack-base"
export BUNDLE_STABILITY="development"
export UBUNTU_RELEASE="xenial"
export OPENSTACK_RELEASE="ocata"
export ARCH="arm64"
export TAGS="gigabyte"
export CLOUD_NAME="ruxton-maas"
# 45m is not enough for the Gigabyte arm64 machines to reach ready state.
export WAIT_TIMEOUT="75m"
# WIP and workaround for https://bugs.launchpad.net/bugs/1567807
export BUNDLE_REPO_BRANCH="automation-lp1567807"
# WIP post-deploy tools
export OCT_REPO="lp:~1chb1n/openstack-charm-testing/update-tools-1703"
# ----------------------------------------------------------------------------
openstack-base/run.sh
|
ryan-beisner/charm-test-infra
|
runners/manual-examples/openstack-base-xenial-ocata-arm64-manual.sh
|
Shell
|
apache-2.0
| 919 |
#!/bin/bash
set -o xtrace
set -o errexit
export REPO_URL=https://git.openstack.org
export ZUUL_URL=/home/jenkins/cache-workspace
mkdir -p $ZUUL_URL
export ZUUL_REF=HEAD
export WORKSPACE=/home/jenkins/workspace/testing
mkdir -p $WORKSPACE
export ZUUL_PROJECT=openstack/ironic
export ZUUL_BRANCH=master
# git clone $REPO_URL/$ZUUL_PROJECT $ZUUL_URL/$ZUUL_PROJECT \
# && cd $ZUUL_URL/$ZUUL_PROJECT \
# && git checkout remotes/origin/$ZUUL_BRANCH
ARGS_RSYNC="-rlptDH"
if [ -d /opt/git/pip-cache/ ]
then
for user in jenkins root
do
eval user_dir=~${user}
echo Copying pip cache from /opt/git/pip-cache/ to ${user_dir}/.cache/pip/
sudo mkdir -p ${user_dir}/.cache/pip/
sudo rsync ${ARGS_RSYNC} --exclude=selfcheck.json /opt/git/pip-cache/ ${user_dir}/.cache/pip/
sudo chown -R $user:$user ${user_dir}/.cache/pip/
done
fi
cd $WORKSPACE \
&& git clone --depth 1 $REPO_URL/openstack-infra/devstack-gate
# # Cherry pick our patch: Send DEVSTACK_GATE_TEMPEST_REGEX to grenade jobs
# (cd devstack-gate; git fetch https://review.openstack.org/openstack-infra/devstack-gate refs/changes/44/241044/3 && git cherry-pick FETCH_HEAD)
export DEVSTACK_GATE_SETTINGS="/home/jenkins/update-projects.sh"
# At this point you're ready to set the same environment variables and run the
# same commands/scripts as used in the desired job. The definitions for these
# are found in the openstack-infra/project-config project under the
# jenkins/jobs directory in a file named devstack-gate.yaml. It will probably
# look something like:
# # Let's use KVM
# export DEVSTACK_GATE_LIBVIRT_TYPE=kvm
# From openstack-infra/project-config/jenkins/jobs/devstack-gate.yaml
# # Simulate Ironic
# # name: '{pipeline}-tempest-dsvm-ironic-pxe_ssh{job-suffix}'
# # - devstack-virtual-ironic:
# # postgres: 0
# # build-ramdisk: 1
# # deploy_driver: pxe_ssh
# # deploy-with-ipa: 0
# # client-from-source: 0
# # ironic-lib-from-source: 0
# # ipxe-enabled: 0
# # branch-override: '{branch-override}'
# # tempest-env: 'DEVSTACK_GATE_TEMPEST_REGEX=baremetal'
# # devstack-timeout: 120
# export PROJECTS="openstack/ironic $PROJECTS"
# export PROJECTS="openstack/ironic-lib $PROJECTS"
# export PROJECTS="openstack/ironic-python-agent $PROJECTS"
# export PROJECTS="openstack/python-ironicclient $PROJECTS"
# export PYTHONUNBUFFERED=true
# export DEVSTACK_GATE_TIMEOUT=120
# export DEVSTACK_GATE_TEMPEST=1
# export DEVSTACK_GATE_POSTGRES=0
# export DEVSTACK_GATE_IRONIC=1
# export DEVSTACK_GATE_NEUTRON=1
# export DEVSTACK_GATE_VIRT_DRIVER=ironic
# export DEVSTACK_GATE_IRONIC_DRIVER=pxe_ssh
# export DEVSTACK_GATE_IRONIC_BUILD_RAMDISK=1
# export TEMPEST_CONCURRENCY=1
# export BRANCH_OVERRIDE=default
# if [ "$BRANCH_OVERRIDE" != "default" ] ; then
# export OVERRIDE_ZUUL_BRANCH=$BRANCH_OVERRIDE
# fi
#
# export IRONICCLIENT_FROM_SOURCE=0
# if [ "$IRONICCLIENT_FROM_SOURCE" == "1" ]; then
# export DEVSTACK_PROJECT_FROM_GIT="python-ironicclient"
# fi
#
# export IRONIC_LIB_FROM_SOURCE=0
# if [ "$IRONIC_LIB_FROM_SOURCE" == "1" ]; then
# export DEVSTACK_PROJECT_FROM_GIT="ironic-lib"
# fi
#
# # The IPA ramdisk needs at least 1GB of RAM to run
# export DEVSTACK_LOCAL_CONFIG="IRONIC_VM_SPECS_RAM=1024"$'\n'"IRONIC_VM_COUNT=1"
#
# export DEPLOY_WITH_IPA=0
# if [ "$DEPLOY_WITH_IPA" == "1" ] ; then
# export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_DEPLOY_DRIVER_ISCSI_WITH_IPA=True"
# fi
#
# export IPXE_ENABLED=0
# if [ "$IPXE_ENABLED" == "1" ] ; then
# export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_IPXE_ENABLED=True"
# fi
#
# # Allow switching between full tempest and baremetal-only
# export DEVSTACK_GATE_TEMPEST_REGEX=baremetal
#
# # devstack plugin didn't exist until mitaka
# if [[ "$ZUUL_BRANCH" != "stable/kilo" && "$ZUUL_BRANCH" != "stable/liberty" ]] ; then
# export DEVSTACK_LOCAL_CONFIG+=$'\n'"enable_plugin ironic git://git.openstack.org/openstack/ironic"
# fi
#
# cp devstack-gate/devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh
# ./safe-devstack-vm-gate-wrap.sh 2>&1 | tee ~/output.log
# exit
# From openstack-infra/project-config/jenkins/jobs/devstack-gate.yaml
# ***************** Grenade stuff ************************
# Local mods
export PROJECTS="openstack/ironic $PROJECTS"
export PROJECTS="openstack/ironic-lib $PROJECTS"
export PROJECTS="openstack/ironic-python-agent $PROJECTS"
export PROJECTS="openstack/python-ironicclient $PROJECTS"
export PROJECTS="openstack-dev/grenade $PROJECTS"
export PYTHONUNBUFFERED=true
export GIT_BASE=https://git.openstack.org/
export DEVSTACK_GATE_TIMEOUT=120
export DEVSTACK_GATE_TEMPEST=1
export DEVSTACK_GATE_GRENADE=pullup
export DEVSTACK_GATE_IRONIC=1
export DEVSTACK_GATE_NEUTRON=1
export DEVSTACK_GATE_VIRT_DRIVER=ironic
#export TEMPEST_CONCURRENCY=2
export TEMPEST_CONCURRENCY=1
# The IPA ramdisk needs at least 1GB of RAM to run
export DEVSTACK_LOCAL_CONFIG="IRONIC_VM_SPECS_RAM=1024"$'\n'"IRONIC_VM_COUNT=1"
#export DEVSTACK_LOCAL_CONFIG="IRONIC_VM_SPECS_RAM=1024"$'\n'"IRONIC_VM_COUNT=3"
export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_DEPLOY_DRIVER_ISCSI_WITH_IPA=True"
export IRONIC_RAMDISK_TYPE=coreos
export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_RAMDISK_TYPE=$IRONIC_RAMDISK_TYPE"
# export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_HW_NODE_DISK=2"
# export DEVSTACK_LOCAL_CONFIG+=$'\n'"IRONIC_VM_SPECS_DISK=2"
# JLV set GIT_BASE since those devstack people refuse to change to a sensible
# default and insist on using 'git://' :( Yay for insecurity!
export DEVSTACK_LOCAL_CONFIG+=$'\n'"GIT_BASE=https://git.openstack.org/"
# export BRANCH_OVERRIDE={branch-override}
export BRANCH_OVERRIDE=default
if [ "$BRANCH_OVERRIDE" != "default" ] ; then
export OVERRIDE_ZUUL_BRANCH=$BRANCH_OVERRIDE
fi
# Run only baremetal tests
export DEVSTACK_GATE_TEMPEST_REGEX="baremetal"
cp devstack-gate/devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh
# Pipe in /dev/null, as strange issues occurred when we didn't
./safe-devstack-vm-gate-wrap.sh </dev/null
if [ -d /opt/git/pip-cache/ ]
then
set +o errexit
for user in jenkins root stack
do
eval user_dir=~${user}
echo Copying pip cache files from ${user_dir}/.cache/pip/ to /opt/git/pip-cache/
sudo rsync ${ARGS_RSYNC} --exclude=selfcheck.json ${user_dir}/.cache/pip/ /opt/git/pip-cache/
done
fi
|
sommishra/devstack-gate-test
|
ansible/roles/devstack-gate/files/ironic-grenade.sh
|
Shell
|
apache-2.0
| 6,404 |
#!/bin/bash
# treat unset variables as an error when substituting.
set -u
# exit immediately if a command exits with a nonzero exit status.
set -e
# py.test -x
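# start from a clean database: create temp.db if missing, then delete it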
touch temp.db && rm temp.db
django-admin.py migrate --noinput
django-admin.py demo_data_login
django-admin.py init_app_cms
django-admin.py init_app_compose
django-admin.py init_project
django-admin.py runserver
|
pkimber/hatherleighcommunitycentre_couk
|
init_dev.sh
|
Shell
|
apache-2.0
| 372 |
#!/bin/bash
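# Usage (inferred from the argument handling below):
#   ./query.sh <deploy-name> <function> [arg...]
# Looks up the chaincode ID recorded for <deploy-name> in deploylog.txt and
# queries it through the REST API; requires curl and jq.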
RED='\033[0;31m'
GREEN='\033[0;32m'
ORANGE='\033[0;33m'
NC='\033[0m'
while IFS=' ' read -ra line || [[ -n "$line" ]]; do
if [ "${line[1]}" = "$1" ]; then
ID="${line[2]}"
fi
done < "deploylog.txt"
ARGS=''
for var in ${@:3}
do
ARGS+="\"$var\", "
done
ARGS=${ARGS%,*}
RESULT=$(curl -s -X POST --header "Content-Type: application/json" --header "Accept: application/json" -d "{
\"jsonrpc\": \"2.0\",
\"method\": \"query\",
\"params\": {
\"type\": 1,
\"chaincodeID\": {
\"name\": \"$ID\"
},
\"ctorMsg\": {
\"function\": \"$2\",
\"args\": [$ARGS]
},
\"secureContext\": \"admin\"
},
\"id\": 2
}" "https://1acda275b31041d89efd8a04b9bac2ea-vp0.us.blockchain.ibm.com:5004/chaincode")
ST=$(echo $RESULT | jq -r '.result.status')
MSG=$(echo $RESULT | jq -r '.result.message')
echo -e "${ORANGE}Status: $ST ${NC}"
echo -e "${GREEN}Result: $MSG ${NC}"
|
Earlvik/learn-chaincode
|
query.sh
|
Shell
|
apache-2.0
| 990 |
vagrant up
vagrant ssh
ansible-playbook /vagrant/provisioning/playbook.yml --connection=local
# PHP HTTP
cd ~/code/riak-php-client
composer update
./vendor/bin/phpunit
# PHP PB
cp ~/code/riak-phppb-client/vendor/allegro/protobuf /tmp/
cd /tmp/protobuf
phpize
./configure
make
sudo make install
cd ~/code/riak-phppb-client
composer update
./vendor/bin/phpunit
# Go
cd ~/code/go/src/github.com/basho/riak-go-client/
"host" go get -u -v
go test -v -tags=integration
# Java
cd ~/code/riak-java-client
mvn clean install -Pitest -Dcom.basho.riak.2i=true -Dcom.basho.riak.yokozuna=true -Dcom.basho.riak.buckettype=true -Dcom.basho.riak.crdt=true
# Node
cd ~/code/riak-nodejs-client
curl riak-test:8098/ping
curl 127.0.0.1:8098/ping
|
basho-labs/riak-clients-vagrant
|
provisioning/commands.sh
|
Shell
|
apache-2.0
| 732 |
#!/usr/bin/env bash
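# If REDIS_MASTER is set, rewrite the commented-out "slaveof" template in
# redis.conf so this instance replicates from it; e.g. REDIS_MASTER=10.0.0.5
# yields "slaveof 10.0.0.5 6379".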
if [ -n "$REDIS_MASTER" ]; then
sed -i "s/^# slaveof\(.*\)$/slaveof $REDIS_MASTER 6379/" /etc/redis/redis.conf
fi
redis-server /etc/redis/redis.conf
|
webrecorder/webrecorder
|
redis/init-redis-conf.sh
|
Shell
|
apache-2.0
| 176 |
#!/bin/bash
# Auto install astercc commercial and related packages
# By Solo #### [email protected] last modify 2012-04-17
# By Solo #### [email protected] last modify 2013-02-06 for asterCC 1.2-beta
# By Solo #### [email protected] last modify 2013-05-20, fixed the bug where asterisk always used asterccuser/asterccsecret as the AMI user
# By Solo #### [email protected] last modify 2014-02-07, disabled the netjet dahdi driver
# By Bob #### switched to the UCServer UI, updated 2015-09-08
# uname -r: if the kernel version contains -pve, go to /usr/src and run:
# ln -s kernels/2.6.18-308.4.1.el5-x86_64/ linux
function newRepo_install(){
cd /usr/src
version=`cat /etc/issue|grep -o 'release [0-9]\+'`
arch=i386
bit=`getconf LONG_BIT`
if [ $bit == 64 ]; then
arch=x86_64
fi;
if [ "$version" == "release 6" ]; then
if [ ! -e ./epel-release-$epelver6.noarch.rpm ]; then
wget http://dl.iuscommunity.org/pub/ius/archive/Redhat/6/$arch/epel-release-$epelver6.noarch.rpm
fi;
if [ ! -e ./ius-release-$iusver6.ius.el6.noarch.rpm ]; then
wget http://dl.iuscommunity.org/pub/ius/archive/Redhat/6/$arch/ius-release-$iusver6.ius.el6.noarch.rpm
fi;
rpm -ivh epel-release-$epelver6.noarch.rpm ius-release-$iusver6.ius.el6.noarch.rpm;
if [ ! -e ./percona-release-0.1-3.noarch.rpm ]; then
wget http://www.percona.com/downloads/percona-release/redhat/0.1-3/percona-release-0.1-3.noarch.rpm
fi;
rpm -ivh percona-release-0.1-3.noarch.rpm
else
if [ ! -e ./epel-release-$epelver5.noarch.rpm ]; then
wget http://dl.iuscommunity.org/pub/ius/archive/Redhat/5/$arch/epel-release-$epelver5.noarch.rpm
fi;
if [ ! -e ./ius-release-$iusver5.ius.el5.noarch.rpm ]; then
wget http://dl.iuscommunity.org/pub/ius/archive/Redhat/5/$arch/ius-release-$iusver5.ius.el5.noarch.rpm
fi;
rpm -ivh epel-release-$epelver5.noarch.rpm ius-release-$iusver5.ius.el5.noarch.rpm;
fi
sed -i "s/mirrorlist/#mirrorlist/" /etc/yum.repos.d/ius.repo
sed -i "s/#baseurl/baseurl/" /etc/yum.repos.d/ius.repo
}
function yum_install(){
#yum -y upgrade
yum -y remove php*
yum -y remove asterisk*
yum -y install bash openssl openssh-server openssh-clients tcpdump wget mlocate openvpn ghostscript mailx cpan crontabs Percona-Server-server-55 Percona-Server-devel-55 Percona-Server-client-55 glibc gcc-c++ libtermcap-devel newt newt-devel ncurses ncurses-devel libtool libxml2-devel kernel-devel kernel-PAE-devel subversion flex libstdc++-devel libstdc++ unzip sharutils openssl-devel make kernel-headers
chkconfig mysql on
chkconfig crond on
service crond start
}
function ioncube_install(){
echo -e "\e[32mStarting Install ioncube\e[m"
cd /usr/src
bit=`getconf LONG_BIT`
if [ $bit == 32 ]; then
if [ ! -e ./ioncube_loaders_lin_x86.tar.gz ]; then
wget http://downloads2.ioncube.com/loader_downloads/ioncube_loaders_lin_x86.tar.gz
fi
tar zxf ioncube_loaders_lin_x86.tar.gz
else
if [ ! -e ./ioncube_loaders_lin_x86-64.tar.gz ]; then
wget http://downloads2.ioncube.com/loader_downloads/ioncube_loaders_lin_x86-64.tar.gz
fi
tar zxf ioncube_loaders_lin_x86-64.tar.gz
fi
mv /usr/src/ioncube /usr/local/
sed -i "/ioncube/d" /etc/php.ini
echo "zend_extension = /usr/local/ioncube/ioncube_loader_lin_5.3.so" >> /etc/php.ini
/etc/init.d/php-fpm start
echo -e "\e[32mIoncube Install OK!\e[m"
}
function php_install(){
echo -e "\e[32mStarting Install PHP-Fpm\e[m"
if [ -e /etc/php.ini.rpmnew -a ! -e /etc/php.ini ]; then
cp /etc/php.ini.rpmnew /etc/php.ini
fi
yum -y install php54-fpm php54-cli pcre-devel php54-mysql sox php54-gd php54-mbstring php54-ioncube-loader
sed -i "s/short_open_tag = Off/short_open_tag = On/" /etc/php.ini
sed -i "s/memory_limit = 16M /memory_limit = 128M /" /etc/php.ini
sed -i "s/upload_max_filesize = 2M/upload_max_filesize = 40M /" /etc/php.ini
sed -i "s/post_max_size = 8M/post_max_size = 40M/" /etc/php.ini
sed -i '/^error_reporting/c error_reporting = E_ALL & ~E_DEPRECATED' /etc/php.ini
sed -i "s/user = apache/user = asterisk/" /etc/php-fpm.d/www.conf
sed -i "s/group = apache/group = asterisk/" /etc/php-fpm.d/www.conf
chkconfig php-fpm on
echo -e "\e[32mPHP-Fpm Install OK!\e[m"
}
function fax_install(){
echo -e "\e[32mStarting Install FAX\e[m"
version=`cat /etc/issue|grep -o 'release [0-9]\+'`
cd /usr/src
#yum -y install hylafax
yum -y install libtiff libtiff-devel
bit=`getconf LONG_BIT`
if [ $bit == 32 ]; then
if [ "$version" == "release 6" ]; then
if [ ! -e ./hylafax-client-6.0.6-1rhel6.i686.rpm ]; then
wget ftp://ftp.hylafax.org/binary/linux/redhat/6.0.6/hylafax-client-6.0.6-1rhel6.i686.rpm
fi
if [ ! -e ./hylafax-server-6.0.6-1rhel6.i686.rpm ]; then
wget ftp://ftp.hylafax.org/binary/linux/redhat/6.0.6/hylafax-server-6.0.6-1rhel6.i686.rpm
fi
else
if [ ! -e ./hylafax-client-6.0.6-1rhel5.i386.rpm ]; then
wget ftp://ftp.hylafax.org/binary/linux/redhat/6.0.6/hylafax-client-6.0.6-1rhel5.i386.rpm
fi
if [ ! -e ./hylafax-server-6.0.6-1rhel5.i386.rpm ]; then
wget ftp://ftp.hylafax.org/binary/linux/redhat/6.0.6/hylafax-server-6.0.6-1rhel5.i386.rpm
fi
fi
else
if [ "$version" == "release 6" ]; then
if [ ! -e ./hylafax-server-6.0.6-1rhel6.x86_64.rpm ]; then
wget ftp://ftp.hylafax.org/binary/linux/redhat/6.0.6/hylafax-server-6.0.6-1rhel6.x86_64.rpm
fi
if [ ! -e ./hylafax-client-6.0.6-1rhel6.x86_64.rpm ]; then
wget ftp://ftp.hylafax.org/binary/linux/redhat/6.0.6/hylafax-client-6.0.6-1rhel6.x86_64.rpm
fi
else
if [ ! -e ./hylafax-server-6.0.6-1rhel5.x86_64.rpm ]; then
wget ftp://ftp.hylafax.org/binary/linux/redhat/6.0.6/hylafax-server-6.0.6-1rhel5.x86_64.rpm
fi
if [ ! -e ./hylafax-client-6.0.6-1rhel5.x86_64.rpm ]; then
wget ftp://ftp.hylafax.org/binary/linux/redhat/6.0.6/hylafax-client-6.0.6-1rhel5.x86_64.rpm
fi
fi
fi
rpm -ivh hylafax-*
if [ ! -e ./iaxmodem-1.2.0.tar.gz ]; then
wget http://sourceforge.net/projects/iaxmodem/files/latest/download?source=files -O iaxmodem-1.2.0.tar.gz
fi
tar zxf iaxmodem-1.2.0.tar.gz
cd iaxmodem-1.2.0
./configure
make
cp ./iaxmodem /usr/sbin/
chmod 777 /var/spool/hylafax/bin
chmod 777 /var/spool/hylafax/etc/
chmod 777 /var/spool/hylafax/docq/
chmod 777 /var/spool/hylafax/doneq/
mkdir /etc/iaxmodem/
chown asterisk.asterisk /etc/iaxmodem/
mkdir /var/log/iaxmodem/
chown asterisk.asterisk /var/log/iaxmodem/
cat > /var/spool/hylafax/etc/setup.cache << EOF
# Warning, this file was automatically generated by faxsetup
# on Thu Jun 28 13:48:41 CST 2012 for root
AWK='/usr/bin/gawk'
BASE64ENCODE='/usr/bin/uuencode -m ==== | /bin/grep -v ===='
BIN='/usr/bin'
CAT='/bin/cat'
CHGRP='/bin/chgrp'
CHMOD='/bin/chmod'
CHOWN='/bin/chown'
CP='/bin/cp'
DPSRIP='/var/spool/hylafax/bin/ps2fax'
ECHO='/bin/echo'
ENCODING='base64'
FAXQ_SERVER='yes'
FONTPATH='/usr/share/ghostscript/8.70/Resource/Init:/usr/share/ghostscript/8.70/lib:/usr/share/ghostscript/8.70/Resource/Font:/usr/share/ghostscript/fonts:/usr/share/fonts/default/ghostscript:/usr/share/fonts/default/Type1:/usr/share/fonts/default/amspsfnt/pfb:/usr/share/fonts/default/cmpsfont/pfb:/usr/share/fonts/japanese:/etc/ghostscript'
FUSER='/sbin/fuser'
GREP='/bin/grep'
GSRIP='/usr/bin/gs'
HFAXD_OLD_PROTOCOL='no'
HFAXD_SERVER='yes'
HFAXD_SNPP_SERVER='no'
IMPRIP=''
LIBDATA='/etc/hylafax'
LIBEXEC='/usr/sbin'
LN='/bin/ln'
MANDIR='/usr/share/man'
MIMENCODE='mimencode'
MKFIFO='/usr/bin/mkfifo'
MV='/bin/mv'
PATHEGETTY='/bin/egetty'
PATHGETTY='/sbin/mgetty'
PATH='/usr/sbin:/bin:/usr/bin:/etc:/usr/local/bin'
PATHVGETTY='/sbin/vgetty'
PSPACKAGE='gs'
QPENCODE='qp-encode'
RM='/bin/rm'
SBIN='/usr/sbin'
SCRIPT_SH='/bin/bash'
SED='/bin/sed'
SENDMAIL='/usr/sbin/sendmail'
SPOOL='/var/spool/hylafax'
SYSVINIT=''
TARGET='i686-pc-linux-gnu'
TIFF2PDF='/usr/bin/tiff2pdf'
TIFFBIN='/usr/bin'
TTYCMD='/usr/bin/tty'
UUCP_LOCKDIR='/var/lock'
UUCP_LOCKTYPE='ascii'
UUENCODE='/usr/bin/uuencode'
EOF
echo -e "\e[32mFAX Install OK!\e[m"
}
function mpg123_install(){
echo -e "\e[32mStarting Install MPG123\e[m"
cd /usr/src
if [ ! -e ./mpg123-$mpg123ver.tar.bz2 ]; then
wget http://sourceforge.net/projects/mpg123/files/mpg123/$mpg123ver/mpg123-$mpg123ver.tar.bz2/download -O mpg123-$mpg123ver.tar.bz2
fi
tar jxf mpg123-$mpg123ver.tar.bz2
cd mpg123-$mpg123ver
./configure
make
make install
echo -e "\e[32mMPG123 Install OK!\e[m"
}
function dahdi_install() {
echo -e "\e[32mStarting Install DAHDI\e[m"
cd /usr/src
if [ ! -e ./dahdi-linux-complete-$dahdiver.tar.gz ]; then
wget http://downcc.ucserver.org:8082/Files/dahdi-linux-complete-$dahdiver.tar.gz
if [ ! -e ./dahdi-linux-complete-$dahdiver.tar.gz ]; then
wget http://downcc.ucserver.org:8082/Files/dahdi-linux-complete/releases/dahdi-linux-complete-$dahdiver.tar.gz
fi
fi
tar zxf dahdi-linux-complete-$dahdiver.tar.gz
if [ $? != 0 ]; then
echo -e "fatal: dont have valid dahdi tar package\n"
exit 1
fi
cd dahdi-linux-complete-$dahdiver
make
if [ $? != 0 ]; then
yum -y update kernel
echo -e "\e[32mplease reboot your server and run this script again\e[m\n"
exit 1;
fi
make install
make config
echo "blacklist netjet" >> /etc/modprobe.d/dahdi.blacklist.conf
/etc/init.d/dahdi start
/usr/sbin/dahdi_genconf
echo -e "\e[32mDAHDI Install OK!\e[m"
}
function nginx_install(){
echo -e "\e[32mStarting install nginx\e[m"
service httpd stop
chkconfig httpd off
cd /usr/src
if [ ! -e ./nginx-$nginxver.tar.gz ]; then
wget $downloadmirror/nginx-$nginxver.tar.gz
fi
tar zxf nginx-$nginxver.tar.gz
if [ $? != 0 ]; then
echo -e "fatal: dont have valid nginx tar package\n"
exit 1
fi
if [ ! -e ./nginx-push-stream-module-master-20130206.tar.gz ]; then
wget $downloadmirror/nginx-push-stream-module-master-20130206.tar.gz
fi
tar zxf nginx-push-stream-module-master-20130206.tar.gz
if [ $? != 0 ]; then
echo -e "fatal: dont have valid nginx push tar package\n"
exit 1
fi
cd nginx-$nginxver
./configure --add-module=/usr/src/nginx-push-stream-module-master --with-http_ssl_module --user=asterisk --group=asterisk
make
make install
wget $downloadmirror/nginx.zip
unzip ./nginx.zip
mv ./nginx /etc/init.d/
chmod +x /etc/init.d/nginx
chkconfig nginx on
echo -e "\e[32mNginx Install OK!\e[m"
}
function asterisk_install() {
echo -e "\e[32mStarting Install Asterisk\e[m"
useradd -u 500 -c "Asterisk PBX" -d /var/lib/asterisk asterisk
#Define a user called asterisk.
mkdir /var/run/asterisk /var/log/asterisk /var/spool/asterisk /var/lib/asterisk
chown -R asterisk:asterisk /var/run/asterisk /var/log/asterisk /var/lib/php /var/lib/asterisk /var/spool/asterisk/
#Change ownership of these directories to asterisk.
sed -i 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
setenforce 0
#disable SELinux
cd /usr/src
if [ ! -e ./asterisk-$asteriskver.tar.gz ]; then
wget http://downcc.ucserver.org:8082/Files/asterisk-$asteriskver.tar.gz
fi
tar zxf asterisk-$asteriskver.tar.gz
if [ $? != 0 ]; then
echo "fatal: dont have valid asterisk tar package"
exit 1
fi
cd asterisk-$asteriskver
./configure '-disable-xmldoc'
make
make install
make samples
#This command will install the default configuration files.
#make progdocs
#This command will create documentation using the doxygen software from comments placed within the source code by the developers.
make config
#This command will install the startup scripts and configure the system (through the use of the chkconfig command) to execute Asterisk automatically at startup.
sed -i "s/#AST_USER/AST_USER/" /etc/init.d/asterisk
sed -i "s/#AST_GROUP/AST_GROUP/" /etc/init.d/asterisk
sed -i 's/;enable=yes/enable=no/' /etc/asterisk/cdr.conf
# set AMI user
cat > /etc/asterisk/manager.conf << EOF
[general]
enabled = yes
port = 5038
bindaddr = 0.0.0.0
displayconnects=no
[asterccuser]
secret = asterccsecret
deny=0.0.0.0/0.0.0.0
permit=127.0.0.1/255.255.255.0
read = system,call,agent
write = all
EOF
/etc/init.d/asterisk restart
chkconfig asterisk on
echo -e "\e[32mAsterisk Install OK!\e[m"
}
function lame_install(){
echo -e "\e[32mStarting Install Lame for mp3 monitor\e[m"
cd /usr/src
if [ ! -e ./lame-3.99.5.tar.gz ]; then
wget http://sourceforge.net/projects/lame/files/lame/3.99/lame-3.99.5.tar.gz/download -O lame-3.99.5.tar.gz
fi
tar zxf lame-3.99.5.tar.gz
if [ $? != 0 ]; then
echo -e "\e[32mdont have valid lame tar package, you may lose the feature to check recordings on line\e[m\n"
return 1
fi
cd lame-3.99.5
./configure && make && make install
if [ $? != 0 ]; then
echo -e "\e[32mfailed to install lame, you may lose the feature to check recordings on line\e[m\n"
return 1
fi
ln -s /usr/local/bin/lame /usr/bin/
echo -e "\e[32mLame install OK!\e[m"
return 0;
}
function libpri_install() {
echo -e "\e[32mStarting Install LibPRI\e[m"
cd /usr/src
if [ ! -e ./libpri-$libpriver.tar.gz ]; then
wget http://downloads.asterisk.org/pub/telephony/libpri/releases/libpri-$libpriver.tar.gz
fi
tar zxf libpri-$libpriver.tar.gz
if [ $? != 0 ]; then
echo -e "fatal: dont have valid libpri tar package\n"
exit 1
fi
cd libpri-$libpriver
make
make install
echo -e "\e[32mLibPRI Install OK!\e[m"
}
function nginx_conf_install(){
mkdir /var/www/html/asterCC/http-log -p
cat > /usr/local/nginx/conf/nginx.conf << EOF
#user nobody;
worker_processes auto;
worker_rlimit_nofile 655350;
#error_log logs/error.log;
#error_log logs/error.log notice;
#error_log logs/error.log info;
pid /var/run/nginx.pid;
events {
use epoll;
worker_connections 1024;
}
http {
include mime.types;
default_type application/octet-stream;
log_format main '\$remote_addr - \$remote_user [\$time_local] "\$request" '
'\$status \$body_bytes_sent "\$http_referer" '
'"\$http_user_agent" "\$http_x_forwarded_for"';
#access_log logs/access.log main;
sendfile on;
#tcp_nopush on;
#keepalive_timeout 0;
keepalive_timeout 65;
client_header_buffer_size 32k;
large_client_header_buffers 4 32k;
push_stream_store_messages on;
push_stream_shared_memory_size 256M;
push_stream_message_ttl 15m;
#gzip on;
server
{
listen 80 default;
client_max_body_size 20M;
index index.html index.htm index.php;
root /var/www/html/asterCC/app/webroot;
location / {
index index.php;
if (-f \$request_filename) {
break;
}
if (!-f \$request_filename) {
rewrite ^/(.+)\$ /index.php?url=\$1 last;
break;
}
location /agentindesks/pushagent {
push_stream_publisher admin;
set \$push_stream_channel_id \$arg_channel;
}
location ~ /agentindesks/agentpull/(.*) {
push_stream_subscriber long-polling;
set \$push_stream_channels_path \$1;
push_stream_message_template ~text~;
push_stream_longpolling_connection_ttl 60s;
}
location /publicapi/pushagent {
push_stream_publisher admin;
set \$push_stream_channel_id \$arg_channel;
}
location ~ /publicapi/agentpull/(.*) {
push_stream_subscriber long-polling;
set \$push_stream_channels_path \$1;
push_stream_message_template "{\\"text\\":\\"~text~\\",\\"tag\\":~tag~,\\"time\\":\\"~time~\\"}";
push_stream_longpolling_connection_ttl 60s;
push_stream_last_received_message_tag \$arg_etag;
push_stream_last_received_message_time \$arg_since;
}
location /systemevents/pushagent {
push_stream_publisher admin;
set \$push_stream_channel_id \$arg_channel;
}
location ~ /systemevents/agentpull/(.*) {
push_stream_subscriber long-polling;
set \$push_stream_channels_path \$1;
push_stream_message_template ~text~;
push_stream_longpolling_connection_ttl 60s;
}
}
location ~ /\.ht {
deny all;
}
location ~ .*\.(php|php5)?\$
{
fastcgi_pass 127.0.0.1:9000;
fastcgi_index index.php;
include fastcgi_params;
fastcgi_param SCRIPT_FILENAME \$document_root\$fastcgi_script_name;
fastcgi_connect_timeout 60;
fastcgi_send_timeout 180;
fastcgi_read_timeout 180;
fastcgi_buffer_size 128k;
fastcgi_buffers 4 256k;
fastcgi_busy_buffers_size 256k;
fastcgi_temp_file_write_size 256k;
fastcgi_intercept_errors on;
}
location ~ .*\.(gif|jpg|jpeg|png|bmp|swf|wav)$
{
access_log off;
expires 15d;
}
location ~ .*\.(js|css)?$
{
expires 1d;
}
# access_log /var/www/html/asterCC/http-log/access.log main;
}
}
EOF
echo -ne "
* soft nofile 655360
* hard nofile 655360
" >> /etc/security/limits.conf
echo "fs.file-max = 1572775" >> /etc/sysctl.conf
echo "net.ipv4.ip_local_port_range = 1024 65000" >> /etc/sysctl.conf
echo "net.ipv4.tcp_fin_timeout = 45" >> /etc/sysctl.conf
echo "vm.dirty_ratio=10" >> /etc/sysctl.conf
echo "net.ipv4.tcp_tw_reuse = 1" >> /etc/sysctl.conf
echo "net.ipv4.tcp_tw_recycle = 1" >> /etc/sysctl.conf
sysctl -p
service nginx restart
}
function astercc_install() {
/etc/init.d/asterisk restart
echo -e "\e[32mStarting Install AsterCC\e[m"
cd /usr/src
if [ ! -e ./astercc-$asterccver.tar.gz ]; then
wget $downloadmirror/astercc-$asterccver.tar.gz -t 5
fi
tar zxf astercc-$asterccver.tar.gz
if [ $? != 0 ]; then
echo "dont have valid astercc tar package, try run this script again or download astercc-$asterccver.tar.gz to /usr/src manually then run this script again"
exit 1
fi
cd astercc-$asterccver
chmod +x install.sh
. /tmp/.mysql_root_pw.$$
./install.sh -dbu=root -dbpw=$mysql_root_pw -amiu=$amiu -amipw=$amipw -allbydefault
echo -e "\e[32mAsterCC Commercial Install OK!\e[m"
}
function set_ami(){
while true;do
echo -e "\e[32mplease give an AMI user\e[m";
read amiu;
if [ "X${amiu}" != "X" ]; then
break;
fi
done
while true;do
echo -e "\e[32mplease give an AMI secret\e[m";
read amipw;
if [ "X${amipw}" != "X" ]; then
break;
fi
done
cat > /etc/asterisk/manager.conf << EOF
[general]
enabled = yes
port = 5038
bindaddr = 0.0.0.0
displayconnects=no
[$amiu]
secret = $amipw
deny=0.0.0.0/0.0.0.0
permit=127.0.0.1/255.255.255.0
read = system,call,agent
write = all
EOF
asterisk -rx "manager reload"
echo amiu=$amiu >> /tmp/.mysql_root_pw.$$
echo amipw=$amipw >> /tmp/.mysql_root_pw.$$
}
function get_mysql_passwd(){
service mysql start
while true;do
echo -e "\e[32mplease enter your mysql root passwd\e[m";
read mysql_passwd;
# make sure it's not an empty passwd
if [ "X${mysql_passwd}" != "X" ]; then
mysqladmin -uroot -p$mysql_passwd password $mysql_passwd # try with the given password first
if [ $? == 0 ]; then
break;
fi
mysqladmin password "$mysql_passwd" # fall back: assume the current root password is empty
if [ $? == 0 ]; then
break;
fi
echo -e "\e[32minvalid password, please try again\e[m"
fi
done
echo mysql_root_pw=$mysql_passwd > /tmp/.mysql_root_pw.$$
}
function iptables_config(){
echo "start setting firewall"
iptables -I INPUT -p tcp --dport 80 -j ACCEPT
iptables -A INPUT -p udp -m udp --dport 5060 -j ACCEPT
iptables -A INPUT -p udp -m udp --dport 5036 -j ACCEPT
iptables -A INPUT -p udp -m udp --dport 4569 -j ACCEPT
iptables -A INPUT -p udp -m udp --dport 10000:20000 -j ACCEPT
iptables-save > /etc/sysconfig/iptables
service iptables restart
}
function UI() {
mkdir -p /usr/src/UI
cd /usr/src/UI
echo "Start setting UCServer UI"
wget http://downcc.ucserver.org:8082/Files/UCS-UI.tar.gz
wget http://downcc.ucserver.org:8082/Files/update.sh
bash /usr/src/UI/update.sh
rm -rf /usr/src/UI
}
function run() {
downloadmirror=http://downcc.ucserver.org:8082
echo "please select the mirror you want to download from:"
echo "1: Shanghai Huaqiao IDC "
read downloadserver;
if [ "$downloadserver" == "1" ]; then
downloadmirror=http://downcc.ucserver.org:8082/Files;
fi
wget $downloadmirror/ucservercc1 -t 5
if [ ! -e ./ucservercc1 ]; then
echo "failed to get version infromation,please try again"
exit 1;
fi
. ./ucservercc1
/bin/rm -rf ./ucservercc1
newRepo_install
yum_install
php_install
fax_install
dahdi_install
libpri_install
asterisk_install
lame_install
mpg123_install
nginx_install
#ioncube_install
get_mysql_passwd
set_ami
/etc/init.d/asterisk restart
astercc_install
nginx_conf_install
iptables_config
UI
echo "asterisk ALL = NOPASSWD :/etc/init.d/asterisk" >> /etc/sudoers
echo "asterisk ALL = NOPASSWD: /usr/bin/reboot" >> /etc/sudoers
echo "asterisk ALL = NOPASSWD: /sbin/shutdown" >> /etc/sudoers
/bin/rm -rf /tmp/.mysql_root_pw.$$
ln -s /var/lib/asterisk/moh /var/lib/asterisk/mohmp3
/etc/init.d/php-fpm start
/etc/init.d/iptables stop
/etc/init.d/asterccd restart
chkconfig --del iptables
echo -e "\e[32mUCServer-CC installation finish!\e[m";
echo -e "\e[32mPlease email to [email protected] to get the license!\e[m";
}
run
|
cake654326/UCServer-CC
|
installcc-centos.sh
|
Shell
|
apache-2.0
| 21,103 |
#!/usr/bin/env bash
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Configurable parameters
PROJECT=""
BUCKET=""
REGION=""
# Datastore parameters
KIND="wikipedia"
# Cloud ML Engine parameters
TIER="CUSTOM"
# Annoy parameters
NUM_TREES=100
[[ -z "${PROJECT}" ]] && echo "PROJECT not set" && exit 1
[[ -z "${BUCKET}" ]] && echo "BUCKET not set" && exit 1
[[ -z "${REGION}" ]] && echo "REGION not set" && exit 1
[[ -z "${KIND}" ]] && echo "KIND not set" && exit 1
[[ -z "${TIER}" ]] && echo "TIER not set" && exit 1
[[ -z "${NUM_TREES}" ]] && echo "NUM_TREES not set" && exit 1
# File locations parameters
EMBED_FILES=gs://"${BUCKET}/${KIND}/embeddings/embed-*"
INDEX_FILE=gs://"${BUCKET}/${KIND}/index/embeds.index"
# Cloud ML Engine parameters
PACKAGE_PATH=builder
JOB_DIR=gs://"${BUCKET}/${KIND}/index/jobs"
CURRENT_DATE=`date +%Y%m%d_%H%M%S`
JOB_NAME="${KIND}_build_annoy_index_${CURRENT_DATE}"
echo "Submitting a Cloud ML Engine job..."
# Command to submit the Cloud ML Engine job
gcloud ml-engine jobs submit training "${JOB_NAME}" \
--job-dir="${JOB_DIR}" \
--runtime-version=1.12 \
--region="${REGION}" \
--scale-tier="${TIER}" \
--module-name=builder.task \
--package-path="${PACKAGE_PATH}" \
--config=config.yaml \
-- \
--embedding-files="${EMBED_FILES}" \
--index-file="${INDEX_FILE}" \
--num-trees="${NUM_TREES}"
echo -e "Cloud ML Engine job submitted successfully!"
|
GoogleCloudPlatform/realtime-embeddings-matching
|
text-semantic-search/index_builder/submit.sh
|
Shell
|
apache-2.0
| 2,005 |
#! /bin/bash
display_usage() {
echo "give :"
echo " 1- name of the node"
echo " "
}
# if exactly one argument is not supplied, display usage
if [ $# != 1 ]
then
display_usage
exit 1
fi
source ./configs/config.cfg
echo " "
echo "Now you can :"
echo " $ sudo docker ps"
echo " find the CONTAINER ID (on left)"
echo " type this command with the 3/4 first caracters of the CONTAINER ID"
echo " $ sudo docker exec -i -t 3_4firstChar /bin/bash"
echo " "
echo " "
gcloud compute ssh \
$1 \
--zone=$kube_zone
|
tdeheurles/gcloudAutomation
|
sshMachine.sh
|
Shell
|
apache-2.0
| 515 |
#!/bin/bash
#
# Copyright 2014-2016 CyberVision, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
RUN_DIR=`pwd`
function help {
echo "Choose one of the following: {build|run|deploy|clean}"
exit 1
}
if [ $# -eq 0 ]
then
help
fi
APP_NAME="demo_client"
PROJECT_HOME=$(pwd)
BUILD_DIR="build"
LIBS_PATH="libs"
KAA_LIB_PATH="$LIBS_PATH/kaa"
KAA_C_LIB_HEADER_PATH="$KAA_LIB_PATH/src"
KAA_CPP_LIB_HEADER_PATH="$KAA_LIB_PATH/kaa"
KAA_SDK_TAR="kaa-c*.tar.gz"
#Wifi settings
SSID="xxx"
PASSWORD="xxxxxxxxx"
#Firmware version
MAJOR_VERSION=1
MINOR_VERSION=0
DEMO_LED=0
function build_app {
read -p "Enter WiFi SSID: " SSID
read -p "Enter WiFi Password: " PASSWORD
read -p "Enter firmware major version: " MAJOR_VERSION
read -p "Enter firmware minor version: " MINOR_VERSION
read -p "Enter firmware classifier: " CLASSIFIER_VERSION
read -p "Enter flags of an active leds[ red=0x01 orange=0x02 green=0x04 ]: " DEMO_LED
if [ -z $DEMO_LED ]
then
DEMO_LED=0
fi
cd $PROJECT_HOME &&
mkdir -p "$PROJECT_HOME/$BUILD_DIR" &&
cd $BUILD_DIR
cmake -DKAA_PLATFORM=cc32xx -DCMAKE_TOOLCHAIN_FILE=../libs/kaa/toolchains/cc32xx.cmake -DBUILD_TESTING=OFF -DSSID=$SSID -DPWD=$PASSWORD -DMAJOR_VERSION=$MAJOR_VERSION -DMINOR_VERSION=$MINOR_VERSION $ENV_VAR -DCLASSIFIER_VERSION=$CLASSIFIER_VERSION -DDEMO_LED="$DEMO_LED" ..
make
}
function clean {
rm -rf "$KAA_LIB_PATH/$BUILD_DIR"
rm -rf "$PROJECT_HOME/$BUILD_DIR"
}
function run {
mkdir -p $PROJECT_HOME/../../fmw_bin
cp $PROJECT_HOME/$BUILD_DIR/demo_client.bin $PROJECT_HOME/../../fmw_bin/demo_client_0x0$DEMO_LED.bin
}
#for cmd in $@
#do
cmd=$1
case "$cmd" in
build)
build_app
;;
run)
run
;;
deploy)
clean
build_app
run
;;
clean)
clean
;;
*)
help
;;
esac
|
kaaproject/sample-apps
|
ota/source/cc32xx/build.sh
|
Shell
|
apache-2.0
| 2,406 |
#!/bin/bash
source ./ci/functions.sh
runBuild=false
echo "Reviewing changes that might affect the Gradle build..."
currentChangeSetAffectsTests
retval=$?
if [ "$retval" == 0 ]
then
echo "Found changes that require the build to run test cases."
runBuild=true
else
echo "Changes do NOT affect project test cases."
runBuild=false
fi
if [ "$runBuild" = false ]; then
exit 0
fi
prepCommand="echo 'Running command...'; "
gradle="./gradlew $@"
gradleBuild=""
gradleBuildOptions="--stacktrace --build-cache --configure-on-demand --no-daemon -DtestCategoryType=MSSQLSERVER "
echo -e "***********************************************"
echo -e "Gradle build started at `date`"
echo -e "***********************************************"
./ci/tests/mssqlserver/run-mssql-server.sh
gradleBuild="$gradleBuild testMsSqlServer coveralls -x test -x javadoc -x check \
-DskipNpmLint=true -DskipGradleLint=true -DskipSass=true -DskipNpmLint=true --parallel \
-DskipNodeModulesCleanUp=true -DskipNpmCache=true -DskipNestedConfigMetadataGen=true "
if [[ "${TRAVIS_COMMIT_MESSAGE}" == *"[show streams]"* ]]; then
gradleBuild="$gradleBuild -DshowStandardStreams=true "
fi
if [[ "${TRAVIS_COMMIT_MESSAGE}" == *"[rerun tasks]"* ]]; then
gradleBuild="$gradleBuild --rerun-tasks "
fi
if [[ "${TRAVIS_COMMIT_MESSAGE}" == *"[refresh dependencies]"* ]]; then
gradleBuild="$gradleBuild --refresh-dependencies "
fi
if [ -z "$gradleBuild" ]; then
echo "Gradle build will be ignored since no commands are specified to run."
else
tasks="$gradle $gradleBuildOptions $gradleBuild"
echo -e "***************************************************************************************"
echo $prepCommand
echo $tasks
echo -e "***************************************************************************************"
waitloop="while sleep 9m; do echo -e '\n=====[ Gradle build is still running ]====='; done &"
eval $waitloop
waitRetVal=$?
eval $prepCommand
eval $tasks
retVal=$?
echo -e "***************************************************************************************"
echo -e "Gradle build finished at `date` with exit code $retVal"
echo -e "***************************************************************************************"
if [ $retVal == 0 ]; then
echo "Gradle build finished successfully."
else
echo "Gradle build did NOT finish successfully."
exit $retVal
fi
fi
|
tduehr/cas
|
ci/tests/mssqlserver/run-tests-mssqlserver.sh
|
Shell
|
apache-2.0
| 2,483 |
#!/bin/bash -x
#
# Generated - do not edit!
#
# Macros
TOP=`pwd`
CND_PLATFORM=FlasCC-Windows
CND_CONF=FlasCC-Release
CND_DISTDIR=dist
CND_BUILDDIR=build
CND_DLIB_EXT=dll
NBTMPDIR=${CND_BUILDDIR}/${CND_CONF}/${CND_PLATFORM}/tmp-packaging
TMPDIRNAME=tmp-packaging
OUTPUT_PATH=${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/libmojpeg.a
OUTPUT_BASENAME=libmojpeg.a
PACKAGE_TOP_DIR=MoJpeg/
# Functions
function checkReturnCode
{
rc=$?
if [ $rc != 0 ]
then
exit $rc
fi
}
function makeDirectory
# $1 directory path
# $2 permission (optional)
{
mkdir -p "$1"
checkReturnCode
if [ "$2" != "" ]
then
chmod $2 "$1"
checkReturnCode
fi
}
function copyFileToTmpDir
# $1 from-file path
# $2 to-file path
# $3 permission
{
cp "$1" "$2"
checkReturnCode
if [ "$3" != "" ]
then
chmod $3 "$2"
checkReturnCode
fi
}
# Setup
cd "${TOP}"
mkdir -p ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package
rm -rf ${NBTMPDIR}
mkdir -p ${NBTMPDIR}
# Copy files and create directories and links
cd "${TOP}"
makeDirectory "${NBTMPDIR}/MoJpeg/lib"
copyFileToTmpDir "${OUTPUT_PATH}" "${NBTMPDIR}/${PACKAGE_TOP_DIR}lib/${OUTPUT_BASENAME}" 0644
# Generate tar file
cd "${TOP}"
rm -f ${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package/MoJpeg.tar
cd ${NBTMPDIR}
tar -vcf ../../../../${CND_DISTDIR}/${CND_CONF}/${CND_PLATFORM}/package/MoJpeg.tar *
checkReturnCode
# Cleanup
cd "${TOP}"
rm -rf ${NBTMPDIR}
|
favedit/MoCross
|
Source/Library/LibJpeg/nbproject/Package-FlasCC-Release.bash
|
Shell
|
apache-2.0
| 1,458 |
step="core-dns-service"
printf "Starting to run ${step}\n"
. /etc/sysconfig/heat-params
_dns_prefix=${CONTAINER_INFRA_PREFIX:-docker.io/coredns/}
_autoscaler_prefix=${CONTAINER_INFRA_PREFIX:-gcr.io/google_containers/}
CORE_DNS=/srv/magnum/kubernetes/manifests/kube-coredns.yaml
[ -f ${CORE_DNS} ] || {
echo "Writing File: $CORE_DNS"
mkdir -p $(dirname ${CORE_DNS})
cat << EOF > ${CORE_DNS}
apiVersion: v1
kind: ServiceAccount
metadata:
name: coredns
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
kubernetes.io/bootstrapping: rbac-defaults
name: system:coredns
rules:
- apiGroups:
- ""
resources:
- endpoints
- services
- pods
- namespaces
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
annotations:
rbac.authorization.kubernetes.io/autoupdate: "true"
labels:
kubernetes.io/bootstrapping: rbac-defaults
name: system:coredns
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:coredns
subjects:
- kind: ServiceAccount
name: coredns
namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
name: coredns
namespace: kube-system
data:
Corefile: |
.:53 {
errors
log stdout
health
kubernetes ${DNS_CLUSTER_DOMAIN} ${PORTAL_NETWORK_CIDR} ${PODS_NETWORK_CIDR} {
pods verified
fallthrough in-addr.arpa ip6.arpa
}
prometheus :9153
forward . /etc/resolv.conf
cache 30
loop
reload
loadbalance
}
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: coredns
namespace: kube-system
labels:
k8s-app: kube-dns
kubernetes.io/name: "CoreDNS"
spec:
replicas: 2
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
selector:
matchLabels:
k8s-app: kube-dns
template:
metadata:
labels:
k8s-app: kube-dns
spec:
priorityClassName: system-cluster-critical
serviceAccountName: coredns
tolerations:
# Make sure the pod can be scheduled on master kubelet.
- effect: NoSchedule
operator: Exists
# Mark the pod as a critical add-on for rescheduling.
- key: CriticalAddonsOnly
operator: Exists
- effect: NoExecute
operator: Exists
nodeSelector:
beta.kubernetes.io/os: linux
containers:
- name: coredns
image: ${_dns_prefix}coredns:${COREDNS_TAG}
imagePullPolicy: IfNotPresent
resources:
limits:
memory: 170Mi
requests:
cpu: 100m
memory: 70Mi
args: [ "-conf", "/etc/coredns/Corefile" ]
volumeMounts:
- name: config-volume
mountPath: /etc/coredns
readOnly: true
- name: tmp
mountPath: /tmp
ports:
- containerPort: 53
name: dns
protocol: UDP
- containerPort: 53
name: dns-tcp
protocol: TCP
- containerPort: 9153
name: metrics
protocol: TCP
securityContext:
allowPrivilegeEscalation: false
capabilities:
add:
- NET_BIND_SERVICE
drop:
- all
readOnlyRootFilesystem: true
livenessProbe:
httpGet:
path: /health
port: 8080
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
readinessProbe:
httpGet:
path: /health
port: 8080
scheme: HTTP
dnsPolicy: Default
volumes:
- name: tmp
emptyDir: {}
- name: config-volume
configMap:
name: coredns
items:
- key: Corefile
path: Corefile
---
apiVersion: v1
kind: Service
metadata:
name: kube-dns
namespace: kube-system
annotations:
prometheus.io/port: "9153"
prometheus.io/scrape: "true"
labels:
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "CoreDNS"
spec:
selector:
k8s-app: kube-dns
clusterIP: ${DNS_SERVICE_IP}
ports:
- name: dns
port: 53
protocol: UDP
- name: dns-tcp
port: 53
protocol: TCP
- name: metrics
port: 9153
protocol: TCP
---
kind: ServiceAccount
apiVersion: v1
metadata:
name: kube-dns-autoscaler
namespace: kube-system
labels:
addonmanager.kubernetes.io/mode: Reconcile
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: system:kube-dns-autoscaler
labels:
addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups: [""]
resources: ["nodes"]
verbs: ["list"]
- apiGroups: [""]
resources: ["replicationcontrollers/scale"]
verbs: ["get", "update"]
- apiGroups: ["extensions"]
resources: ["deployments/scale", "replicasets/scale"]
verbs: ["get", "update"]
# Remove the configmaps rule once below issue is fixed:
# kubernetes-incubator/cluster-proportional-autoscaler#16
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "create"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: system:kube-dns-autoscaler
labels:
addonmanager.kubernetes.io/mode: Reconcile
subjects:
- kind: ServiceAccount
name: kube-dns-autoscaler
namespace: kube-system
roleRef:
kind: ClusterRole
name: system:kube-dns-autoscaler
apiGroup: rbac.authorization.k8s.io
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: kube-dns-autoscaler
namespace: kube-system
labels:
k8s-app: kube-dns-autoscaler
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
spec:
selector:
matchLabels:
k8s-app: kube-dns-autoscaler
template:
metadata:
labels:
k8s-app: kube-dns-autoscaler
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
priorityClassName: system-cluster-critical
containers:
- name: autoscaler
image: ${_autoscaler_prefix}cluster-proportional-autoscaler-${ARCH}:1.1.2
resources:
requests:
cpu: "20m"
memory: "10Mi"
command:
- /cluster-proportional-autoscaler
- --namespace=kube-system
- --configmap=kube-dns-autoscaler
# Should keep target in sync with above coredns deployment name
- --target=Deployment/coredns
# When cluster is using large nodes(with more cores), "coresPerReplica" should dominate.
# If using small nodes, "nodesPerReplica" should dominate.
- --default-params={"linear":{"coresPerReplica":256,"nodesPerReplica":16,"preventSinglePointFailure":true}}
- --logtostderr=true
- --v=2
tolerations:
- key: "CriticalAddonsOnly"
operator: "Exists"
serviceAccountName: kube-dns-autoscaler
EOF
}
echo "Waiting for Kubernetes API..."
until [ "ok" = "$(kubectl get --raw='/healthz')" ]
do
sleep 5
done
kubectl apply --validate=false -f $CORE_DNS
printf "Finished running ${step}\n"
|
ArchiFleKs/magnum
|
magnum/drivers/common/templates/kubernetes/fragments/core-dns-service.sh
|
Shell
|
apache-2.0
| 7,364 |
# Minimal: JSON
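# Note: `http` below is the HTTPie client; curl is used where a raw request body is uploaded.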
http POST http://127.0.0.1:5000/bigdata/api/v1/products name=reference version=1.0.0 description='Reference product: minimal'
curl -X PUT http://127.0.0.1:5000/bigdata/api/v1/products/reference/1.0.0/template --data-binary @templates/minimal.json -H "Content-type: application/json"
curl -X PUT http://127.0.0.1:5000/bigdata/api/v1/products/reference/1.0.0/options --data-binary @options/size.json
curl -X PUT http://127.0.0.1:5000/bigdata/api/v1/products/reference/1.0.0/orquestrator --data-binary @orquestrators/minimal/fabfile.py
# Minimal: YAML
http POST http://127.0.0.1:5000/bigdata/api/v1/products name=reference version=1.0.0-yaml description='Reference product: minimal (yaml version)'
curl -X PUT http://127.0.0.1:5000/bigdata/api/v1/products/reference/1.0.0-yaml/template --data-binary @templates/minimal.yaml -H "Content-type: application/yaml"
curl -X PUT http://127.0.0.1:5000/bigdata/api/v1/products/reference/1.0.0-yaml/options --data-binary @options/size.json
curl -X PUT http://127.0.0.1:5000/bigdata/api/v1/products/reference/1.0.0-yaml/orquestrator --data-binary @orquestrators/minimal/fabfile.py
|
bigdatacesga/paas-service
|
tests/register_templates.sh
|
Shell
|
apache-2.0
| 1,133 |
#!/usr/bin/env bash
# =========================================================================
# | This is a script snippet that is included (via shell script sourcing) from
# | a main build script. This snippet provides the required environment
# | variables and functions to build the Boost and Fuego libraries.
# |
# | http://www.boost.org/
# | http://fuego.sourceforge.net/
# |
# | See the main build script for more information.
# =========================================================================
SRC_DIR="$SRC_BASEDIR/fuego-on-ios"
DEST_DIR="$PREFIX_BASEDIR"
BOOST_SRC_DIR="$SRC_DIR/boost"
BOOST_FRAMEWORK_NAME="boost.framework"
BOOST_FRAMEWORK_SRC_DIR="$BOOST_SRC_DIR/ios/framework/$BOOST_FRAMEWORK_NAME"
BOOST_FRAMEWORK_DEST_DIR="$DEST_DIR/$BOOST_FRAMEWORK_NAME"
FUEGO_SRC_DIR="$SRC_DIR"
FUEGO_FRAMEWORK_NAME="fuego-on-ios.framework"
FUEGO_FRAMEWORK_SRC_DIR="$FUEGO_SRC_DIR/ios/framework/$FUEGO_FRAMEWORK_NAME"
FUEGO_FRAMEWORK_DEST_DIR="$DEST_DIR/$FUEGO_FRAMEWORK_NAME"
# +------------------------------------------------------------------------
# | Performs pre-build steps.
# |
# | This function expects that the current working directory is the root
# | directory of the extracted source archive.
# +------------------------------------------------------------------------
# | Arguments:
# | None
# +------------------------------------------------------------------------
# | Return values:
# | * 0: No error
# | * 1: Error
# +------------------------------------------------------------------------
PRE_BUILD_STEPS_SOFTWARE()
{
echo "Cleaning up Git repository ..."
# Remove everything not under version control...
git clean -dfx
if test $? -ne 0; then
return 1
fi
# Throw away local changes
git reset --hard
if test $? -ne 0; then
return 1
fi
# The Boost build script performs its own cleanup in the Boost submodule
return 0
}
# +------------------------------------------------------------------------
# | Builds the software package.
# |
# | This function expects that the current working directory is the root
# | directory of the extracted source archive.
# +------------------------------------------------------------------------
# | Arguments:
# | None
# +------------------------------------------------------------------------
# | Return values:
# | * 0: No error
# | * 1: Error
# +------------------------------------------------------------------------
BUILD_STEPS_SOFTWARE()
{
# Exporting these variables makes them visible to the Boost and Fuego build
# scripts. We expect that the variables are set by build-env.sh.
export IPHONEOS_BASESDK_VERSION
export IPHONEOS_DEPLOYMENT_TARGET
export IPHONE_SIMULATOR_BASESDK_VERSION
export IPHONE_SIMULATOR_DEPLOYMENT_TARGET
# Build Boost first. Build script runs both the iPhone and simulator builds.
echo "Begin building Boost ..."
pushd "$BOOST_SRC_DIR" >/dev/null
./boost.sh
RETVAL=$?
popd >/dev/null
if test $RETVAL -ne 0; then
return 1
fi
# Build Fuego after Boost. Build script runs both the iPhone and simulator builds.
echo "Begin building Fuego ..."
./build.sh
return $?
}
# +------------------------------------------------------------------------
# | Performs steps to install the software.
# |
# | This function expects that the current working directory is the root
# | directory of the extracted source archive.
# +------------------------------------------------------------------------
# | Arguments:
# | None
# +------------------------------------------------------------------------
# | Return values:
# | * 0: No error
# | * 1: Error
# +------------------------------------------------------------------------
INSTALL_STEPS_SOFTWARE()
{
echo "Removing installation files from previous build ..."
rm -rf "$BOOST_FRAMEWORK_DEST_DIR"
if test $? -ne 0; then
return 1
fi
rm -rf "$FUEGO_FRAMEWORK_DEST_DIR"
if test $? -ne 0; then
return 1
fi
echo "Creating installation folder $DEST_DIR ..."
mkdir -p "$DEST_DIR"
echo "Copying Boost installation files to $BOOST_FRAMEWORK_DEST_DIR ..."
cp -R "$BOOST_FRAMEWORK_SRC_DIR" "$BOOST_FRAMEWORK_DEST_DIR"
if test $? -ne 0; then
return 1
fi
echo "Copying Fuego installation files to $FUEGO_FRAMEWORK_DEST_DIR ..."
cp -R "$FUEGO_FRAMEWORK_SRC_DIR" "$FUEGO_FRAMEWORK_DEST_DIR"
if test $? -ne 0; then
return 1
fi
return 0
}
|
puremourning/littlego
|
script/build-fuego.sh
|
Shell
|
apache-2.0
| 4,397 |
#!/bin/sh
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
##############################################################
# GENERAL CONFIGURATION #
##############################################################
#
if [ "$TOMCAT_HOME" = "" ] ; then
echo You must set TOMCAT_HOME to point at your Tomcat installation
exit 1
fi
CONTEXT=$TOMCAT_HOME/webapps/lenya
PUBLICATION_DIR=$CONTEXT/lenya/pubs
SFTP_BATCH=$CONTEXT/lenya/bin/copy-recursive.sh
#
##############################################################
# PUBLICATION CONFIGURATION #
##############################################################
#
PUBLICATION_ID_2=oscom
EXPORT_DIR_2=$PUBLICATION_DIR/oscom/resources/export
PENDING_DIR_2=$EXPORT_DIR_2/pending/lenya/oscom
REPLICATION_DIR_2=$EXPORT_DIR_2/replication
RU_2_1=username
RH_2_1=127.0.0.1
RDOCS_2_1=/usr/local/jakarta-tomcat-4.0.4-b3/webapps/ROOT
RU_2_2=username
RH_2_2=127.0.0.1
RDOCS_2_2=/usr/local/apache/htdocs_oscom
###########################################
# MAIN #
###########################################
echo "START"
date
# Loop over all publications (BEGIN)
#### PUBLICATION 2
echo ""
echo "=================================================="
echo "= PUBLICATION: $PUBLICATION_ID_2"
echo "=================================================="
echo ""
mkdir $REPLICATION_DIR_2
if [ -d $REPLICATION_DIR_2 ];then
echo "DEBUG: Replication Directory: $REPLICATION_DIR_2"
PROCESS_ID=$$
DATUM=`date +%Y.%m.%d_%H.%M.%S`
TEMP_ID=$DATUM\_$PROCESS_ID
TEMP_DIR=$REPLICATION_DIR_2/temp\_$TEMP_ID
mkdir -p $TEMP_DIR
echo "DEBUG: Temporary Directory: $TEMP_DIR"
if [ -d $PENDING_DIR_2 ];then
echo "DEBUG: Pending Directory: $PENDING_DIR_2"
if [ -d $PENDING_DIR_2 ];then
mv $PENDING_DIR_2/* $TEMP_DIR/.
fi
if [ -d $TEMP_DIR ];then
scp -r $TEMP_DIR/* $RU_2_1@$RH_2_1:$RDOCS_2_1/.
fi
if [ -d $TEMP_DIR ];then
scp -r $TEMP_DIR/* $RU_2_2@$RH_2_2:$RDOCS_2_2/.
fi
else
echo "WARN: No such directory: $PENDING_DIR_2"
fi
rm -r $TEMP_DIR
else
echo "FATAL: No such directory: $REPLICATION_DIR_2"
#exit 0
fi
# Loop over all publications (END)
date
echo "STOP"
|
apache/lenya
|
src/webapp/lenya/bin/replicate.sh
|
Shell
|
apache-2.0
| 3,009 |
cp wildfly-helloworld-html5.war wildfly-10.1.0.Final/standalone/deployments
|
lucasponce/hawkular-alerts-demo
|
06_deploy-app.sh
|
Shell
|
apache-2.0
| 76 |
#!/bin/bash
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Call this to dump all master and node logs into the folder specified in $1
# (defaults to _artifacts). Only works if the provider supports SSH.
set -o errexit
set -o nounset
set -o pipefail
readonly report_dir="${1:-_artifacts}"
# Enable LOG_DUMP_USE_KUBECTL to dump logs from a running cluster. In
# this mode, this script is standalone and doesn't use any of the bash
# provider infrastructure. Instead, the cluster is expected to have
# one node with the `kube-apiserver` image that we assume to be the
# master, and the LOG_DUMP_SSH_KEY and LOG_DUMP_SSH_USER variables
# must be set for auth.
readonly use_kubectl="${LOG_DUMP_USE_KUBECTL:-}"
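# Example standalone invocation (key path and user are illustrative):
#   LOG_DUMP_USE_KUBECTL=yes LOG_DUMP_SSH_KEY=~/.ssh/id_rsa \
#     LOG_DUMP_SSH_USER=admin ./cluster/log-dump.sh _artifacts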
readonly master_ssh_supported_providers="gce aws kubemark"
readonly node_ssh_supported_providers="gce gke aws"
readonly master_logfiles="kube-apiserver kube-scheduler kube-controller-manager etcd glbc cluster-autoscaler"
readonly node_logfiles="kube-proxy"
readonly aws_logfiles="cloud-init-output"
readonly gce_logfiles="startupscript"
readonly kern_logfile="kern"
readonly initd_logfiles="docker"
readonly supervisord_logfiles="kubelet supervisor/supervisord supervisor/kubelet-stdout supervisor/kubelet-stderr supervisor/docker-stdout supervisor/docker-stderr"
# Limit the number of concurrent node connections so that we don't run out of
# file descriptors for large clusters.
readonly max_scp_processes=25
# This template spits out the external IPs and images for each node in the cluster in a format like so:
# 52.32.7.85 gcr.io/google_containers/kube-apiserver:1355c18c32d7bef16125120bce194fad gcr.io/google_containers/kube-controller-manager:46365cdd8d28b8207950c3c21d1f3900 [...]
readonly ips_and_images='{range .items[*]}{@.status.addresses[?(@.type == "ExternalIP")].address} {@.status.images[*].names[*]}{"\n"}{end}'
function setup() {
if [[ -z "${use_kubectl}" ]]; then
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
: ${KUBE_CONFIG_FILE:="config-test.sh"}
source "${KUBE_ROOT}/cluster/kube-util.sh"
detect-project &> /dev/null
elif [[ -z "${LOG_DUMP_SSH_KEY:-}" ]]; then
echo "LOG_DUMP_SSH_KEY not set, but required by LOG_DUMP_USE_KUBECTL"
exit 1
elif [[ -z "${LOG_DUMP_SSH_USER:-}" ]]; then
echo "LOG_DUMP_SSH_USER not set, but required by LOG_DUMP_USE_KUBECTL"
exit 1
fi
}
function log-dump-ssh() {
if [[ -z "${use_kubectl}" ]]; then
ssh-to-node "$@"
return
fi
local host="$1"
local cmd="$2"
ssh -oLogLevel=quiet -oConnectTimeout=30 -oStrictHostKeyChecking=no -i "${LOG_DUMP_SSH_KEY}" "${LOG_DUMP_SSH_USER}@${host}" "${cmd}"
}
# Copy all files /var/log/{$3}.log on node $1 into local dir $2.
# $3 should be a space-separated string of files.
# This function shouldn't ever trigger errexit, but doesn't block stderr.
function copy-logs-from-node() {
local -r node="${1}"
local -r dir="${2}"
local files=( ${3} )
# Append ".log*"
# The * at the end is needed to also copy rotated logs (which happens
# in large clusters and long runs).
files=( "${files[@]/%/.log*}" )
# Prepend "/var/log/"
files=( "${files[@]/#/\/var\/log\/}" )
# Comma delimit (even the singleton, or scp does the wrong thing), surround by braces.
local -r scp_files="{$(printf "%s," "${files[@]}")}"
if [[ -n "${use_kubectl}" ]]; then
scp -oLogLevel=quiet -oConnectTimeout=30 -oStrictHostKeyChecking=no -i "${LOG_DUMP_SSH_KEY}" "${LOG_DUMP_SSH_USER}@${node}:${scp_files}" "${dir}" > /dev/null || true
else
case "${KUBERNETES_PROVIDER}" in
gce|gke)
gcloud compute copy-files --project "${PROJECT}" --zone "${ZONE}" "${node}:${scp_files}" "${dir}" > /dev/null || true
;;
aws)
local ip=$(get_ssh_hostname "${node}")
scp -oLogLevel=quiet -oConnectTimeout=30 -oStrictHostKeyChecking=no -i "${AWS_SSH_KEY}" "${SSH_USER}@${ip}:${scp_files}" "${dir}" > /dev/null || true
;;
esac
fi
}
# Save logs for node $1 into directory $2. Pass in any non-common files in $3.
# $3 should be a space-separated list of files.
# This function shouldn't ever trigger errexit
function save-logs() {
local -r node_name="${1}"
local -r dir="${2}"
local files="${3}"
if [[ -n "${use_kubectl}" ]]; then
if [[ -n "${LOG_DUMP_SAVE_LOGS:-}" ]]; then
files="${files} ${LOG_DUMP_SAVE_LOGS:-}"
fi
else
case "${KUBERNETES_PROVIDER}" in
gce|gke)
files="${files} ${gce_logfiles}"
;;
aws)
files="${files} ${aws_logfiles}"
;;
esac
fi
if log-dump-ssh "${node_name}" "sudo systemctl status kubelet.service" &> /dev/null; then
log-dump-ssh "${node_name}" "sudo journalctl --output=cat -u kubelet.service" > "${dir}/kubelet.log" || true
log-dump-ssh "${node_name}" "sudo journalctl --output=cat -u docker.service" > "${dir}/docker.log" || true
log-dump-ssh "${node_name}" "sudo journalctl --output=cat -k" > "${dir}/kern.log" || true
else
files="${kern_logfile} ${files} ${initd_logfiles} ${supervisord_logfiles}"
fi
echo "Copying '${files}' from ${node_name}"
copy-logs-from-node "${node_name}" "${dir}" "${files}"
}
function kubectl-guess-master() {
kubectl get node -ojsonpath --template="${ips_and_images}" | grep kube-apiserver | cut -f1 -d" "
}
function kubectl-guess-nodes() {
kubectl get node -ojsonpath --template="${ips_and_images}" | grep -v kube-apiserver | cut -f1 -d" "
}
function dump_master() {
local master_name
if [[ -n "${use_kubectl}" ]]; then
master_name=$(kubectl-guess-master)
elif [[ ! "${master_ssh_supported_providers}" =~ "${KUBERNETES_PROVIDER}" ]]; then
echo "Master SSH not supported for ${KUBERNETES_PROVIDER}"
return
else
if ! (detect-master &> /dev/null); then
echo "Master not detected. Is the cluster up?"
return
fi
master_name="${MASTER_NAME}"
fi
readonly master_dir="${report_dir}/${master_name}"
mkdir -p "${master_dir}"
save-logs "${master_name}" "${master_dir}" "${master_logfiles}"
}
function dump_nodes() {
local node_names
if [[ -n "${use_kubectl}" ]]; then
node_names=( $(kubectl-guess-nodes) )
elif [[ ! "${node_ssh_supported_providers}" =~ "${KUBERNETES_PROVIDER}" ]]; then
echo "Node SSH not supported for ${KUBERNETES_PROVIDER}"
return
else
detect-node-names &> /dev/null
if [[ "${#NODE_NAMES[@]}" -eq 0 ]]; then
echo "Nodes not detected. Is the cluster up?"
return
fi
node_names=( "${NODE_NAMES[@]}" )
fi
proc=${max_scp_processes}
for node_name in "${node_names[@]}"; do
node_dir="${report_dir}/${node_name}"
mkdir -p "${node_dir}"
# Save logs in the background. This speeds up things when there are
# many nodes.
save-logs "${node_name}" "${node_dir}" "${node_logfiles}" &
# We don't want to run more than ${max_scp_processes} at a time, so
# wait once we hit that many nodes. This isn't ideal, since one might
# take much longer than the others, but it should help.
proc=$((proc - 1))
if [[ proc -eq 0 ]]; then
proc=${max_scp_processes}
wait
fi
done
# Wait for any remaining processes.
if [[ proc -gt 0 && proc -lt ${max_scp_processes} ]]; then
wait
fi
}
setup
echo "Dumping master and node logs to ${report_dir}"
dump_master
dump_nodes
|
ravihansa3000/kubernetes
|
cluster/log-dump.sh
|
Shell
|
apache-2.0
| 7,932 |
#!/bin/bash
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://aws.amazon.com/apache2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
. $(dirname $0)/common_functions.sh
msg "Running AWS CLI with region: $(get_instance_region)"
# get this instance's ID
INSTANCE_ID=$(get_instance_id)
if [ $? != 0 -o -z "$INSTANCE_ID" ]; then
error_exit "Unable to get this instance's ID; cannot continue."
fi
# Get current time
msg "Started $(basename $0) at $(/bin/date "+%F %T")"
start_sec=$(/bin/date +%s.%N)
msg "Checking if instance $INSTANCE_ID is part of an AutoScaling group"
asg=$(autoscaling_group_name $INSTANCE_ID)
if [ $? == 0 -a -n "${asg}" ]; then
msg "Found AutoScaling group for instance $INSTANCE_ID: ${asg}"
msg "Checking that installed CLI version is at least at version required for AutoScaling Standby"
check_cli_version
if [ $? != 0 ]; then
error_exit "CLI must be at least version ${MIN_CLI_X}.${MIN_CLI_Y}.${MIN_CLI_Z} to work with AutoScaling Standby"
fi
msg "Attempting to put instance into Standby"
autoscaling_enter_standby $INSTANCE_ID "${asg}"
if [ $? != 0 ]; then
error_exit "Failed to move instance into standby"
else
msg "Instance is in standby"
finish_msg
exit 0
fi
fi
msg "Instance is not part of an ASG, trying with ELB..."
set_flag "dereg" "true"
if [ -z "$ELB_LIST" ]; then
error_exit "ELB_LIST is empty. Must have at least one load balancer to deregister from, or \"_all_\", \"_any_\" values."
elif [ "${ELB_LIST}" = "_all_" ]; then
msg "Automatically finding all the ELBs that this instance is registered to..."
get_elb_list $INSTANCE_ID
if [ $? != 0 ]; then
error_exit "Couldn't find any. Must have at least one load balancer to deregister from."
fi
set_flag "ELBs" "$ELB_LIST"
elif [ "${ELB_LIST}" = "_any_" ]; then
msg "Automatically finding all the ELBs that this instance is registered to..."
get_elb_list $INSTANCE_ID
if [ $? != 0 ]; then
msg "Couldn't find any, but ELB_LIST=any so finishing successfully without deregistering."
set_flag "ELBs" ""
finish_msg
exit 0
fi
set_flag "ELBs" "$ELB_LIST"
fi
# Loop through all LBs the user set, and attempt to deregister this instance from them.
for elb in $ELB_LIST; do
msg "Checking validity of load balancer named '$elb'"
validate_elb $INSTANCE_ID $elb
if [ $? != 0 ]; then
msg "Error validating $elb; cannot continue with this LB"
continue
fi
msg "Deregistering $INSTANCE_ID from $elb"
deregister_instance $INSTANCE_ID $elb
if [ $? != 0 ]; then
error_exit "Failed to deregister instance $INSTANCE_ID from ELB $elb"
fi
done
# Wait for all deregistrations to finish
msg "Waiting for instance to de-register from its load balancers"
for elb in $ELB_LIST; do
wait_for_state "elb" $INSTANCE_ID "OutOfService" $elb
if [ $? != 0 ]; then
error_exit "Failed waiting for $INSTANCE_ID to leave $elb"
fi
done
finish_msg
|
ahedevops/sandbox
|
expirimental/deregister_from_elb.sh
|
Shell
|
apache-2.0
| 3,415 |
#!/bin/sh
#
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Configure an environment to run Registry clients with a Cloud Run-based server.
#
# The following assumes you have run `gcloud auth login` and that the current
# gcloud project is the one with your Cloud Run instance.
#
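# Usage note: source this file so the exports persist in your shell (it
# deliberately uses `return` rather than `exit` on error), e.g.:
#   . auth/CLOUDRUN.sh
#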
if ! [ -x "$(command -v gcloud)" ]; then
echo 'ERROR: This script requires the gcloud command. Please install it to continue.' >&2; return
fi
### SERVER CONFIGURATION
# This is used in the Makefile to build and publish your server image.
export REGISTRY_PROJECT_IDENTIFIER=$(gcloud config list --format 'value(core.project)')
### CLIENT CONFIGURATION
# Calls to the Cloud Run service are secure.
unset APG_REGISTRY_INSECURE
# Get the service address from the gcloud tool.
export APG_REGISTRY_AUDIENCES=$(gcloud run services describe registry-backend --platform managed --format="value(status.address.url)")
export APG_REGISTRY_ADDRESS=${APG_REGISTRY_AUDIENCES#https://}:443
# The auth token is generated for the gcloud logged-in user.
export APG_REGISTRY_CLIENT_EMAIL=$(gcloud config list account --format "value(core.account)")
export APG_REGISTRY_TOKEN=$(gcloud auth print-identity-token ${APG_REGISTRY_CLIENT_EMAIL})
# Calls don't use an API key.
unset APG_REGISTRY_API_KEY
|
apigee/registry
|
auth/CLOUDRUN.sh
|
Shell
|
apache-2.0
| 1,821 |
#!/usr/bin/env bash
#set -x
if [[ $# -ne 2 ]]; then
echo "usage: checkAllRules.sh inputFile outputFile"
echo
echo "where"
echo " inputFile : filename or directory path"
echo " outputFile : name of the output file for the results"
echo
exit
fi
inputFile=$1
outputFile=$2
echo -n "${inputFile}: "
src/countBytes.sh $inputFile $outputFile
if [[ $inputFile == *.js ]]; then
src/checkString.sh $inputFile evalCount eval "Potential Slow and Code Smell" $outputFile
src/checkString.sh $inputFile jQueryFunctionCalls '$(' "JQuery Function - Potential Slow" $outputFile
src/checkString.sh $inputFile jQueryLocatorCalls '$.' "JQuery Utility Function - Potential Slow" $outputFile
src/checkString.sh $inputFile withCount with "Potential Slow" $outputFile
src/checkString.sh $inputFile newCount new "Potential Slow" $outputFile
src/checkString.sh $inputFile documentWriteCount "document.write" "Potential Slow" $outputFile
src/checkRegex.sh $inputFile forInCount "for\s+in" "Potential Slow" $outputFile
src/checkRegex.sh $inputFile returnNullCount "return\s+null" "Code Smell" $outputFile
fi
if [[ $inputFile == *.css ]]; then
src/checkCssLint.sh $inputFile $outputFile
src/checkString.sh $inputFile mediaQueryCount '@media' "Media Query" $outputFile
src/checkRegex.sh $inputFile breakpointMCount '@media\s+[^{}]*\(min-width:\s*28em\)' "Breakpoint M" $outputFile
src/checkRegex.sh $inputFile breakpointLCount '@media\s+[^{}]*\(min-width:\s*48em\)' "Breakpoint L" $outputFile
src/checkRegex.sh $inputFile breakpointXLCount '@media\s+[^{}]*\(min-width:\s*62em\)' "Breakpoint XL" $outputFile
src/countMultiLineRegexBytes.sh $inputFile breakpointMBytes '@media\s+[^{}]*\(min-width:\s*28em\)[^{}]*{([^{}]*{[^{}]*})*[^{}]*}' "Breakpoint M bytes" $outputFile
src/countMultiLineRegexBytes.sh $inputFile breakpointLBytes '@media\s+[^{}]*\(min-width:\s*48em\)[^{}]*{([^{}]*{[^{}]*})*[^{}]*}' "Breakpoint L bytes" $outputFile
src/countMultiLineRegexBytes.sh $inputFile breakpointXLBytes '@media\s+[^{}]*\(min-width:\s*62em\)[^{}]*{([^{}]*{[^{}]*})*[^{}]*}' "Breakpoint XL bytes" $outputFile
src/countMultiLineRegexBytes.sh $inputFile mediaQueryBytes '@media\s+[^{}]*{([^{}]*{[^{}]*})*[^{}]*}' "Media Query bytes" $outputFile
fi
echo ""
|
mirkoebert/simplejslint
|
src/checkAllRules.sh
|
Shell
|
apache-2.0
| 2,256 |
#!/bin/sh
set -e
mkdir -p "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
RESOURCES_TO_COPY=${PODS_ROOT}/resources-to-copy-${TARGETNAME}.txt
> "$RESOURCES_TO_COPY"
XCASSET_FILES=""
install_resource()
{
case $1 in
*.storyboard)
echo "ibtool --errors --warnings --notices --output-format human-readable-text --compile ${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \"$1\" .storyboard`.storyboardc ${PODS_ROOT}/$1 --sdk ${SDKROOT}"
ibtool --errors --warnings --notices --output-format human-readable-text --compile "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \"$1\" .storyboard`.storyboardc" "${PODS_ROOT}/$1" --sdk "${SDKROOT}"
;;
*.xib)
echo "ibtool --errors --warnings --notices --output-format human-readable-text --compile ${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \"$1\" .xib`.nib ${PODS_ROOT}/$1 --sdk ${SDKROOT}"
ibtool --errors --warnings --notices --output-format human-readable-text --compile "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename \"$1\" .xib`.nib" "${PODS_ROOT}/$1" --sdk "${SDKROOT}"
;;
*.framework)
echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
echo "rsync -av ${PODS_ROOT}/$1 ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
rsync -av "${PODS_ROOT}/$1" "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
;;
*.xcdatamodel)
echo "xcrun momc \"${PODS_ROOT}/$1\" \"${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$1"`.mom\""
xcrun momc "${PODS_ROOT}/$1" "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$1" .xcdatamodel`.mom"
;;
*.xcdatamodeld)
echo "xcrun momc \"${PODS_ROOT}/$1\" \"${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$1" .xcdatamodeld`.momd\""
xcrun momc "${PODS_ROOT}/$1" "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$1" .xcdatamodeld`.momd"
;;
*.xcmappingmodel)
echo "xcrun mapc \"${PODS_ROOT}/$1\" \"${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$1" .xcmappingmodel`.cdm\""
xcrun mapc "${PODS_ROOT}/$1" "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}/`basename "$1" .xcmappingmodel`.cdm"
;;
*.xcassets)
XCASSET_FILES="$XCASSET_FILES '$1'"
;;
/*)
echo "$1"
echo "$1" >> "$RESOURCES_TO_COPY"
;;
*)
echo "${PODS_ROOT}/$1"
echo "${PODS_ROOT}/$1" >> "$RESOURCES_TO_COPY"
;;
esac
}
if [[ "$CONFIGURATION" == "Debug" ]]; then
install_resource "../../Source/LumberjackConsole.storyboard"
fi
if [[ "$CONFIGURATION" == "Release" ]]; then
install_resource "../../Source/LumberjackConsole.storyboard"
fi
rsync -avr --copy-links --no-relative --exclude '*/.svn/*' --files-from="$RESOURCES_TO_COPY" / "${CONFIGURATION_BUILD_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
if [[ "${ACTION}" == "install" ]]; then
rsync -avr --copy-links --no-relative --exclude '*/.svn/*' --files-from="$RESOURCES_TO_COPY" / "${INSTALL_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
fi
rm -f "$RESOURCES_TO_COPY"
if [[ -n "${WRAPPER_EXTENSION}" ]] && [ "`xcrun --find actool`" ] && [ -n $XCASSET_FILES ]
then
case "${TARGETED_DEVICE_FAMILY}" in
1,2)
TARGET_DEVICE_ARGS="--target-device ipad --target-device iphone"
;;
1)
TARGET_DEVICE_ARGS="--target-device iphone"
;;
2)
TARGET_DEVICE_ARGS="--target-device ipad"
;;
*)
TARGET_DEVICE_ARGS="--target-device mac"
;;
esac
echo $XCASSET_FILES | xargs actool --output-format human-readable-text --notices --warnings --platform "${PLATFORM_NAME}" --minimum-deployment-target "${IPHONEOS_DEPLOYMENT_TARGET}" ${TARGET_DEVICE_ARGS} --compress-pngs --compile "${BUILT_PRODUCTS_DIR}/${UNLOCALIZED_RESOURCES_FOLDER_PATH}"
fi
|
z8927623/LumberjackConsole
|
Demo/Pods/Target Support Files/Pods/Pods-resources.sh
|
Shell
|
apache-2.0
| 4,038 |
#!/bin/sh
# Converts a mysqldump file into a Sqlite 3 compatible file. It also extracts the MySQL `KEY xxxxx` from the
# CREATE block and create them in separate commands _after_ all the INSERTs.
# Awk is chosen because it's fast and portable. You can use gawk, original awk or even the lightning fast mawk.
# The mysqldump file is traversed only once.
# Usage: $ ./mysql2sqlite mysqldump-opts db-name | sqlite3 database.sqlite
# Example: $ ./mysql2sqlite --no-data -u root -pMySecretPassWord myDbase | sqlite3 database.sqlite
# Thanks to and @artemyk and @gkuenning for their nice tweaks.
mysqldump5 --compatible=ansi --skip-extended-insert --compact "$@" | \
awk '
BEGIN {
FS=",$"
print "PRAGMA synchronous = OFF;"
print "PRAGMA journal_mode = MEMORY;"
print "BEGIN TRANSACTION;"
}
# CREATE TRIGGER statements have funny commenting. Remember we are in trigger.
/^\/\*.*CREATE.*TRIGGER/ {
gsub( /^.*TRIGGER/, "CREATE TRIGGER" )
print
inTrigger = 1
next
}
# The end of CREATE TRIGGER has a stray comment terminator
/END \*\/;;/ { gsub( /\*\//, "" ); print; inTrigger = 0; next }
# The rest of triggers just get passed through
inTrigger != 0 { print; next }
# Skip other comments
/^\/\*/ { next }
# Print all `INSERT` lines. The single quotes are protected by another single quote.
/INSERT/ {
gsub( /\\\047/, "\047\047" )
gsub(/\\n/, "\n")
gsub(/\\r/, "\r")
gsub(/\\"/, "\"")
gsub(/\\\\/, "\\")
gsub(/\\\032/, "\032")
print
next
}
# Print the `CREATE` line as is and capture the table name.
/^CREATE/ {
print
if ( match( $0, /\"[^\"]+/ ) ) tableName = substr( $0, RSTART+1, RLENGTH-1 )
}
# Replace `FULLTEXT KEY` or any other `XXXXX KEY` except PRIMARY by `KEY`
/^ [^"]+KEY/ && !/^ PRIMARY KEY/ { gsub( /.+KEY/, " KEY" ) }
# Get rid of field lengths in KEY lines
/ KEY/ { gsub(/\([0-9]+\)/, "") }
# Print all fields definition lines except the `KEY` lines.
/^ / && !/^( KEY|\);)/ {
gsub( /AUTO_INCREMENT|auto_increment/, "" )
gsub( /(CHARACTER SET|character set) [^ ]+ /, "" )
gsub( /DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP|default current_timestamp on update current_timestamp/, "" )
gsub( /(COLLATE|collate) [^ ]+ /, "" )
gsub(/(ENUM|enum)[^)]+\)/, "text ")
gsub(/(SET|set)\([^)]+\)/, "text ")
gsub(/UNSIGNED|unsigned/, "")
if (prev) print prev ","
prev = $1
}
# `KEY` lines are extracted from the `CREATE` block and stored in array for later print
# in a separate `CREATE INDEX` command. The index name is prefixed by the table name to
# avoid a sqlite error for duplicate index name.
/^( KEY|\);)/ {
if (prev) print prev
prev=""
if ($0 == ");"){
print
} else {
if ( match( $0, /\"[^"]+/ ) ) indexName = substr( $0, RSTART+1, RLENGTH-1 )
if ( match( $0, /\([^()]+/ ) ) indexKey = substr( $0, RSTART+1, RLENGTH-1 )
key[tableName]=key[tableName] "CREATE INDEX \"" tableName "_" indexName "\" ON \"" tableName "\" (" indexKey ");\n"
}
}
# Print all `KEY` creation lines.
END {
for (table in key) printf key[table]
print "END TRANSACTION;"
}
'
exit 0
|
nebgnahz/CS268NetworkMeasurement
|
king/mysql2sqlite.sh
|
Shell
|
bsd-2-clause
| 3,036 |
#!/bin/bash
if [ "$(uname)" = "Darwin" ]; then
tmpdir="$(mktemp -d)"
cp /bin/ls "$tmpdir"
PATH=$tmpdir:$PATH
trap 'rm -r "$tmpdir"' exit
fi
preload () {
local library
library=$1
shift
if [ "$(uname)" = "Darwin" ]; then
DYLD_INSERT_LIBRARIES=target/debug/"$library".dylib "$@"
else
LD_PRELOAD=target/debug/"$library".so "$@"
fi
}
set -ex
set -o pipefail
cd examples/readlinkspy
cargo update
cargo build
preload libreadlinkspy ls -l /dev/stdin | grep readlink
cd ../neverfree
cargo update
cargo build
|
geofft/redhook
|
test.sh
|
Shell
|
bsd-2-clause
| 560 |
#!/bin/sh
## one script to be used by travis, jenkins, packer...
umask 022
if [ $# != 0 ]; then
rolesdir=$1
else
rolesdir=$(dirname $0)/..
fi
[ ! -d $rolesdir/juju4.ipv6 ] && git clone https://github.com/juju4/ansible-ipv6 $rolesdir/juju4.ipv6
[ ! -d $rolesdir/juju4.redhat_epel ] && git clone https://github.com/juju4/ansible-redhat-epel $rolesdir/juju4.redhat_epel
[ ! -d $rolesdir/juju4.harden_sysctl ] && git clone https://github.com/juju4/ansible-harden-sysctl $rolesdir/juju4.harden_sysctl
[ ! -d $rolesdir/juju4.harden_mailserver ] && git clone https://github.com/juju4/ansible-harden-mailserver $rolesdir/juju4.harden_mailserver
#[ ! -d $rolesdir/kbrebanov.osquery ] && git clone https://github.com/kbrebanov/ansible-osquery.git $rolesdir/kbrebanov.osquery
[ ! -d $rolesdir/kbrebanov.osquery ] && git clone https://github.com/juju4/ansible-osquery.git $rolesdir/kbrebanov.osquery
[ ! -d $rolesdir/juju4.auditd ] && git clone https://github.com/juju4/ansible-auditd $rolesdir/juju4.auditd
[ ! -d $rolesdir/juju4.syslogclient ] && git clone https://github.com/juju4/ansible-syslogclient $rolesdir/juju4.syslogclient
[ ! -d $rolesdir/juju4.ntpclient ] && git clone https://github.com/juju4/ansible-ntpclient $rolesdir/juju4.ntpclient
[ ! -d $rolesdir/juju4.falco ] && git clone https://github.com/juju4/ansible-falco $rolesdir/juju4.falco
## galaxy naming: kitchen fails to transfer symlink folder
#[ ! -e $rolesdir/juju4.harden ] && ln -s ansible-harden $rolesdir/juju4.harden
[ ! -e $rolesdir/juju4.harden ] && cp -R $rolesdir/ansible-harden $rolesdir/juju4.harden
## don't stop build on this script return code
true
|
juju4/ansible-harden
|
get-dependencies.sh
|
Shell
|
bsd-2-clause
| 1,628 |
#!/bin/bash
set -e
export PATH=./:$PATH
# eq
echo '"abc"' | R eq '"abc"'
echo '1' | R eq '1'
echo '1.0' | R eq '1.0'
echo 'true' | R eq 'true'
echo '[1,2,3]' | R eq '[1,2,3]'
echo '{"a":{"b":1}}' | R eq '{"a":{"b":1}}'
echo -e '"abc"\n"abc"' | R eq '"abc"' | uniq | R eq true
# not eq
echo '"abc"' | R not eq '"jkl"'
echo '0' | R not eq '1'
echo '1' | R not eq '{}'
echo '{"a":1}' | R not eq '{"b":1,"c":2,"a":1}'
echo -e '"xz"\n"xz"' | R not eq '"abc"' | uniq | R eq true
# pick
echo '{}' | R pick a | R eq '{}'
echo '{"a":1,"b":2}{"a":1}' | R pick a | R eq '{"a":1}'
# omit
echo '{}' | R omit a | R eq '{}'
echo '{"a":1,"b":2}' | R omit a | R eq '{"b":2}'
# path
echo '{"a":{"b":true}}' | R path a.b | R eq true
echo '{"a":{"b":true}}' | R path a.b eq true
echo '{"a":{"c":2}}' | R path a.c | R eq 2
echo '[{"a":1}]' | R path 0.a | R eq 1
! echo '{}' | R path -1
! echo '[0]' | R path 1
! echo '{}' | R path a.b.c.d
# head
echo '[1,2,3,4]' | R head | R eq 1
echo '[1]' | R head | R eq 1
! echo '[]' | R head
# tail
echo '[1,2,3,4]' | R tail | R eq '[2,3,4]'
! echo '[1]' | R tail
! echo '[]' | R tail
# each
echo '[1,2,3]' | R each | head -1 | R eq 1
echo '[1,2,3]' | R each | tail -1 | R eq 3
# map
echo '[{"a":1},{"a":2}]' | R map path a | R eq '[1,2]'
# append
echo '[1]' | R append 2 | R eq '[1,2]'
# concat
echo '[1, 2]' | R concat '[3,4]' eq '[1,2,3,4]'
# values
echo '{"a":1,"b":2}' | R values | R eq '[1,2]'
! echo '[1,2,3]' | R values
# keys
echo '{"a":1,"b":2}' | R keys | R contains a
echo '{"a":1,"b":2}' | R keys | R contains b
# length
echo '[1,2,3,4]' | R length eq 4
# where
echo '{"a":1, "b":2}' | R where '{"a": 1}' | R eq '{"a":1, "b":2}'
# filter
echo '[{"a":1, "b":2}]' | R filter where '{"a":1}' | R eq '[{"a":1,"b":2}]'
# find
echo '[{"a":1, "b":2}]' | R find where '{"a":1}' | R eq '{"a":1,"b":2}'
! echo '[{"a":1, "b":2}]' | R find where '{"a":2}'
# mixin
echo '{"a":1, "b":2}' | R mixin '{"b": 5,"c":3}' | R eq '{"a":1,"b":5,"c":3}'
# mutli-json input
echo '[1,2,3][3,4,5][5,4,3]' | R contains 3 | wc -l | R eq 3
echo '[1,2,3][3,4,5][5,4,3]' | R contains 3 | uniq | R eq true
|
jpospychala/rcli
|
test.sh
|
Shell
|
bsd-2-clause
| 2,123 |
#!/bin/bash
#usage: ./nattorture.sh [-i interface] [-r runs] [-f outfile] [-t testname] ip
# A POSIX variable
OPTIND=1 # Reset in case getopts has been used previously in the shell.
# Initialize our own variables:
interface="eth0"
ip=""
runs=10
output_file="out.csv"
test_name="test"
lockfile=nattorture.lock
touch $lockfile
show_help() {
    echo "usage: $0 [-i interface] [-r runs] [-f outfile] [-t testname] ip"
}
while getopts "h?i:f:r:o:t:" opt; do
case "$opt" in
h|\?)
show_help
exit 0
;;
r) runs=$OPTARG
;;
i) interface=$OPTARG
;;
f) output_file=$OPTARG
;;
t) test_name=$OPTARG
;;
esac
done
shift $((OPTIND-1))
[ "$1" = "--" ] && shift
ip=$@
echo "interface=$interface, output_file='$output_file',runs='$runs', ip: $ip"
rm -f "$output_file"
printf "Name, run, Start, stop, transactions, failed, max rtt, min rtt, avg rtt, retries, client sent, server sent, start port, stop port\n" > $output_file
for (( i=1; i<=$runs; i++ ))
do
echo "Run $i"
printf "$test_name, $i, " >> $output_file
build/dist/bin/stunclient -i $interface $ip -j 60 --csv >> $output_file
done
rm $lockfile
|
NATTools/stunclient
|
nattorture.sh
|
Shell
|
bsd-2-clause
| 1,086 |
#!/bin/bash
rm -f *.rasl-module *.rasl caller
../../../bin/rlc --keep-rasls -OC -R module.sref
../../../bin/rlmake --keep-rasls -X-OC -X--markup-context caller.ref
./caller
if [[ $? -gt 0 ]]; then
echo TEST FAILED!
exit 1
else
echo Test OK!
rm -f *.rasl-module *.rasl caller *.lst
fi
|
Mazdaywik/simple-refal
|
autotests/dynamic/1-simple-load/run.sh
|
Shell
|
bsd-2-clause
| 293 |
#!/bin/sh -e
set -e
echo "test => $0"
printf "/ ^d.*.[abck]ey[0-9]$\n" |${KC_RUN} -b -k ${KC_DB} -p ${KC_PASSFILE}
SHA1=$(printf "/ ^d.*.[abck]ey[0-9]+$\n" |${KC_RUN} -b -k ${KC_DB} -p ${KC_PASSFILE} |grep -E -v -e '^<default% >' -e "^Opening '${KC_DB}'" -e "^Using '${KC_DB}' database." -e "^Using password file: ${KC_PASSFILE}" -e "^Decrypting\.\.\." |$SHA1_BIN |cut -d' ' -f1)
if [ "$SHA1" = '040b16edbb11c9e9e3da9c09389000a34d473a6a' ];then
echo "$0 test ok (current chain)!"
else
echo "$0 test failed (current chain)!"
exit 1
fi
printf "/i ^d.*.[ABCK]EY[0-9]$\n" |${KC_RUN} -b -k ${KC_DB} -p ${KC_PASSFILE}
SHA1=$(printf "/i ^d.*.[ABCK]EY[0-9]+$\n" |${KC_RUN} -b -k ${KC_DB} -p ${KC_PASSFILE} |grep -E -v -e '^<default% >' -e "^Opening '${KC_DB}'" -e "^Using '${KC_DB}' database." -e "^Using password file: ${KC_PASSFILE}" -e "^Decrypting\.\.\." |$SHA1_BIN |cut -d' ' -f1)
if [ "$SHA1" = '040b16edbb11c9e9e3da9c09389000a34d473a6a' ];then
echo "$0 test ok (current chain, ignore case)!"
else
echo "$0 test failed (current chain, ignore case)!"
exit 1
fi
printf "/! ^d.*.[abck]ey[1-9]$\n" |${KC_RUN} -b -k ${KC_DB} -p ${KC_PASSFILE}
SHA1=$(printf "/! ^d.*.[abck]ey[1-9]+$\n" |${KC_RUN} -b -k ${KC_DB} -p ${KC_PASSFILE} |grep -E -v -e '^<default% >' -e "^Opening '${KC_DB}'" -e "^Using '${KC_DB}' database." -e "^Using password file: ${KC_PASSFILE}" -e "^Decrypting\.\.\." |$SHA1_BIN |cut -d' ' -f1)
if [ "$SHA1" = '2f76d24ccb6ddabe20226c426370f8f2027e38b0' ];then
echo "$0 test ok (current chain, inverse)!"
else
echo "$0 test failed (current chain, inverse)!"
exit 1
fi
printf "/* ^(default)*[abck]ey(test)*[0-9]$\n" |${KC_RUN} -b -k ${KC_DB} -p ${KC_PASSFILE}
SHA1=$(printf "/* ^(default)*[abck]ey(test)*[0-9]$\n" |${KC_RUN} -b -k ${KC_DB} -p ${KC_PASSFILE} |grep -E -v -e '^<default% >' -e "^Opening '${KC_DB}'" -e "^Using '${KC_DB}' database." -e "^Using password file: ${KC_PASSFILE}" -e "^Decrypting\.\.\." |$SHA1_BIN |cut -d' ' -f1)
if [ "$SHA1" = 'bed82c25199d5e6a22f843fa7859cadabb147cbc' ];then
echo "$0 test ok (all chains)!"
else
echo "$0 test failed (all chains)!"
exit 1
fi
printf "/!* ^(default)*[abck]ey(test)*[1-9]$\n" |${KC_RUN} -b -k ${KC_DB} -p ${KC_PASSFILE}
SHA1=$(printf "/!* ^(default)*[abck]ey(test)*[1-9]$\n" |${KC_RUN} -b -k ${KC_DB} -p ${KC_PASSFILE} |grep -E -v -e '^<default% >' -e "^Opening '${KC_DB}'" -e "^Using '${KC_DB}' database." -e "^Using password file: ${KC_PASSFILE}" -e "^Decrypting\.\.\." |$SHA1_BIN |cut -d' ' -f1)
if [ "$SHA1" = '0a0c9d55e2809052c23c607ed581eba6dc3295a5' ];then
echo "$0 test ok (all chains, inverse)!"
else
echo "$0 test failed (all chains, inverse)!"
exit 1
fi
printf "/!*i ^(default)*[ABCK]EY(tESt)*[1-9]$\n" |${KC_RUN} -b -k ${KC_DB} -p ${KC_PASSFILE}
SHA1=$(printf "/!*i ^(default)*[ABCK]EY(tESt)*[1-9]$\n" |${KC_RUN} -b -k ${KC_DB} -p ${KC_PASSFILE} |grep -E -v -e '^<default% >' -e "^Opening '${KC_DB}'" -e "^Using '${KC_DB}' database." -e "^Using password file: ${KC_PASSFILE}" -e "^Decrypting\.\.\." |$SHA1_BIN |cut -d' ' -f1)
if [ "$SHA1" = '0a0c9d55e2809052c23c607ed581eba6dc3295a5' ];then
echo "$0 test ok (all chains, inverse, ignore case)!"
else
echo "$0 test failed (all chains, inverse, ignore case)!"
exit 1
fi
printf "/ nonexistent\n" |${KC_RUN} -b -k ${KC_DB} -p ${KC_PASSFILE}
SHA1=$(printf "/ nonexistent\n" |${KC_RUN} -b -k ${KC_DB} -p ${KC_PASSFILE} |grep -E -v -e '^<default% >' -e "^Opening '${KC_DB}'" -e "^Using '${KC_DB}' database." -e "^Using password file: ${KC_PASSFILE}" -e "^Decrypting\.\.\." |$SHA1_BIN |cut -d' ' -f1)
if [ "$SHA1" = '0fe706a810deffbafd203d78867e620c6bc2677f' ];then
echo "$0 test ok (nonexistent)!"
else
echo "$0 test failed (nonexistent)!"
exit 1
fi
printf "c/ chain\n" |${KC_RUN} -b -k ${KC_DB} -p ${KC_PASSFILE}
SHA1=$(printf "c/ chain\n" |${KC_RUN} -b -k ${KC_DB} -p ${KC_PASSFILE} |grep -E -v -e '^<default% >' -e "^Opening '${KC_DB}'" -e "^Using '${KC_DB}' database." -e "^Using password file: ${KC_PASSFILE}" -e "^Decrypting\.\.\." |$SHA1_BIN |cut -d' ' -f1)
if [ "$SHA1" = 'd04c0b373b429a0ef466e25be38946e5dde1e915' ];then
echo "$0 test ok (c/)!"
else
echo "$0 test failed (c/)!"
exit 1
fi
printf "c/i chain\n" |${KC_RUN} -b -k ${KC_DB} -p ${KC_PASSFILE}
SHA1=$(printf "c/i chain\n" |${KC_RUN} -b -k ${KC_DB} -p ${KC_PASSFILE} |grep -E -v -e '^<default% >' -e "^Opening '${KC_DB}'" -e "^Using '${KC_DB}' database." -e "^Using password file: ${KC_PASSFILE}" -e "^Decrypting\.\.\." |$SHA1_BIN |cut -d' ' -f1)
if [ "$SHA1" = 'd04c0b373b429a0ef466e25be38946e5dde1e915' ];then
echo "$0 test ok (c/i, ignore case)!"
else
echo "$0 test failed (c/i, ignore case)!"
exit 1
fi
printf "c/! chain\n" |${KC_RUN} -b -k ${KC_DB} -p ${KC_PASSFILE}
SHA1=$(printf "c/! chain\n" |${KC_RUN} -b -k ${KC_DB} -p ${KC_PASSFILE} |grep -E -v -e '^<default% >' -e "^Opening '${KC_DB}'" -e "^Using '${KC_DB}' database." -e "^Using password file: ${KC_PASSFILE}" -e "^Decrypting\.\.\." |$SHA1_BIN |cut -d' ' -f1)
if [ "$SHA1" = '205d065455c5977fea18fdb8521b87151503cec0' ];then
echo "$0 test ok (c/, inverse)!"
else
echo "$0 test failed (c/, inverse)!"
exit 1
fi
exit 0
|
levaidaniel/kc
|
regress/cmd_searchre.sh
|
Shell
|
bsd-2-clause
| 5,110 |
#!/bin/bash
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Loads the local shared library, and runs all of the test cases in tests/
# against it
set -e
cd $(dirname $0)/../../..
root=$(pwd)
cd src/php/bin
source ./determine_extension_dir.sh
# in some jenkins macos machine, somehow the PHP build script can't find libgrpc.dylib
export DYLD_LIBRARY_PATH=$root/libs/$config
php $extension_dir -d max_execution_time=300 $(which phpunit) -v --debug --strict \
../tests/unit_tests
|
bjori/grpc
|
src/php/bin/run_tests.sh
|
Shell
|
bsd-3-clause
| 1,962 |
#!/bin/bash -eu
# Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# Run tests for cgpt utility.
# Load common constants and variables.
. "$(dirname "$0")/common.sh"
CGPT=$(readlink -f "${1:-./cgpt}")
[ -x "$CGPT" ] || error "Can't execute $CGPT"
# Run tests in a dedicated directory for easy cleanup or debugging.
DIR="${TEST_DIR}/cgpt_test_dir"
[ -d "$DIR" ] || mkdir -p "$DIR"
warning "testing $CGPT in $DIR"
cd "$DIR"
DEV=fake_dev.bin
rm -f ${DEV}
echo "Test the cgpt create command..."
# test basic create and extend
$CGPT create -c -s 100 ${DEV} || error
[ $(stat --format=%s ${DEV}) -eq $((100*512)) ] || error
$CGPT create -c -s 200 ${DEV} || error
[ $(stat --format=%s ${DEV}) -eq $((200*512)) ] || error
$CGPT create -s 300 ${DEV} || error
[ $(stat --format=%s ${DEV}) -eq $((300*512)) ] || error
$CGPT create -s 200 ${DEV} || error
[ $(stat --format=%s ${DEV}) -eq $((300*512)) ] || error
# test argument requirements
$CGPT create -c ${DEV} &>/dev/null && error
# boy it'd be nice if dealing with block devices didn't always require root
if [ "$(id -u)" -ne 0 ]; then
echo "Skipping cgpt create tests w/ block devices (requires root)"
else
rm -f ${DEV}
$CGPT create -c -s 100 ${DEV}
loop=$(losetup -f --show ${DEV}) || error
trap "losetup -d ${loop}" EXIT
$CGPT create -c -s 100 ${loop} || error
$CGPT create -c -s 200 ${loop} && error
losetup -d ${loop}
trap - EXIT
fi
echo "Test that cgpt repair handles size changes"
# Use an alternate tool for reading for verification purposes
SGDISK=$(type -p sgdisk || echo /usr/sbin/sgdisk)
[[ -x "${SGDISK}" ]] || SGDISK=""
verify() {
if [[ -n "$SGDISK" ]]; then
$SGDISK --verify ${DEV} | grep -q "No problems found." \
|| error "sgdisk dislikes cgpt's disk!"
else
echo "Skipping extra verification with sgdisk"
fi
}
rm -f ${DEV}
$CGPT create -c -s 100 ${DEV} || error
$CGPT boot -p ${DEV} >/dev/null || error
verify
truncate --size=+1M ${DEV}
$CGPT repair ${DEV} || error
verify
echo "Test that cgpt preserves MBR boot code"
dd if=/dev/urandom of=${DEV}.mbr bs=446 count=1 status=noxfer || error
rm -f ${DEV}
$CGPT create -c -s 100 ${DEV} || error
dd if=${DEV}.mbr of=${DEV} conv=notrunc status=noxfer || error
$CGPT add -t rootfs -b 50 -s 1 ${DEV} || error
cmp --bytes=446 ${DEV}.mbr ${DEV} || error
# kill the MBR table and the primary GPT, leave the boot code
dd if=/dev/zero of=${DEV} bs=446 seek=1 count=2 conv=notrunc status=noxfer || error
$CGPT repair ${DEV} || error
verify
cmp --bytes=446 ${DEV}.mbr ${DEV} || error
# try switching between hybrid and protective MBRs
$CGPT add -i1 -B1 ${DEV} || error
verify
cmp --bytes=446 ${DEV}.mbr ${DEV} || error
$CGPT add -i1 -B0 ${DEV} || error
verify
cmp --bytes=446 ${DEV}.mbr ${DEV} || error
# resize requires a partitioned block device
if [ "$(id -u)" -ne 0 ]; then
echo "Skipping cgpt resize tests w/ block devices (requires root)"
else
echo "Test cgpt resize w/ ext2 filesystem."
rm -f ${DEV}
$CGPT create -c -s 1000 ${DEV} || error
$CGPT add -i 1 -b 40 -s 900 -t data ${DEV} || error
# FIXME(marineam): cgpt should always write a protective MBR.
# the boot command should only be for making the MBR bootable.
$CGPT boot -p ${DEV} || error
loop=$(losetup -f --show --partscan ${DEV}) || error
trap "losetup -d ${loop}" EXIT
loopp1=${loop}p1
# double check that partitioned loop devices work and have correct size
[ -b $loopp1 ] || error "$loopp1 is not a block device"
[ $(blockdev --getsz $loop) -eq 1000 ] || error
[ $(blockdev --getsz $loopp1) -eq 900 ] || error
mkfs.ext2 $loopp1 || error
# this should do nothing
$CGPT resize $loopp1 || error
[ $(blockdev --getsz $loop) -eq 1000 ] || error
[ $(blockdev --getsz $loopp1) -eq 900 ] || error
# now test a real resize, up to 4MB in sectors
truncate --size=$((8192 * 512)) ${DEV} || error
losetup --set-capacity ${loop} || error
[ $(blockdev --getsz $loop) -eq 8192 ] || error
[ $(blockdev --getsz $loopp1) -eq 900 ] || error
$CGPT resize $loopp1 || error
[ $(blockdev --getsz $loop) -eq 8192 ] || error
[ $(blockdev --getsz $loopp1) -gt 8000 ] || error
losetup -d ${loop}
trap - EXIT
fi
# test passing partition devices to cgpt
if [ "$(id -u)" -ne 0 ]; then
echo "Skipping cgpt tests w/ partition block devices (requires root)"
else
echo "Test cgpt w/ partition block device"
rm -f ${DEV}
$CGPT create -c -s 1000 ${DEV} || error
$CGPT add -i 1 -b 40 -s 900 -t coreos-usr -A 0 ${DEV} || error
loop=$(losetup -f --show --partscan ${DEV}) || error
trap "losetup -d ${loop}" EXIT
loopp1=${loop}p1
# double check that partitioned loop devices work and have correct size
[ -b $loopp1 ] || error "$loopp1 is not a block device"
$CGPT add -S 1 $loopp1 || error
[ $($CGPT show -S ${loopp1}) -eq 1 ] || error
[ $($CGPT show -i 1 -S ${DEV}) -eq 1 ] || error
[ $($CGPT show -P ${loopp1}) -eq 0 ] || error
[ $($CGPT show -i 1 -P ${DEV}) -eq 0 ] || error
$CGPT prioritize $loopp1 || error
[ $($CGPT show -P ${loopp1}) -eq 1 ] || error
[ $($CGPT show -i 1 -P ${DEV}) -eq 1 ] || error
losetup -d ${loop}
trap - EXIT
fi
if [[ -n "$SGDISK" ]]; then
echo "Test cgpt disk GUID"
GUID='01234567-89AB-CDEF-0123-456789ABCDEF'
$CGPT create -s 1000 -g ${GUID} ${DEV} || error
[[ $($SGDISK --print ${DEV} | \
gawk -F': ' '/Disk identifier/ { print $2 }') == ${GUID} ]] || error
else
echo "Skipping cpgt disk GUID test because sgdisk wasn't found"
fi
echo "Create an empty file to use as the device..."
NUM_SECTORS=1000
rm -f ${DEV}
$CGPT create -c -s ${NUM_SECTORS} ${DEV}
echo "Create a bunch of partitions, using the real GUID types..."
DATA_START=100
DATA_SIZE=20
DATA_LABEL="data stuff"
DATA_GUID='0fc63daf-8483-4772-8e79-3d69d8477de4'
DATA_NUM=1
KERN_START=200
KERN_SIZE=30
KERN_LABEL="kernel stuff"
KERN_GUID='fe3a2a5d-4f32-41a7-b725-accc3285a309'
KERN_NUM=2
ROOTFS_START=300
ROOTFS_SIZE=40
ROOTFS_LABEL="rootfs stuff"
ROOTFS_GUID='3cb8e202-3b7e-47dd-8a3c-7ff2a13cfcec'
ROOTFS_NUM=3
ESP_START=400
ESP_SIZE=50
ESP_LABEL="ESP stuff"
ESP_GUID='c12a7328-f81f-11d2-ba4b-00a0c93ec93b'
ESP_NUM=4
FUTURE_START=500
FUTURE_SIZE=60
FUTURE_LABEL="future stuff"
FUTURE_GUID='2e0a753d-9e48-43b0-8337-b15192cb1b5e'
FUTURE_NUM=5
RANDOM_START=600
RANDOM_SIZE=70
RANDOM_LABEL="random stuff"
RANDOM_GUID='2364a860-bf63-42fb-a83d-9ad3e057fcf5'
RANDOM_NUM=6
$CGPT add -b ${DATA_START} -s ${DATA_SIZE} -t ${DATA_GUID} \
-l "${DATA_LABEL}" ${DEV}
$CGPT add -b ${KERN_START} -s ${KERN_SIZE} -t ${KERN_GUID} \
-l "${KERN_LABEL}" ${DEV}
$CGPT add -b ${ROOTFS_START} -s ${ROOTFS_SIZE} -t ${ROOTFS_GUID} \
-l "${ROOTFS_LABEL}" ${DEV}
$CGPT add -b ${ESP_START} -s ${ESP_SIZE} -t ${ESP_GUID} \
-l "${ESP_LABEL}" ${DEV}
$CGPT add -b ${FUTURE_START} -s ${FUTURE_SIZE} -t ${FUTURE_GUID} \
-l "${FUTURE_LABEL}" ${DEV}
$CGPT add -b ${RANDOM_START} -s ${RANDOM_SIZE} -t ${RANDOM_GUID} \
-l "${RANDOM_LABEL}" ${DEV}
echo "Extract the start and size of given partitions..."
X=$($CGPT show -b -i $DATA_NUM ${DEV})
Y=$($CGPT show -s -i $DATA_NUM ${DEV})
[ "$X $Y" = "$DATA_START $DATA_SIZE" ] || error
X=$($CGPT show -b -i $KERN_NUM ${DEV})
Y=$($CGPT show -s -i $KERN_NUM ${DEV})
[ "$X $Y" = "$KERN_START $KERN_SIZE" ] || error
X=$($CGPT show -b -i $ROOTFS_NUM ${DEV})
Y=$($CGPT show -s -i $ROOTFS_NUM ${DEV})
[ "$X $Y" = "$ROOTFS_START $ROOTFS_SIZE" ] || error
X=$($CGPT show -b -i $ESP_NUM ${DEV})
Y=$($CGPT show -s -i $ESP_NUM ${DEV})
[ "$X $Y" = "$ESP_START $ESP_SIZE" ] || error
X=$($CGPT show -b -i $FUTURE_NUM ${DEV})
Y=$($CGPT show -s -i $FUTURE_NUM ${DEV})
[ "$X $Y" = "$FUTURE_START $FUTURE_SIZE" ] || error
X=$($CGPT show -b -i $RANDOM_NUM ${DEV})
Y=$($CGPT show -s -i $RANDOM_NUM ${DEV})
[ "$X $Y" = "$RANDOM_START $RANDOM_SIZE" ] || error
echo "Change the beginning..."
DATA_START=$((DATA_START + 10))
$CGPT add -i 1 -b ${DATA_START} ${DEV} || error
X=$($CGPT show -b -i 1 ${DEV})
[ "$X" = "$DATA_START" ] || error
echo "Change the size..."
DATA_SIZE=$((DATA_SIZE + 10))
$CGPT add -i 1 -s ${DATA_SIZE} ${DEV} || error
X=$($CGPT show -s -i 1 ${DEV})
[ "$X" = "$DATA_SIZE" ] || error
echo "Change the type..."
$CGPT add -i 1 -t reserved ${DEV} || error
X=$($CGPT show -t -i 1 ${DEV} | tr 'A-Z' 'a-z')
[ "$X" = "$FUTURE_GUID" ] || error
# arbitrary value
$CGPT add -i 1 -t 610a563a-a55c-4ae0-ab07-86e5bb9db67f ${DEV} || error
X=$($CGPT show -t -i 1 ${DEV})
[ "$X" = "610A563A-A55C-4AE0-AB07-86E5BB9DB67F" ] || error
$CGPT add -i 1 -t data ${DEV} || error
X=$($CGPT show -t -i 1 ${DEV} | tr 'A-Z' 'a-z')
[ "$X" = "$DATA_GUID" ] || error
echo "Set the boot partition.."
$CGPT boot -i ${KERN_NUM} ${DEV} >/dev/null
echo "Check the PMBR's idea of the boot partition..."
X=$($CGPT boot ${DEV})
Y=$($CGPT show -u -i $KERN_NUM $DEV)
[ "$X" = "$Y" ] || error
echo "Test the cgpt next command..."
ROOT_A=562de070-1539-4edf-ac33-b1028227d525
ROOT_B=839c1172-5036-4efe-9926-7074340d5772
expect_next() {
local root=$($CGPT next $DEV)
[ "$root" == "$1" ] || error 1 "expected next to be $1 but got $root"
}
# Basic state, one good rootfs
$CGPT create $DEV || error
$CGPT add -i 1 -t coreos-rootfs -u $ROOT_A -b 100 -s 1 -P 1 -S 1 $DEV || error
$CGPT add -i 2 -t coreos-rootfs -u $ROOT_B -b 101 -s 1 -P 0 -S 0 $DEV || error
expect_next $ROOT_A
expect_next $ROOT_A
# Try the other order
$CGPT add -i 1 -P 0 -S 0 $DEV || error
$CGPT add -i 2 -P 1 -S 1 $DEV || error
expect_next $ROOT_B
expect_next $ROOT_B
# Try B, fall back to A
$CGPT add -i 1 -P 0 -S 1 -T 0 $DEV || error
$CGPT add -i 2 -P 1 -S 0 -T 1 $DEV || error
expect_next $ROOT_B
expect_next $ROOT_A
expect_next $ROOT_A
# Try A, fall back to B
$CGPT add -i 1 -P 1 -S 0 -T 1 $DEV || error
$CGPT add -i 2 -P 0 -S 1 -T 0 $DEV || error
expect_next $ROOT_A
expect_next $ROOT_B
expect_next $ROOT_B
echo "Verify that common GPT types have the correct GUID."
# This list should come directly from external documentation.
declare -A GPT_TYPES
# General GPT/UEFI types.
# See UEFI spec "5.3.3 GPT Partition Entry Array"
# http://www.uefi.org/sites/default/files/resources/2_4_Errata_A.pdf
GPT_TYPES[efi]="C12A7328-F81F-11D2-BA4B-00A0C93EC93B"
# BIOS Boot Partition for GRUB
# https://www.gnu.org/software/grub/manual/html_node/BIOS-installation.html
GPT_TYPES[bios]="21686148-6449-6E6F-744E-656564454649"
# MS Windows basic data
GPT_TYPES[mswin-data]="EBD0A0A2-B9E5-4433-87C0-68B6B72699C7"
# General Linux types.
# http://en.wikipedia.org/wiki/GUID_Partition_Table#Partition_type_GUIDs
# http://www.freedesktop.org/software/systemd/man/systemd-gpt-auto-generator.html
# http://www.freedesktop.org/wiki/Specifications/BootLoaderSpec/
GPT_TYPES[linux-data]="0FC63DAF-8483-4772-8E79-3D69D8477DE4"
GPT_TYPES[linux-swap]="0657FD6D-A4AB-43C4-84E5-0933C84B4F4F"
GPT_TYPES[linux-boot]="BC13C2FF-59E6-4262-A352-B275FD6F7172"
GPT_TYPES[linux-home]="933AC7E1-2EB4-4F13-B844-0E14E2AEF915"
GPT_TYPES[linux-lvm]="E6D6D379-F507-44C2-A23C-238F2A3DF928"
GPT_TYPES[linux-raid]="A19D880F-05FC-4D3B-A006-743F0F84911E"
GPT_TYPES[linux-reserved]="8DA63339-0007-60C0-C436-083AC8230908"
GPT_TYPES[data]=${GPT_TYPES[linux-data]}
get_guid() {
$SGDISK --info $1 ${DEV} | awk '/^Partition GUID code:/ {print $4}'
}
if [[ -n "$SGDISK" ]]; then
for type_name in "${!GPT_TYPES[@]}"; do
type_guid="${GPT_TYPES[$type_name]}"
$CGPT create ${DEV}
$CGPT add -t $type_name -b 100 -s 1 ${DEV}
cgpt_guid=$(get_guid 1)
if [[ $cgpt_guid != $type_guid ]]; then
echo "$type_name should be $type_guid" >&2
echo "instead got $cgpt_guid" >&2
error "Invalid GUID for $type_name!"
fi
done
else
echo "Skipping GUID tests because sgdisk wasn't found"
fi
echo "Test the cgpt prioritize command..."
# Input: sequence of priorities
# Output: ${DEV} has coreos-rootfs partitions with the given priorities
make_pri() {
local idx=0
$CGPT create ${DEV}
for pri in "$@"; do
idx=$((idx+1))
$CGPT add -t coreos-rootfs -l "root$idx" -b $((100 + 2 * $idx)) -s 1 -P $pri ${DEV}
done
}
# Output: returns string containing priorities of all kernels
get_pri() {
echo $(
for idx in $($CGPT find -t coreos-rootfs ${DEV} | sed -e s@${DEV}@@); do
$CGPT show -i $idx -P ${DEV}
done
)
}
# Input: list of priorities
# Operation: expects ${DEV} to contain those kernel priorities
assert_pri() {
local expected="$*"
local actual=$(get_pri)
[ "$actual" = "$expected" ] || \
error 1 "expected priority \"$expected\", actual priority \"$actual\""
}
# no coreos-rootfs at all. This should do nothing.
$CGPT create ${DEV}
$CGPT add -t rootfs -b 100 -s 1 ${DEV}
$CGPT prioritize ${DEV}
assert_pri ""
# common install/upgrade sequence
make_pri 2 0 0
$CGPT prioritize -i 1 ${DEV}
assert_pri 1 0 0
$CGPT prioritize -i 2 ${DEV}
assert_pri 1 2 0
$CGPT prioritize -i 1 ${DEV}
assert_pri 2 1 0
$CGPT prioritize -i 2 ${DEV}
assert_pri 1 2 0
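# (observation added for clarity) each "prioritize -i N" call promotes
# partition N to the highest priority and compacts the remaining non-zero
# priorities beneath it, which is exactly what the asserts above encode.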
# lots of coreos-rootfs, all same starting priority, should go to priority 1
make_pri 8 8 8 8 8 8 8 8 8 8 8 0 0 8
$CGPT prioritize ${DEV}
assert_pri 1 1 1 1 1 1 1 1 1 1 1 0 0 1
# now raise them all up again
$CGPT prioritize -P 4 ${DEV}
assert_pri 4 4 4 4 4 4 4 4 4 4 4 0 0 4
# set one of them higher, should leave the rest alone
$CGPT prioritize -P 5 -i 3 ${DEV}
assert_pri 4 4 5 4 4 4 4 4 4 4 4 0 0 4
# set one of them lower, should bring the rest down
$CGPT prioritize -P 3 -i 4 ${DEV}
assert_pri 1 1 2 3 1 1 1 1 1 1 1 0 0 1
# raise a group by including the friends of one partition
$CGPT prioritize -P 6 -i 1 -f ${DEV}
assert_pri 6 6 4 5 6 6 6 6 6 6 6 0 0 6
# resurrect one, should not affect the others
make_pri 0 0 0 0 0 0 0 0 0 0 0 0 0 0
$CGPT prioritize -i 2 ${DEV}
assert_pri 0 1 0 0 0 0 0 0 0 0 0 0 0 0
# resurrect one and all its friends
make_pri 0 0 0 0 0 0 0 0 1 2 0 0 0 0
$CGPT prioritize -P 5 -i 2 -f ${DEV}
assert_pri 5 5 5 5 5 5 5 5 3 4 5 5 5 5
# no options should maintain the same order
$CGPT prioritize ${DEV}
assert_pri 3 3 3 3 3 3 3 3 1 2 3 3 3 3
# squish all the ranks
make_pri 1 1 2 2 3 3 4 4 5 5 0 6 7 7
$CGPT prioritize -P 6 ${DEV}
assert_pri 1 1 1 1 2 2 3 3 4 4 0 5 6 6
# squish the ranks by not leaving room
make_pri 1 1 2 2 3 3 4 4 5 5 0 6 7 7
$CGPT prioritize -P 7 -i 3 ${DEV}
assert_pri 1 1 7 1 2 2 3 3 4 4 0 5 6 6
# squish the ranks while bringing the friends along
make_pri 1 1 2 2 3 3 4 4 5 5 0 6 7 7
$CGPT prioritize -P 6 -i 3 -f ${DEV}
assert_pri 1 1 6 6 1 1 2 2 3 3 0 4 5 5
# squish them pretty hard
make_pri 1 1 2 2 3 3 4 4 5 5 0 6 7 7
$CGPT prioritize -P 2 ${DEV}
assert_pri 1 1 1 1 1 1 1 1 1 1 0 1 2 2
# squish them really really hard (nobody gets reduced to zero, though)
make_pri 1 1 2 2 3 3 4 4 5 5 0 6 7 7
$CGPT prioritize -P 1 -i 3 ${DEV}
assert_pri 1 1 1 1 1 1 1 1 1 1 0 1 1 1
# squish if we try to go too high
make_pri 15 15 14 14 13 13 12 12 11 11 10 10 9 9 8 8 7 7 6 6 5 5 4 4 3 3 2 2 1 1 0
$CGPT prioritize -i 3 ${DEV}
assert_pri 14 14 15 13 12 12 11 11 10 10 9 9 8 8 7 7 6 6 5 5 4 4 3 3 2 2 1 1 1 1 0
$CGPT prioritize -i 5 ${DEV}
assert_pri 13 13 14 12 15 11 10 10 9 9 8 8 7 7 6 6 5 5 4 4 3 3 2 2 1 1 1 1 1 1 0
# but if I bring friends I don't have to squish
$CGPT prioritize -i 1 -f ${DEV}
assert_pri 15 15 13 12 14 11 10 10 9 9 8 8 7 7 6 6 5 5 4 4 3 3 2 2 1 1 1 1 1 1 0
# Now make sure that we don't need write access if we're just looking.
if [ "$(id -u)" -eq 0 ]; then
echo "Skipping read vs read-write access tests (doesn't work as root)"
else
echo "Test read vs read-write access..."
chmod 0444 ${DEV} || error
# These should fail
$CGPT create -z ${DEV} 2>/dev/null && error
$CGPT add -i 2 -P 3 ${DEV} 2>/dev/null && error
$CGPT repair ${DEV} 2>/dev/null && error
$CGPT prioritize -i 3 ${DEV} 2>/dev/null && error
# Most 'boot' usage should fail too.
$CGPT boot -p ${DEV} 2>/dev/null && error
dd if=/dev/zero of=fake_mbr.bin bs=100 count=1 2>/dev/null || error
$CGPT boot -b fake_mbr.bin ${DEV} 2>/dev/null && error
$CGPT boot -i 2 ${DEV} 2>/dev/null && error
# These should pass
$CGPT boot ${DEV} >/dev/null || error
$CGPT show ${DEV} >/dev/null || error
$CGPT find -t coreos-rootfs ${DEV} >/dev/null || error
echo "Done."
fi
happy "All tests passed."
|
ionave/seismograph
|
tests/run_cgpt_tests.sh
|
Shell
|
bsd-3-clause
| 16,300 |
# Create the directory structure that will hold the generated HTML pages
mkdir content
pushd content
mkdir fs appliance team
popd
USEROPT="-u $(id -u):$(id -g) -v /etc/passwd:/etc/passwd:ro -v /etc/group:/etc/group:ro"
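# Note (added): USEROPT runs each container as the invoking host user, with
# read-only passwd/group mounts so the uid resolves, keeping the files the
# builds write into content/ from ending up owned by root.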
# Build the "main" websites, each with the Docker image included in its repo
pushd web-team
docker build . -t web-team:latest
docker run $USEROPT -v $WORKSPACE/content/team:/build/team --rm web-team:latest bundle exec jekyll build --disable-disk-cache --destination /build/team
popd
pushd web-cernvm
docker build . -t web-cernvm:latest
docker run $USEROPT -v $WORKSPACE/content/appliance:/build/appliance --rm web-cernvm:latest bundle exec jekyll build --disable-disk-cache --destination /build/appliance
popd
pushd web-cvmfs
docker build . -t web-cvmfs:latest
docker run $USEROPT -v $WORKSPACE/content/fs:/build/fs --rm web-cvmfs:latest bundle exec jekyll build --disable-disk-cache --destination /build/fs
popd
# Copy the team site into the content root so it also serves as the landing page
pushd content
cp -r team/* .
popd
|
cernvm/ci-scripts
|
web/create_website.sh
|
Shell
|
bsd-3-clause
| 990 |
# SPDX-License-Identifier: BSD-3-Clause
source helpers.sh
cleanup() {
rm -f primary.ctx decrypt.ctx key.pub key.priv key.name decrypt.out \
encrypt.out secret.dat key.dat evict.log primary.ctx key.ctx
if [ "$1" != "no-shut-down" ]; then
shut_down
fi
}
trap cleanup EXIT
start_up
cleanup "no-shut-down"
tpm2 clear -Q
tpm2 createprimary -Q -C e -g sha256 -G rsa -c primary.ctx
tpm2 create -Q -g sha256 -G aes -u key.pub -r key.priv -C primary.ctx
tpm2 load -Q -C primary.ctx -u key.pub -r key.priv -n key.name -c key.dat
# Load the context into a specific handle, delete it
tpm2 evictcontrol -Q -c key.dat 0x81010003
tpm2 evictcontrol -Q -c 0x81010003 0x81010003
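# Note (added): 0x81010003 lies in the TPM 2.0 persistent-object handle range
# (0x81000000-0x81FFFFFF); evicting a handle that is already persistent, as
# above, removes the object from TPM NV storage.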
# Load the context into a specific handle, delete it without an explicit -p
tpm2 evictcontrol -Q -C o -c key.dat 0x81010003
tpm2 evictcontrol -Q -C o -c 0x81010003
# Load the context into an available handle, delete it
tpm2 evictcontrol -C o -c key.dat > evict.log
phandle=$(yaml_get_kv evict.log "persistent-handle")
tpm2 evictcontrol -Q -C o -c $phandle
yaml_verify evict.log
# verify that platform hierarchy auto selection for persistent handle works
tpm2 createprimary -C p -c primary.ctx
tpm2 create -C primary.ctx -c key.ctx
tpm2 evictcontrol -C p -c key.ctx > evict.log
phandle=$(yaml_get_kv evict.log persistent-handle)
tpm2 evictcontrol -C p -c $phandle
exit 0
|
01org/tpm2.0-tools
|
test/integration/tests/evictcontrol.sh
|
Shell
|
bsd-3-clause
| 1,371 |
#!/bin/bash -ex
# phb-manage.sh runs manage.py with oblivious hand-wavey default settings
# suitable for management commands that will work despite the wildly ignorant
# nature of hand-wavey default settings. this is especially useful for running
# management commands during the docker build process.
#
# Usage: phb-manage.sh [COMMAND] [ARGS]
#
# Example: phb-manage.sh collectstatic --noinput
export DEBUG=False
export SECRET_KEY=foo
export DATABASE_URL=sqlite://
# export AUTH0_CLIENT_ID=foo
# export AUTH0_CLIENT_SECRET=foo
# export AUTH0_DOMAIN=foo
# export AUTH0_CALLBACK_URL=foo
python3 manage.py $@
|
willkg/standup
|
bin/phb-manage.sh
|
Shell
|
bsd-3-clause
| 610 |
#!/bin/bash
#
# Provisioning File
#
echo "Begin Provisioning "
uname -a
echo "Repository update and upgrade"
sudo apt-get update
sudo apt-get -y upgrade
echo "Install Apache2"
sudo apt-get -y install apache2
sudo cp /vagrant/.vagrant-provision/banthecan.conf /etc/apache2/sites-available
sudo cp /vagrant/.vagrant-provision/banthecan-admin.conf /etc/apache2/sites-available
sudo a2ensite banthecan.conf
sudo a2ensite banthecan-admin.conf
sudo a2dissite 000-default.conf
echo "Install PHP"
sudo apt-get -y install php7.0
sudo apt-get -y install php-xdebug
sudo apt-get -y install libapache2-mod-php7.0
sudo apt-get -y install php7.0-mysql
echo "Configure Apache"
sudo a2enmod rewrite
sudo a2enmod reqtimeout
sudo a2enmod php7.0
sudo cp /vagrant/.vagrant-provision/20-xdebug.ini /etc/php/7.0/apache2/conf.d/20-xdebug.ini
sudo service apache2 restart
echo "Install MariaDB"
sudo apt-get -y install mariadb-server
export DATABASE_PASS="root"
sudo mysqladmin -u root password "$DATABASE_PASS"
sudo mysql -u root -p"$DATABASE_PASS" -e "UPDATE mysql.user SET plugin = 'mysql_native_password' WHERE User='root'"
sudo mysql -u root -p"$DATABASE_PASS" -e "DELETE FROM mysql.user WHERE User='root' AND Host NOT IN ('localhost', '127.0.0.1', '::1')"
sudo mysql -u root -p"$DATABASE_PASS" -e "DELETE FROM mysql.user WHERE User=''"
sudo mysql -u root -p"$DATABASE_PASS" -e "DELETE FROM mysql.db WHERE Db='test' OR Db='test\_%'"
sudo mysql -u root -p"$DATABASE_PASS" -e "FLUSH PRIVILEGES"
sudo mysql -u root -p"$DATABASE_PASS" -e "CREATE DATABASE IF NOT EXISTS banthecan_demo"
sudo mysql -u root -p"$DATABASE_PASS" banthecan_demo < /vagrant/.vagrant-provision/banthecandemo.sql
echo "Provisioning Complete"
|
gooGooGaJoob/banthecan
|
.vagrant-provision/provision.sh
|
Shell
|
bsd-3-clause
| 1,689 |
#!/bin/bash
#
# Launch a docker running the CloudSQL proxy.
# Connect to a database with psql.
# When psql exits, kill the proxy.
#
# Input:
# wsm or stairway - which database to connect to
# sql script
#
# You must have run the write-config script to set up all of the connection information
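# Illustrative invocations (the .sql paths are hypothetical examples):
#   ./run-sql-script.sh wsm ./sql/cleanup.sql
#   ./run-sql-script.sh stairway ./sql/flightlog.sql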
scriptdir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
configdir="$( cd "${scriptdir}/../config" &> /dev/null && pwd )"
dbtype="$1"
sqlscript="$2"
# Check that there is some configuration
if [ ! -e "${configdir}/target.txt" ]; then
echo "No configuration found. Run write-config.sh"
exit 1
fi
target=$(<"${configdir}/target.txt")
# Setup depending on the input
case ${dbtype} in
wsm)
db=$(<"${configdir}/db-name.txt")
dbuser=$(<"${configdir}/db-username.txt")
dbpw=$(<"${configdir}/db-password.txt")
;;
stairway)
db=$(<"${configdir}/stairway-db-name.txt")
dbuser=$(<"${configdir}/stairway-db-username.txt")
dbpw=$(<"${configdir}/stairway-db-password.txt")
;;
*)
echo "Specify wsm or stairway to choose which database to connect to"
exit 1
;;
esac
port=5434
echo "Connecting to $db in target $target"
dod=$( "${scriptdir}/launch-sql-proxy.sh" "$port" )
echo "Launched docker container: ${dod}"
# Setup cleanup of the docker container
function kill_docker()
{
if [ -n "${dod}" ]; then
echo "Stopping the CloudSQL proxy docker"
docker kill "${dod}"
fi
}
trap kill_docker EXIT
PGPASSWORD="${dbpw}" psql "host=127.0.0.1 port=${port} sslmode=disable dbname=${db} user=${dbuser}" < "${sqlscript}"
|
DataBiosphere/terra-workspace-manager
|
scripts/run-sql-script.sh
|
Shell
|
bsd-3-clause
| 1,593 |
#!/bin/bash
set -e
export HOME="/root"
export PATH="${ROOT}/build/bin:${PATH}"
export BACKOFF_PERIOD="5s"
main() {
if [[ -n "${ROUTER_IP}" ]] && [[ -n "${DOMAIN}" ]]; then
echo "${ROUTER_IP}" \
"${DOMAIN}" \
"controller.${DOMAIN}" \
"git.${DOMAIN}" \
"images.${DOMAIN}" \
"dashboard.${DOMAIN}" \
>> /etc/hosts
fi
flynn cluster add ${CLUSTER_ADD_ARGS}
cd "${ROOT}/test"
exec /bin/flynn-test $@
}
main "$@"
|
lmars/flynn
|
test/run.sh
|
Shell
|
bsd-3-clause
| 462 |
#!/bin/bash -e
# Settings
REPO_PATH=git@github.com:pulsar-chem/Pulsar-Core.git
HTML_PATH=html
COMMIT_USER="Documentation Builder"
CHANGESET=$(git rev-parse --verify HEAD)
# Get a clean version of the HTML documentation repo.
git clone -b gh-pages "${REPO_PATH}" --single-branch ${HTML_PATH}
# rm all the files through git to prevent stale files.
cd ${HTML_PATH}
git rm -rf .
cd -
# Generate the HTML documentation.
doxygen
# Create and commit the documentation repo.
cd ${HTML_PATH}
git add .
git config user.name "${COMMIT_USER}"
git config user.email "<>"
git commit -m "Automated documentation build for changeset ${CHANGESET}."
git push origin gh-pages
cd -
|
pulsar-chem/Pulsar-Core
|
.travis/publish-doxygen.sh
|
Shell
|
bsd-3-clause
| 667 |
#!/usr/bin/env bash
#
# before_install
#
sudo dpkg -i ./epics/*.deb
|
archman/phantasy
|
ci/install_epics.sh
|
Shell
|
bsd-3-clause
| 71 |
#!/usr/bin/env bash
TESTFILE1=$(mktemp -p .)
if [ -x "$(command -v python3)" ]; then
PYTHON=$(command -v python3)
else
PYTHON=$(command -v python)
fi
${PYTHON} << END
import random as rnd
import time as time
rnd.seed(time.time())
randnum = rnd.sample(range(1,101), 18)
f1 = open("${TESTFILE1}", "w+")
for m in randnum:
for n in randnum:
line = str(m) + '_' + str(n) + '_' \
+ str(m) + '_' + str(n) + '\n'
f1.write(line)
f1.close()
END
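# The heredoc above samples 18 distinct values from 1..100 and writes every
# (m, n) pair as an "M_N_LDI_LDO" line, i.e. 324 shapes with LDI=M and LDO=N.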
REDUCE_X=1
REDUCE_X2=1
REDUCE_ROWS=0
REDUCE_OP=0
N_IDX=0
for i in `cat ${TESTFILE1}`
do
M=`echo ${i} | awk -F"_" '{print $1}'`
N=`echo ${i} | awk -F"_" '{print $2}'`
LDI=`echo ${i} | awk -F"_" '{print $3}'`
LDO=`echo ${i} | awk -F"_" '{print $4}'`
  echo ${M} ${N} ${LDI} ${LDO}
N_ADJ=$((${N} + ${N_IDX}))
./eltwise_unary_reduce ${M} ${N_ADJ} 100 ${REDUCE_X} ${REDUCE_X2} ${REDUCE_ROWS} ${REDUCE_OP} 1 ${N_IDX} 0
done
rm ${TESTFILE1}
|
hfp/libxsmm
|
samples/eltwise/kernel_test/reduce_add_cols_x_x2_16b_gtld.sh
|
Shell
|
bsd-3-clause
| 924 |
#!/bin/bash
USE_LIBJPEG=0
USE_LIBPNG=0
if [ "$#" -eq 0 ]; then
echo "Usage: install_submodules.sh <TIRAMISU_ROOT_PATH>"
exit 1
fi
PROJECT_SRC_DIR=$1
CMAKE=cmake
CORES=1
# For Travis build we skip LLVM installation and use a custom binary.
# Second argument specifies the custom path of the LLVM bin dir.
if [ "$2" = "" ]; then
LLVM_BIN_DIR=${PROJECT_SRC_DIR}/3rdParty/llvm/build/bin
else
LLVM_BIN_DIR="$2"
fi
set -e
. ${PROJECT_SRC_DIR}/utils/scripts/functions.sh
# Install ISL into 3rdParty and Halide into the root of the tiramisu directory
# POSSIBLE ERRORS
# 1. If you get a permissions error when trying to clone these submodules, you may not have your ssh keys set up correctly
# in github. We use ssh to clone the repos, not https.
#
# 2. If you get an error that some file in a repo was not found (such as autogen.sh in isl), you may have partially
# cloned a submodule before but not completed it, meaning some of the files are missing. Make sure to delete the .git
# folder in that submodule directory to force the clone to happen again.
echo ${PROJECT_SRC_DIR}
echo "#### Cloning submodules ####"
echo_and_run_cmd "cd ${PROJECT_SRC_DIR}"
echo_and_run_cmd "git submodule update --init --remote --recursive"
# Get ISL installed
echo "#### Installing isl ####"
echo_and_run_cmd "cd ${PROJECT_SRC_DIR}/3rdParty/isl"
if [ ! -d "build" ]; then
echo_and_run_cmd "mkdir build/"
fi
echo_and_run_cmd "touch aclocal.m4 Makefile.am Makefile.in"
echo_and_run_cmd "./configure --prefix=$PWD/build/ --with-int=imath"
echo_and_run_cmd "make -j $CORES"
echo_and_run_cmd "make install"
echo "Done installing isl"
# Get LLVM installed
if [ "$2" = "" ]; then
echo "#### Installing LLVM ####"
echo_and_run_cmd "cd ${PROJECT_SRC_DIR}/3rdParty/llvm"
if [ ! -d "build" ]; then
echo_and_run_cmd "mkdir build/"
fi
if [ ! -d "prefix" ]; then
echo_and_run_cmd "mkdir prefix/"
fi
echo_and_run_cmd "cd build"
echo_and_run_cmd "$CMAKE -DHAVE_LIBEDIT=0 -DLLVM_ENABLE_TERMINFO=OFF -DLLVM_TARGETS_TO_BUILD='X86;ARM;AArch64;Mips;NVPTX;PowerPC' -DLLVM_ENABLE_ASSERTIONS=ON -DCMAKE_BUILD_TYPE=Release .. -DCMAKE_INSTALL_PREFIX=$PWD/../prefix/ -DLLVM_EXTERNAL_CLANG_SOURCE_DIR=${PROJECT_SRC_DIR}/3rdParty/clang"
echo_and_run_cmd "make -j $CORES"
echo_and_run_cmd "make install"
else
echo "#### Skipping LLVM Installation ####"
fi
# Set LLVM_CONFIG and CLANG env variables
export CLANG=${LLVM_BIN_DIR}/clang
export LLVM_CONFIG=${LLVM_BIN_DIR}/llvm-config
# Get halide installed
echo "#### Installing Halide ####"
echo_and_run_cmd "cd ${PROJECT_SRC_DIR}/3rdParty/Halide"
echo_and_run_cmd "git checkout tiramisu_64_bit"
echo_and_run_cmd "git pull"
if [ "${USE_LIBJPEG}" = "0" ]; then
CXXFLAGS_JPEG="-DHALIDE_NO_JPEG"
fi
if [ "${USE_LIBPNG}" = "0" ]; then
CXXFLAGS_PNG="-DHALIDE_NO_PNG"
fi
echo_and_run_cmd "make clean"
make CXXFLAGS="${CXXFLAGS_JPEG} ${CXXFLAGS_PNG}" -j $CORES
cd ${PROJECT_SRC_DIR}
echo "Done installing Halide"
|
rbaghdadi/COLi
|
utils/scripts/install_submodules.sh
|
Shell
|
mit
| 3,003 |
setopt correct_all
alias man='nocorrect man'
alias mv='nocorrect mv'
alias mysql='nocorrect mysql'
alias mkdir='nocorrect mkdir'
|
xsyn/dotfiles
|
zsh/lib/correction.zsh
|
Shell
|
mit
| 130 |
#!/bin/bash
cd "$(dirname "$BASH_SOURCE")" \
&& source '../../utils.sh' \
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
main() {
# Remove packages that were automatically installed to satisfy
    # dependencies for other packages and are no longer needed
execute 'sudo apt-get autoremove -qqy' 'Autoremove'
}
main
|
gaboesquivel/dotfiles
|
os/ubuntu/installs/cleanup.sh
|
Shell
|
mit
| 365 |
#!/usr/bin/env sh
pacman -Qqe > ../pacman.txt
yay -Qqe > ../yay.txt
|
aflavio/another-dotfiles
|
scripts/bin/installed_apps.sh
|
Shell
|
mit
| 69 |
#!/bin/sh
PROJ=test_migrate
SETTINGS=--settings=tests.$PROJ.settings
# test for Issue 190
# "Best way to run in environments without connectivity to Salesforce API? #190"
# Test with two "sqlite3" databases and SalesforceModels
# Verify that tables with SalesforceModels
# - are created in "salesforce" database and
# - not created in "default" database
# The default SalesforceRouter must be used.
python manage.py makemigrations $SETTINGS $PROJ &&
python manage.py test $SETTINGS tests.$PROJ &&
python manage.py migrate $SETTINGS --verbosity=0 &&
python manage.py migrate $SETTINGS --database=salesforce --verbosity=0 &&
echo .tables | sqlite3 db_tmp_salesforce | grep -w Lead &&
echo .tables | sqlite3 db_tmp_salesforce | grep -w Contact
ret=$?
echo .tables | sqlite3 db_tmp_default | grep -w Contact
ret2=$?
# delete explicit names to see a warning if they don't exist
rm db_tmp_default
rm db_tmp_salesforce
rm tests/$PROJ/migrations/0001_initial.py
ret3=$?
rm -rf tests/$PROJ/migrations/
test $ret -eq 0 -a $ret2 -eq 1 -a $ret3 -eq 0
ret=$?
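# Expected outcomes: ret=0 (migrations ran and the Lead/Contact tables exist
# in the salesforce DB), ret2=1 (grep finds no Contact table leaked into the
# default DB), and ret3=0 (the generated migration file existed and was removed).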
if test $ret -ne 0; then
echo "Test failed"
false
fi
|
django-salesforce/django-salesforce
|
tests/test_migrate/test.sh
|
Shell
|
mit
| 1,116 |
#!/bin/sh
case "$1" in
0)
echo AWS_DEFAULT_REGION=us-east-1
echo AWS_REGION=us-east-1
echo RACK_BUILD_INSTANCE=t2.small
;;
1)
echo AWS_DEFAULT_REGION=us-west-2
echo AWS_REGION=us-west-2
echo RACK_PRIVATE=true
;;
2)
echo AWS_DEFAULT_REGION=eu-west-1
echo AWS_REGION=eu-west-1
;;
*)
echo AWS_DEFAULT_REGION=unknown
echo AWS_REGION=unknown
;;
esac
|
mattatcha/terraform-provider-convox
|
vendor/github.com/convox/rack/ci/region.sh
|
Shell
|
mit
| 411 |
#! /bin/bash -eux
# Copyright (C) 2015 [email protected]
source ${srcdir}/common.sh
readonly faketime=/usr/lib/x86_64-linux-gnu/faketime/libfaketime.so.1
if ! [[ -e $faketime ]]; then
echo "libfaketime not found; can't execute testcases"
exit 77
fi
export LD_PRELOAD=$faketime
export FAKETIME="2014-01-01 12:00:00"
$bin --help
# WIP
exit 77
|
ajfclark/crane_gps_watch
|
test/012_dst_time.sh
|
Shell
|
gpl-2.0
| 361 |
#!/bin/bash
# Copyright (c) 2010, 2013, Oracle and/or its affiliates. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
./stop_myblockchaind.sh
# need some extra time
#for ((i=0; i<1; i++)) ; do printf "." ; sleep 1 ; done
./stop_ndb.sh
|
MrDunne/myblockchain
|
storage/ndb/test/crund/scripts/stop_cluster.sh
|
Shell
|
gpl-2.0
| 866 |
#!/bin/bash
source test-generate-common.sh
source test-common.sh
source test-config.sh
source test-generate-hadoop-helper.sh
source test-generate-zookeeper-helper.sh
__GenerateHiveStandardTests_StandardPerformanceEval() {
local hiveversion=$1
local hadoopversion=$2
local zookeeperversion=$3
local javaversion=$4
cp ../submission-scripts/script-${submissiontype}/magpie.${submissiontype}-hadoop-and-hive magpie.${submissiontype}-hadoop-and-hive-hadoop-${hadoopversion}-hive-${hiveversion}-zookeeper-${zookeeperversion}-hdfsoverlustre-zookeeper-not-shared-zookeeper-networkfs-run-hivetestbench
sed -i \
-e 's/export HADOOP_VERSION="\(.*\)"/export HADOOP_VERSION="'"${hadoopversion}"'"/' \
-e 's/export HIVE_VERSION="\(.*\)"/export HIVE_VERSION="'"${hiveversion}"'"/' \
-e 's/export ZOOKEEPER_VERSION="\(.*\)"/export ZOOKEEPER_VERSION="'"${zookeeperversion}"'"/' \
magpie.${submissiontype}-hadoop-and-hive-hadoop-${hadoopversion}-hive-${hiveversion}-zookeeper-${zookeeperversion}*
SetupHDFSoverLustreStandard `ls \
magpie.${submissiontype}-hadoop-and-hive-hadoop-${hadoopversion}-hive-${hiveversion}-zookeeper-${zookeeperversion}*hdfsoverlustre*`
}
GenerateHiveStandardTests() {
cd ${MAGPIE_SCRIPTS_HOME}/testsuite/
echo "Making Hive Standard Tests"
for testfunction in __GenerateHiveStandardTests_StandardPerformanceEval
do
for testgroup in ${hive_test_groups}
do
local hadoopversion="${testgroup}_hadoopversion"
local zookeeperversion="${testgroup}_zookeeperversion"
local javaversion="${testgroup}_javaversion"
if ! CheckForDependency "Hive" "Hadoop" ${!hadoopversion}
then
continue
fi
if ! CheckForDependency "Hive" "Zookeeper" ${!zookeeperversion}
then
continue
fi
for testversion in ${!testgroup}
do
${testfunction} ${testversion} ${!hadoopversion} ${!zookeeperversion} ${!javaversion}
done
done
done
}
__GenerateHiveDependencyTests_Dependency() {
local hiveversion=$1
local hadoopversion=$2
local zookeeperversion=$3
local javaversion=$4
# HDFS over Lustre
cp ../submission-scripts/script-${submissiontype}/magpie.${submissiontype}-hadoop-and-hive magpie.${submissiontype}-hadoop-and-hive-DependencyHive-hadoop-${hadoopversion}-hive-${hiveversion}-zookeeper-${zookeeperversion}-hdfsoverlustre-run-hivetestbench
# HDFS over networkFS
cp ../submission-scripts/script-${submissiontype}/magpie.${submissiontype}-hadoop-and-hive magpie.${submissiontype}-hadoop-and-hive-DependencyHive-hadoop-${hadoopversion}-hive-${hiveversion}-zookeeper-${zookeeperversion}-hdfsovernetworkfs-run-hivetestbench
sed -i \
-e 's/export HADOOP_VERSION="\(.*\)"/export HADOOP_VERSION="'"${hadoopversion}"'"/' \
-e 's/export HIVE_VERSION="\(.*\)"/export HIVE_VERSION="'"${hiveversion}"'"/' \
-e 's/export ZOOKEEPER_VERSION="\(.*\)"/export ZOOKEEPER_VERSION="'"${zookeeperversion}"'"/' \
magpie.${submissiontype}-hadoop-and-hive-DependencyHive-hadoop-${hadoopversion}-hive-${hiveversion}-zookeeper-${zookeeperversion}*
SetupZookeeperNetworkFSDependency "Hive" ${hiveversion} "hdfsoverlustre" `ls \
magpie.${submissiontype}-hadoop-and-hive-DependencyHive-hadoop-${hadoopversion}-hive-${hiveversion}-zookeeper-${zookeeperversion}*hdfsoverlustre*`
SetupZookeeperNetworkFSDependency "Hive" ${hiveversion} "hdfsovernetworkfs" `ls \
magpie.${submissiontype}-hadoop-and-hive-DependencyHive-hadoop-${hadoopversion}-hive-${hiveversion}-zookeeper-${zookeeperversion}*hdfsovernetworkfs*`
SetupHDFSoverLustreDependency "Hive" ${hiveversion} `ls \
magpie.${submissiontype}-hadoop-and-hive-DependencyHive-hadoop-${hadoopversion}-hive-${hiveversion}-zookeeper-${zookeeperversion}*hdfsoverlustre*`
SetupHDFSoverNetworkFSDependency "Hive" ${hiveversion} `ls \
magpie.${submissiontype}-hadoop-and-hive-DependencyHive-hadoop-${hadoopversion}-hive-${hiveversion}-zookeeper-${zookeeperversion}*hdfsovernetworkfs*`
JavaCommonSubstitution ${javaversion} `ls magpie.${submissiontype}-hadoop-and-hive-DependencyHive-hadoop-${hadoopversion}-hive-${hiveversion}-zookeeper-${zookeeperversion}*`
}
GenerateHiveDependencyTests() {
cd ${MAGPIE_SCRIPTS_HOME}/testsuite/
echo "Making Hive Dependency Tests"
for testfunction in __GenerateHiveDependencyTests_Dependency
do
for testgroup in ${hive_test_groups}
do
local hadoopversion="${testgroup}_hadoopversion"
local zookeeperversion="${testgroup}_zookeeperversion"
local javaversion="${testgroup}_javaversion"
if ! CheckForDependency "Hive" "Hadoop" ${!hadoopversion}
then
continue
fi
if ! CheckForHadoopDecomissionMinimum ${testfunction} "Hive" "Hadoop" ${!hadoopversion} ${hadoop_decomissionhdfs_minimum}
then
continue
fi
if ! CheckForDependency "Hive" "Zookeeper" ${!zookeeperversion}
then
continue
fi
for testversion in ${!testgroup}
do
${testfunction} ${testversion} ${!hadoopversion} ${!zookeeperversion} ${!javaversion}
done
done
done
}
GenerateHivePostProcessing () {
files=`find . -maxdepth 1 -name "magpie.${submissiontype}*run-hivetestbench*"`
if [ -n "${files}" ]
then
sed -i -e "s/FILENAMESEARCHREPLACEKEY/run-hivetestbench-FILENAMESEARCHREPLACEKEY/" ${files}
fi
}
|
chu11/magpie
|
testsuite/test-generate-hive.sh
|
Shell
|
gpl-2.0
| 5,718 |
# Build a blank 1.44 MB floppy image and install the bootloader in sector 0.
dd if=/dev/zero of=floppya.img bs=512 count=2880
dd if=bootload of=floppya.img bs=512 count=1 conv=notrunc
# Compile the C kernel with bcc, assemble the stub, and link them with ld86.
bcc -ansi -c -o kernel.o kernel.c
as86 kernel.asm -o kernel_asm.o
ld86 -o kernel -d kernel.o kernel_asm.o
# Place the kernel at sector 3 of the image and boot it in Bochs.
dd if=kernel of=floppya.img bs=512 conv=notrunc seek=3
bochs -f opsys.bxrc
|
jmibarrad/scratchOS
|
ProjectB/compileOS.sh
|
Shell
|
gpl-2.0
| 300 |
#!/bin/bash
#
# cppcheck.sh
# Script to run CppCheck Static Analyzer.
# http://cppcheck.sourceforge.net/
#
# $Id: cppcheck.sh 45542 2012-10-14 19:30:33Z eapache $
#
# Wireshark - Network traffic analyzer
# By Gerald Combs <[email protected]>
# Copyright 2012 Gerald Combs
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
CPPCHECK=`which cppcheck`
CPPCHECK_DIR=`dirname $0`
THREADS=4
QUIET="--quiet"
SUPPRESSIONS="--suppressions-list=$CPPCHECK_DIR/suppressions"
INCLUDES="--includes-file=$CPPCHECK_DIR/includes"
MODE="gcc"
while getopts "ahj:v" OPTCHAR ; do
case $OPTCHAR in
a) SUPPRESSIONS=" " ;;
h) MODE="html" ;;
j) THREADS="$OPTARG" ;;
v) QUIET=" " ;;
esac
done
shift $(($OPTIND-1))
if [ "$MODE" = "gcc" ]; then
TEMPLATE="gcc"
elif [ "$MODE" = "html" ]; then
echo "<html><body><table border=1>"
echo "<tr><th>File</th><th>Line</th><th>Severity</th>"
echo "<th>Message</th><th>ID</th></tr>"
TEMPLATE="<tr><td>{file}</td><td>{line}</td><td>{severity}</td><td>{message}</td><td>{id}</td></tr>"
fi
if [ $# -eq 0 ]; then
TARGET="."
else
TARGET=$@
fi
# Use a little-documented feature of the shell to pass SIGINTs only to the
# child process (cppcheck in this case). That way the final 'echo' still
# runs and we aren't left with broken HTML.
trap : INT
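# A minimal standalone sketch of the same trick (illustrative, not part of
# this script): run it in a shell and press Ctrl-C during the sleep.
#   trap : INT                 # parent shell ignores SIGINT...
#   sleep 30                   # ...but the foreground child still dies to it
#   echo "cleanup still runs"  # so the lines after the child always execute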
$CPPCHECK --force --enable=style $QUIET \
$SUPPRESSIONS $INCLUDES \
--template=$TEMPLATE \
-j $THREADS $TARGET 2>&1
if [ "$MODE" = "html" ]; then
echo "</table></body></html>"
fi
#
# Editor modelines - http://www.wireshark.org/tools/modelines.html
#
# Local variables:
# c-basic-offset: 4
# tab-width: 8
# indent-tabs-mode: nil
# End:
#
# vi: set shiftwidth=4 tabstop=8 expandtab:
# :indentSize=4:tabSize=8:noTabs=true:
#
|
MavEtJu/wireshark-lean
|
tools/cppcheck/cppcheck.sh
|
Shell
|
gpl-2.0
| 2,469 |
#!/bin/sh
#
# $Id$
# OWFS setup routines for SUSE systems
# Written by Paul Alfille and others.
# udev routines by Peter Kropf
# GPL v2 license (like all of OWFS)
# copyright 12/2006 Paul H Alfille
#
### ------------------
### -- Constants -----
### ------------------
OWFS_GROUP=ow
#
#
### -----------------
### -- Group --------
### -----------------
groupadd $OWFS_GROUP
#
### -----------------
### -- Links --------
### -----------------
# Put all the binaries in /usr/bin
# make them part of the "ow" group
# and let only their owner and group read or execute them
OWFS_bin="owfs owhttpd owftpd owserver owread owwrite owpresent owdir"
for x in $OWFS_bin
do
ln -sfv /opt/owfs/bin/$x /usr/bin/$x
done
#
### -----------------
### -- Rules --------
### -----------------
cat >/etc/udev/rules.d/46_ds2490.rules << RULES
BUS=="usb", SYSFS{idVendor}=="04fa", SYSFS{idProduct}=="2490", GROUP="users", MODE="0774", PROGRAM="/bin/sh -c 'K=%k; K=\$\$; printf bus/usb/%%03i/%%03i \$\$ \$\$'", NAME="%c", RUN="/etc/udev/ds2490 '%c'"
RULES
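# Note (added): the rule above matches the DS2490 1-Wire USB adapter (USB
# vendor 04fa, product 2490) and hands the bus/usb/BBB/DDD node printed by
# PROGRAM to the /etc/udev/ds2490 helper written below.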
#
### -----------------
### -- Shell --------
### -----------------
cat >/etc/udev/ds2490 << SHELL
#! /bin/sh -x
/sbin/rmmod ds9490r
MATCH="no"
if [ "\$1" != "" ]; then
if [ -f /proc/\$1 ]; then
chgrp $OWFS_GROUP /proc/\$1 && \
chmod g+rw /proc/\$1 && \
logger ow udev: group set to $OWFS_GROUP and permission g+rw on /proc/\$1
MATCH="yes"
fi
if [ -e /dev/\$1 ]; then
chgrp $OWFS_GROUP /dev/\$1 && \
chmod g+rw /dev/\$1 && \
logger ow udev: group set to $OWFS_GROUP and permission g+rw on /dev/\$1
MATCH="yes"
fi
fi
if [ "\$MATCH" = "no" ]; then
echo ow udev: no device file found for "\$1"
logger ow udev: no device file found for "\$1"
fi
SHELL
chmod 755 /etc/udev/ds2490
|
bootc/owfs-cvsimport
|
src/scripts/usb/suse_setup.sh
|
Shell
|
gpl-2.0
| 1,872 |
#!/bin/bash
# Script to report the platform.
platform='unknown'
unamestr=`uname`
if [[ "$unamestr" == 'Darwin' ]]; then
platform='MAC'
elif [[ "$unamestr" == *CYGWIN* ]]; then
platform='WIN'
elif [[ "$unamestr" == "Linux" ]]; then
platform='LINUX'
fi
echo ${platform}
|
teamfx/openjfx-10-dev-rt
|
modules/jdk.packager/src/test/examples/whichplatform.sh
|
Shell
|
gpl-2.0
| 280 |
#!/bin/bash
cd $(dirname $(readlink -f $0))
java -Dfile.encoding=UTF-8 -jar PhantomBot.jar
|
gmt2001/PhantomBot
|
res/launch-service.sh
|
Shell
|
gpl-3.0
| 93 |
#!/bin/sh
./runctest.sh tBlobSTL
|
jjdmol/LOFAR
|
LCS/Blob/test/tBlobSTL.sh
|
Shell
|
gpl-3.0
| 33 |
#!/bin/bash
loops="${1:-1}"
prefix="${2:-codeclonedetection}"
th="${3:-8}"
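# Usage sketch (all three arguments are optional and default as above):
#   ./run.sh 3 codeclonedetection 8   # 3 loops per variant, threshold 8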
# naive with overlap
for ((c=1;c<=$loops;c++))
do
echo "running java -jar dist/noindex.CloneDetector.jar $prefix $th overlap"
java -Xms4g -Xmx4g -jar dist/noindex.CloneDetector.jar $prefix $th overlap
done
# naive with jaccard
for ((c=1;c<=$loops;c++))
do
echo "running java -jar dist/noindex.CloneDetector.jar $prefix $th jaccard"
java -Xms4g -Xmx4g -jar dist/noindex.CloneDetector.jar $prefix $th jaccard
done
# with prefix overlap
for ((c=1;c<=$loops;c++))
do
echo "running java -jar dist/noindex.CloneDetectorWithFilter.jar $prefix $th 0 pos_filter_none overlap"
java -Xms4g -Xmx4g -jar dist/noindex.CloneDetectorWithFilter.jar $prefix $th 0 pos_filter_none overlap
done
# with prefix and jaccard
for ((c=1;c<=$loops;c++))
do
echo "running java -jar dist/noindex.CloneDetectorWithFilter.jar $prefix $th 0 pos_filter_none jaccard"
java -Xms4g -Xmx4g -jar dist/noindex.CloneDetectorWithFilter.jar $prefix $th 0 pos_filter_none jaccard
done
# with prefix and position
for ((c=1;c<=$loops;c++))
do
echo "running java -jar dist/noindex.CloneDetectorWithFilter.jar $prefix $th 0 pos_filter_cv overlap"
java -Xms4g -Xmx4g -jar dist/noindex.CloneDetectorWithFilter.jar $prefix $th 0 pos_filter_cv overlap
done
# with prefix and position with jaccard
for ((c=1;c<=$loops;c++))
do
echo "running java -jar dist/noindex.CloneDetectorWithFilter.jar $prefix $th 0 pos_filter_cv jaccard"
java -Xms4g -Xmx4g -jar dist/noindex.CloneDetectorWithFilter.jar $prefix $th 0 pos_filter_cv jaccard
done
# with prefix and position_at_candidate with overlap
for ((c=1;c<=$loops;c++))
do
echo "running java -jar dist/noindex.CloneDetectorWithFilter.jar $prefix $th 0 pos_filter_c overlap"
java -Xms4g -Xmx4g -jar dist/noindex.CloneDetectorWithFilter.jar $prefix $th 0 pos_filter_c overlap
done
# with prefix and position_at_candidate with jaccard
for ((c=1;c<=$loops;c++))
do
echo "running java -jar dist/noindex.CloneDetectorWithFilter.jar $prefix $th 0 pos_filter_c jaccard"
java -Xms4g -Xmx4g -jar dist/noindex.CloneDetectorWithFilter.jar $prefix $th 0 pos_filter_c jaccard
done
# with prefix and position_at_validation with overlap
for ((c=1;c<=$loops;c++))
do
echo "running java -jar dist/noindex.CloneDetectorWithFilter.jar $prefix $th 0 pos_filter_v overlap"
java -Xms4g -Xmx4g -jar dist/noindex.CloneDetectorWithFilter.jar $prefix $th 0 pos_filter_v overlap
done
# with prefix and position_at_validation with jaccard
for ((c=1;c<=$loops;c++))
do
echo "running java -jar dist/noindex.CloneDetectorWithFilter.jar $prefix $th 0 pos_filter_v jaccard"
java -Xms4g -Xmx4g -jar dist/noindex.CloneDetectorWithFilter.jar $prefix $th 0 pos_filter_v jaccard
done
|
Hexcles/SourcererCC
|
clone-detector/run.sh
|
Shell
|
gpl-3.0
| 2,772 |
#!/bin/sh
#
# config.sh - multi-language support configuration script
# Definition of absolute paths etc.
# This file is 'included' in all scripts.
#
# Arduino main folder:
if [ -z "$ARDUINO" ]; then
export ARDUINO=C:/arduino-1.8.5
fi
#
# Arduino builder:
export BUILDER=$ARDUINO/arduino-builder
#
# AVR gcc tools:
export OBJCOPY=$ARDUINO/hardware/tools/avr/bin/avr-objcopy
export OBJDUMP=$ARDUINO/hardware/tools/avr/bin/avr-objdump
#
# Output folder:
if [ -z "$OUTDIR" ]; then
export OUTDIR="../../Prusa-Firmware-build"
fi
#
# Objects folder:
export OBJDIR="$OUTDIR/sketch"
#
# Generated elf file:
export INOELF="$OUTDIR/Firmware.ino.elf"
#
# Generated hex file:
export INOHEX="$OUTDIR/Firmware.ino.hex"
echo "config.sh started" >&2
_err=0
echo -n " Arduino main folder: " >&2
if [ -e $ARDUINO ]; then echo 'OK' >&2; else echo 'NG!' >&2; _err=1; fi
echo -n " Arduino builder: " >&2
if [ -e $BUILDER ]; then echo 'OK' >&2; else echo 'NG!' >&2; _err=2; fi
echo " AVR gcc tools:" >&2
echo -n " objcopy " >&2
if [ -e $OBJCOPY ]; then echo 'OK' >&2; else echo 'NG!' >&2; _err=3; fi
echo -n " objdump " >&2
if [ -e $OBJDUMP ]; then echo 'OK' >&2; else echo 'NG!' >&2; _err=4; fi
echo -n " Output folder: " >&2
if [ -e $OUTDIR ]; then echo 'OK' >&2; else echo 'NG!' >&2; _err=5; fi
echo -n " Objects folder: " >&2
if [ -e $OBJDIR ]; then echo 'OK' >&2; else echo 'NG!' >&2; _err=6; fi
echo -n " Generated elf file: " >&2
if [ -e $INOELF ]; then echo 'OK' >&2; else echo 'NG!' >&2; _err=7; fi
echo -n " Generated hex file: " >&2
if [ -e $INOHEX ]; then echo 'OK' >&2; else echo 'NG!' >&2; _err=8; fi
if [ $_err -eq 0 ]; then
echo "config.sh finished with success" >&2
export CONFIG_OK=1
else
echo "config.sh finished with errors!" >&2
export CONFIG_OK=0
fi
|
prusa3d/Prusa-Firmware
|
lang/config.sh
|
Shell
|
gpl-3.0
| 1,779 |
#!/bin/sh
# Copyright (c) 2009-2016 The Open Source Geospatial Foundation.
# Licensed under the GNU LGPL.
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 2.1 of the License,
# or any later version. This library is distributed in the hope that
# it will be useful, but WITHOUT ANY WARRANTY, without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Lesser General Public License for more details, either
# in the "LICENSE.LGPL.txt" file distributed with this software or at
# web page "http://www.fsf.org/licenses/lgpl.html".
# About:
# =====
# This script will install tinyows in ubuntu
./diskspace_probe.sh "`basename $0`" begin
BUILD_DIR=`pwd`
####
if [ -z "$USER_NAME" ] ; then
USER_NAME="user"
fi
USER_HOME="/home/$USER_NAME"
TMP_DIR=/tmp/build_tinyows
if [ ! -d "$TMP_DIR" ] ; then
mkdir "$TMP_DIR"
fi
#Download sample data and add to PostgreSQL
cd "$TMP_DIR"
wget -c --tries=3 --progress=dot:mega \
"https://github.com/mapserver/tinyows/raw/master/demo/france.dbf"
wget -c --tries=3 --progress=dot:mega \
"https://github.com/mapserver/tinyows/raw/master/demo/france.shp"
wget -c --tries=3 --progress=dot:mega \
"https://github.com/mapserver/tinyows/raw/master/demo/france.shx"
sudo -u $USER_NAME createdb tinyows_demo
sudo -u $USER_NAME psql tinyows_demo -c 'create extension postgis;'
sudo -u $USER_NAME shp2pgsql -s 27582 -I -W latin1 ./france.shp france > france.sql
sudo -u $USER_NAME psql tinyows_demo < france.sql
rm -rf france.*
#Install packages
apt-get -q update
apt-get --assume-yes install tinyows
if [ $? -ne 0 ] ; then
echo 'ERROR: Package install failed! Aborting.'
exit 1
fi
#Setup sample config
### HB: put into /usr/local/share/tinyows/ and not /etc?
cd "$BUILD_DIR"
cp ../app-conf/tinyows/tinyows.xml /etc/
####
"$BUILD_DIR"/diskspace_probe.sh "`basename $0`" end
|
guygriffiths/OSGeoLive
|
bin/install_tinyows.sh
|
Shell
|
lgpl-2.1
| 2,019 |
#!/bin/bash
# add Anaconda 2.0.1 (for GDAL and scipy)
export PATH="/work/users/thare/python/anaconda/bin:$PATH"
echo "gdal_CTX_RoverSlope_crop.pl $1" | msub -V -S /bin/bash -d `pwd` -l walltime=72:00:00 -j oe -o LOG_baselineSlope.txt
|
USGS-Astrogeology/GDAL_scripts
|
gdal_baseline_slope/run_gdal_CTX_RoverSlope_crop.sh
|
Shell
|
unlicense
| 240 |
#!/bin/bash
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://aws.amazon.com/apache2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
#
set -e
usage() {
echo "install_z3_yices.sh download_dir install_dir"
exit 1
}
if [ "$#" -ne "2" ]; then
usage
fi
DOWNLOAD_DIR=$1
INSTALL_DIR=$2
mkdir -p "$DOWNLOAD_DIR"
cd "$DOWNLOAD_DIR"
#download z3 and yices
curl --retry 3 https://s3-us-west-2.amazonaws.com/s2n-public-test-dependencies/yices-2.6.1-x86_64-pc-linux-gnu-static-gmp.tar.gz --output yices.tar.gz
tar -xf yices.tar.gz
curl --retry 3 https://s3-us-west-2.amazonaws.com/s2n-public-test-dependencies/z3-2017-04-04-Ubuntu14.04-64 --output z3
mkdir -p "$INSTALL_DIR"/bin
mv z3 "$INSTALL_DIR"/bin
mv yices-2.6.1/bin/* "$INSTALL_DIR"/bin
chmod +x "$INSTALL_DIR"/bin/*
"$INSTALL_DIR"/bin/yices-smt2 --version
"$INSTALL_DIR"/bin/yices --version
"$INSTALL_DIR"/bin/z3 --version
|
colmmacc/s2n
|
codebuild/bin/install_z3_yices.sh
|
Shell
|
apache-2.0
| 1,316 |
#!/bin/bash
###############################################################################
##
## Description:
## What does this script?
## What's the result the case expected?
##
###############################################################################
##
## Revision:
## v1.0 - xiaofwan - 1/6/2017 - Draft shell script as test script.
##
###############################################################################
dos2unix utils.sh
#
# Source utils.sh
#
. utils.sh || {
echo "Error: unable to source utils.sh!"
exit 1
}
#
# Source constants file and initialize most common variables
#
UtilsInit
###############################################################################
##
## Put your test script here
## NOTES:
## 1. Please use LogMsg to output log to terminal.
## 2. Please use UpdateSummary to output log to summary.log file.
## 3. Please use SetTestStateFailed, SetTestStateAborted, SetTestStateCompleted,
## and SetTestStateRunning to mark test status.
##
###############################################################################
LogMsg "$(uname -a)"
UpdateSummary "$(uname -a)"
SetTestStateCompleted
|
VirtQE-S1/ESX-LISA
|
remote-scripts/debug_demo.sh
|
Shell
|
apache-2.0
| 1,152 |
#!/usr/bin/env bash
bin=`readlink "$0"`
if [ "$bin" == "" ]; then
bin=$0
fi
bin=`dirname "$bin"`
bin=`cd "$bin"; pwd`
. "$bin"/chorus-config.sh
if [ -f $SOLR_PID_FILE ]; then
if kill -0 `cat $SOLR_PID_FILE` > /dev/null 2>&1; then
log_inline "stopping solr "
kill `cat $SOLR_PID_FILE` && rm $SOLR_PID_FILE
wait_for_stop $SOLR_PID_FILE
rm -f $SOLR_PID_FILE
else
log "could not stop solr. check that process `cat $SOLR_PID_FILE` exists"
exit 0
fi
else
log "no solr to stop"
fi
|
mpushpav/chorus
|
packaging/stop-solr.sh
|
Shell
|
apache-2.0
| 509 |
#!/bin/bash
set -o nounset
set -o errexit
cd "$(dirname "$0")"
source $INSTALLER_ROOT/utils/mac_utils.sh
mkdir -p "$BUNDLE_CONTENTS/Lib"
#cp -pr $QT_DIR/lib/libQt5MultimediaWidgets.so* $PWD/../data/lib/
#cp -pr $QT_DIR/lib/libQt5OpenGL.so* $PWD/../data/lib/
git clone --depth 1 https://github.com/trikset/trik-desktop-gamepad.git gamepad-build
cd gamepad-build
$QT_DIR/bin/qmake CONFIG+=release
make -j4
cd ..
mkdir -p "$PWD/../data/"
rsync -av gamepad-build/gamepad.app/ "$PWD/../data/gamepad.app/"
copy_qt_lib QtMultimediaWidgets
ln -vsf "../TRIK Studio.app/Contents/Lib" "$PWD/../data/gamepad.app/Contents/Lib"
#rm -rf gamepad-build
|
RomanBelkov/qreal
|
installer/packages/trik-studio/ru.qreal.root.trik.gamepad/meta/prebuild-mac.sh
|
Shell
|
apache-2.0
| 645 |
#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e
root_path=$(cd "$(dirname "$0")"; pwd)
cd ${root_path}
#sed -i "s|^runmode.*=.*$|runmode = prod|g" conf/app.conf
./service-center > start-sc.log 2>&1 &
|
little-cui/service-center
|
scripts/release/start_scripts/darwin/start-service-center.sh
|
Shell
|
apache-2.0
| 966 |
#!/bin/bash -ue
# Copyright (C) 2013 Quark Security, Inc
# Copyright (C) 2013 Cubic Corporation
#
# Authors: Spencer Shimko <[email protected]>
check_and_create_repo_dir ()
{
repo_name=$1
repo_path=`sed -rn "s/^$repo_name = (.*)/\\1/p" CONFIG_REPOS`
if [ ! -d $repo_path ]; then
/bin/echo "$repo_name repo directory: $repo_path does not exist. Creating the directory."
/usr/bin/sudo /bin/mkdir -p $repo_path
fi
if [ ! -r $repo_path ] || [ ! -x $repo_path ]; then
/bin/echo "$repo_path does not have proper permissions to continue. Please change the permissions on the directory and any parent directories and try again."
exit
fi
}
rsync_and_createrepo ()
{
repo_path=$1
usage="y - update the directory with RPMs from a provided path\n\
q - abort the bootstrap process\n\
? - print help\n"
if [ -d $repo_path/repodata ]; then
return
fi
/bin/echo "Repodata is missing in $repo_path. Running createrepo."
while :; do
/bin/echo "Would you like to update your RPM directory with the latest RPMs [y, q, ?]: "
read user_input
if [ x"${user_input,,}" == x"y" ]; then
break
elif [ x"${user_input,,}" == x"q" ]; then
exit
elif [ x"${user_input,,}" == x"?" ]; then
/bin/echo -e $usage
else
/bin/echo -e $usage
fi
done
while :; do
        /bin/echo -e "Please provide the full path to rsync RPMs from.\n
If you enter 'rhel', we will try to find, mount, and copy RPMs from your CD/DVD drive\n
Please ensure your RHEL DVD is inserted into the disk drive if you select 'rhel'\n"
read user_input
[ x"$user_input" == "x" ] && break
if [ x"${user_input,,}" == x"rhel" ]; then
tmpdir=`/bin/mktemp -d`
/usr/bin/sudo /usr/bin/mount /dev/sr0 $tmpdir
/usr/bin/sudo /usr/bin/rsync -r --progress $tmpdir/Packages/ $repo_path/
/usr/bin/sudo /usr/bin/umount $tmpdir
/usr/bin/sudo /bin/rm -rf $tmpdir
else
/usr/bin/sudo /usr/bin/rsync -r --progress $user_input $repo_path/
fi
break
done
/usr/bin/sudo /usr/bin/createrepo -d $repo_path/
}
prompt_to_enter_repo_path ()
{
local originalname=${1-}
local originalpath=${2-}
if [ -z $originalpath ]; then
/bin/echo -e "
There is no default path set for the [ $originalname ] repo. You must provide a path for the [ $originalname ] repo to be created on your system
or the script will exit immediately. For example: [ /home/`whoami`/$originalname/ ]"
/bin/echo -e "
Enter a fully qualified path for the $originalname repo.\n"
read path
if [ x"$path" == "x" ]; then
        /bin/echo -e "
No default path exists for the $originalname repo and none provided - Exiting"
exit
else
tmpfile=`/bin/mktemp`
/bin/sed -r "s/^($originalname.*)$/#\1/" CONFIG_REPOS > $tmpfile
/bin/echo "$originalname = $path" >> $tmpfile
/bin/mv $tmpfile CONFIG_REPOS
fi
else
/bin/echo -e "
Enter a fully qualified path for the $originalname repo. If you do not enter a path then the default path
will be used in CONFIG_REPOS. The default path for the $originalname yum repo is $originalpath\n"
/bin/echo -e "
Enter a fully qualified path for the [ $originalname ] repo [ default: $originalpath ]\n"
read path
if [ ! x"$path" == "x" ]; then
tmpfile=`/bin/mktemp`
/bin/sed -r "s/^($originalname.*)$/#\1/" CONFIG_REPOS > $tmpfile
/bin/echo "$originalname = $path" >> $tmpfile
/bin/mv $tmpfile CONFIG_REPOS
fi
fi
}
check_and_build_rpm ()
{
name=$1
version=$2
/usr/bin/rpm -q $name | /usr/bin/grep -q $version && LATEST_INSTALLED=1 || LATEST_INSTALLED=0
if [ $LATEST_INSTALLED -eq 0 ]; then
/bin/echo "need to roll $name"
/usr/bin/make $name-rpm
pushd . > /dev/null
cd repos/clip-repo
/usr/bin/sudo /usr/bin/yum localinstall -y $name*
popd > /dev/null
fi
}
/bin/echo -e "Creating an environment for building software and ISOs can be a little
complicated. This script will automate some of those tasks. Keep in mind that
this script isn't exhaustive; depending on a variety of factors you may have to
install some additional packages.\n\nYour user *must* have sudo access for any
of this to work.\n\n"
/bin/echo -e "CLIP uses yum repositories for building packages and generting ISOs.
These must be directories of packages, not RHN channels. E.g. a directory with
a bunch of packages and a repodata/ sub-directory. If you do not have yum
repositories like this available CLIP will not work! Please see Help-FAQ.txt!\n\n"
/bin/echo "Checking if $USER is in the sudoers file"
/usr/bin/sudo -l -U $USER | grep -q "User $USER is not allowed to run sudo" && /usr/sbin/sudoers adduser $USER sudo
/bin/echo "Checking if registered with RHN. We will attempt to register if we are not current. Please enter your RHN credentials if prompted."
/usr/bin/sudo /usr/bin/subscription-manager status | grep -q "Current" || /usr/bin/sudo /usr/bin/subscription-manager register --auto-attach
arch=`rpm --eval %_host_cpu`
# TODO Using the yum variable $releasever evaluates to 7Server which is incorrect.
# For now, this variable needs to be incremented during each major RHEL release until we
# find a better way to get the release version to set for EPEL
releasever="7"
# always have the latest epel rpm
/bin/echo "Checking if epel is installed and updating to the latest version if our version is older"
/bin/echo "
[epel]
name=Bootstrap EPEL
mirrorlist=https://mirrors.fedoraproject.org/mirrorlist?repo=epel-$releasever&arch=$arch
failovermethod=priority
enabled=1
gpgcheck=0
" | /usr/bin/sudo tee --append /etc/yum.repos.d/epel.repo
/usr/bin/sudo yum --enablerepo=epel -y install epel-release
PACKAGES="mock pigz createrepo repoview rpm-build make python-kid"
/usr/bin/sudo /usr/bin/yum install -y $PACKAGES
# get the name/path for any existing yum repos from CONFIG_REPO
rhelreponame=rhel
rhelrepopath=`/bin/sed -rn 's/^rhel = (.*)/\1/p' CONFIG_REPOS`
optreponame=opt
optrepopath=`/bin/sed -rn 's/^opt = (.*)/\1/p' CONFIG_REPOS`
# prompt user for rhel/opt path
prompt_to_enter_repo_path $rhelreponame $rhelrepopath
prompt_to_enter_repo_path $optreponame $optrepopath
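# After the prompts above, CONFIG_REPOS should contain entries along these
# lines (paths are illustrative):
#   rhel = /home/builder/rhel/
#   opt = /home/builder/opt/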
# prompt the user to add additional yum repos if necessary
/bin/echo -e "
Adding additional yum repos if necessary"
while :; do
/bin/echo -e "
Enter a name for this yum repo. Just leave empty if you are done adding, or don't wish to change the repositories.\n"
read name
[ x"$name" == "x" ] && break
/bin/echo -e "
Enter a fully qualified path for this yum repo. Just leave empty if you are done adding, or don't wish to change the repositories.\n"
read path
[ x"$path" == "x" ] && break
/bin/echo -e "# INSERTED BY BOOTSTRAP.SH\n$name = $path" >> CONFIG_REPOS
done
check_and_create_repo_dir "rhel"
check_and_create_repo_dir "opt"
# Refresh repo variables
rhelrepopath=`/bin/sed -rn 's/^rhel = (.*)/\1/p' CONFIG_REPOS`
optrepopath=`/bin/sed -rn 's/^opt = (.*)/\1/p' CONFIG_REPOS`
rsync_and_createrepo $rhelrepopath
/bin/echo "Checking if RHEL optional repo is enabled..."
/usr/bin/sudo /bin/yum repolist enabled | /usr/bin/grep -q rhel-7-server-optional-rpms && OPT_SUBSCRIBED=1 || OPT_SUBSCRIBED=0
if [ $OPT_SUBSCRIBED -eq 0 ]; then
/bin/echo "RHEL optional channel is disabled...enabling"
/usr/bin/sudo /usr/bin/subscription-manager repos --enable=rhel-7-server-optional-rpms
else
/bin/echo "RHEL optional channel is already enabled"
fi
# pull opt package versions from pkglist.opt. Otherwise just download the newest
# versions available
OPT_PACKAGES="anaconda-dracut at-spi tigervnc-server-module bitmap-fangsongti-fonts \
GConf2-devel"
CONF=./conf/pkglist.opt
VERSIONED_LIST=
if [ -s $CONF ]; then
for pkg in $OPT_PACKAGES; do
pkg=`/bin/sed -rn "s/$pkg-(.*).rpm/$pkg-\1/p" $CONF`
VERSIONED_LIST="$VERSIONED_LIST $pkg"
done
else
VERSIONED_LIST=$OPT_PACKAGES
fi
/usr/bin/sudo /bin/yumdownloader --destdir $optrepopath $VERSIONED_LIST
/usr/bin/sudo /usr/bin/createrepo -d $optrepopath
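# mock requires membership in the mock group; the new group membership takes
# effect at the next login.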
/usr/bin/sudo /usr/sbin/usermod -aG mock `id -un`
if [ x"`cat /sys/fs/selinux/enforce`" == "x1" ]; then
/bin/echo -e "This is embarassing but due to a bug (bz #861281) you must do builds in permissive.\nhttps://bugzilla.redhat.com/show_bug.cgi?id=861281"
/bin/echo "So this is a heads-up we're going to configure your system to run in permissive mode. Sorry!"
/bin/echo "You can bail by pressing ctrl-c or hit enter to continue."
read user_input
/usr/bin/sudo /usr/sbin/setenforce 0
/usr/bin/sudo /bin/sed -i -e 's/^SELINUX=.*/SELINUX=permissive/' /etc/selinux/config
fi
# Roll lorax
check_and_build_rpm "lorax" "lorax-19.6.45-6.el7"
# Roll pungi
check_and_build_rpm "pungi" "pungi-2.13-4.el7"
if ! rpm -q "livecd-tools-20.6-1.el7.x86_64" > /dev/null; then
if rpm -q "livecd-tools" > /dev/null; then
/bin/echo "You have livecd-tools installed, but not our version. Our version contains
fixes for generating live media. We will compile our version and replace your
version free of charge.
Press any key to continue or ctrl-c to exit.
"
read user_input
/usr/bin/sudo /usr/bin/yum remove livecd-tools 2>/dev/null || true
/usr/bin/sudo /usr/bin/yum remove python-imgcreate 2>/dev/null || true
fi
/usr/bin/sudo /usr/bin/yum install -y syslinux-extlinux dumpet 2>/dev/null || true
/usr/bin/make livecd-tools-rpm
pushd . > /dev/null
cd repos/clip-repo
/usr/bin/sudo /usr/bin/yum localinstall -y livecd-tools* python-imgcreate*
popd > /dev/null
fi
if ! rpm -q "openscap-1.2.1-1.el7.x86_64" > /dev/null; then
if rpm -q "openscap" > /dev/null; then
/bin/echo "You have openscap installed, but not our version. Our version is
required to build scap-security-guide.
Press any key to continue or ctrl-c to exit.
"
read user_input
/usr/bin/sudo /usr/bin/yum remove openscap* 2>/dev/null || true
fi
/usr/bin/make openscap-rpm
pushd . > /dev/null
cd repos/clip-repo
/usr/bin/sudo /usr/bin/yum localinstall -y openscap*
popd > /dev/null
fi
/bin/echo -e "Basic bootstrapping of build host is complete.\nRunning 'make clip-rhel7-iso'"
/usr/bin/make clip-rhel7-iso
|
rprevette/clip
|
bootstrap.sh
|
Shell
|
apache-2.0
| 10,225 |
#!/bin/bash
# Testing mc config hosts
# 1. Get number of previous hosts
# 2. Add a host
# 3. Assert the increase in number of hosts by one.
# 4. Delete the new host
initial_count=`mc config host list | wc -l`
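# The access/secret keys below are throwaway test credentials for the
# S3v2-compatible endpoint, not real secrets.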
add_test_result=`mc config --json host add testdisk https://storage.googleapis.com BKIKJAA5BMMU2RHO6IBB V8f1CwQqAcwo80UEIJEjc5gVQUSSx5ohQ9GSrr12 S3v2`
final_count=`mc config host list | wc -l`
remove_test_result=`mc config host remove testdisk`
if [ "$add_test_result" == "Added โtestdiskโ successfully." ]; then
echo "mc config host add Test Passed";
else
echo "mc config host add Test Failed";
fi
if [ $((initial_count + 1)) -ne $final_count ]; then
echo "mc config host list Test Failed";
else
echo "mc config host list Test Passed";
fi
if [ "$remove_test_result" == "Removed โtestdiskโ successfully." ]; then
echo "mc config host remove Test Passed";
else
echo "mc config host remove Test Failed";
fi
|
harshavardhana/mc
|
tests/tests/config-host-test.sh
|
Shell
|
apache-2.0
| 955 |
#!/bin/bash
if [ "$#" -ne 1 ]; then
echo "First argument must be the shell used"
exit 1
fi
sudo apt-get update -y
sudo apt-get -y install software-properties-common python-software-properties
sudo add-apt-repository ppa:git-core/ppa
sudo apt-get update -y
sudo apt-get -y install curl git vim zsh ruby1.9.3
mkdir /tmp/beetest && cd /tmp/beetest
export VERLABEL=LATEST
curl -sSL https://raw.githubusercontent.com/usermindinc/beekeeper/master/install.sh | bash
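# Wire beekeeper's bootstrap into the chosen shell's startup file so that new
# shells source it automatically.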
if [ $1 == 'bash' ]; then
sudo chsh -s /bin/bash vagrant
echo 'source $(/tmp/beetest/beekeeper/bin/bootstrap) > /dev/null' | cat - /home/vagrant/.bashrc > /tmp/.bashrc
mv /tmp/.bashrc /home/vagrant/.bashrc
elif [ $1 == 'zsh' ]; then
sudo chsh -s /bin/zsh vagrant
echo 'source $(/tmp/beetest/beekeeper/bin/bootstrap) > /dev/null' > /home/vagrant/.zshenv
else
echo 'unknown shell'
exit 1
fi
|
mirceal/beekeeper
|
test/e2e/beetest.sh
|
Shell
|
apache-2.0
| 884 |
#!/bin/sh
#
# Copyright (c) 2003 Dan Nelson
# All rights reserved.
#
# Please see src/share/examples/etc/bsd-style-copyright.
#
# $FreeBSD: soc2013/dpl/head/usr.sbin/mtree/test/test02.sh 121841 2003-10-31 13:39:19Z phk $
#
set -e
TMP=/tmp/mtree.$$
rm -rf ${TMP}
mkdir -p ${TMP} ${TMP}/mr ${TMP}/mt
touch -t 199901020304 ${TMP}/mr/oldfile
touch ${TMP}/mt/oldfile
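# mr/oldfile carries an old mtime while mt/oldfile has a current one; applying
# the spec from mr with mtree -U should reset mt/oldfile's mtime so that the
# two ls -l listings match.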
mtree -c -p ${TMP}/mr > ${TMP}/_
mtree -U -r -p ${TMP}/mt < ${TMP}/_ > /dev/null
x=x`(cd ${TMP}/mr ; ls -l 2>&1) || true`
y=x`(cd ${TMP}/mt ; ls -l 2>&1) || true`
if [ "$x" != "$y" ] ; then
echo "ERROR Update of mtime failed" 1>&2
rm -rf ${TMP}
exit 1
fi
rm -rf ${TMP}
exit 0
|
dplbsd/soc2013
|
head/usr.sbin/mtree/test/test02.sh
|
Shell
|
bsd-2-clause
| 652 |
#!/bin/sh -
# Copyright (c) 2013 Garrett Cooper
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# $FreeBSD$
set -e
CURDIR=$(pwd)
ECHO=${ECHO:=echo}
tmpfile=$(mktemp osreldate.XXXXXXXX)
trap "rm -f $tmpfile" EXIT
${ECHO} creating osreldate.h from newvers.sh
export PARAMFILE="${PARAM_H:=$CURDIR/../sys/sys/param.h}"
set +e
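# newvers.sh is expected to derive RELDATE (from __FreeBSD_version in
# $PARAMFILE) and COPYRIGHT, both of which are used below.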
. "${NEWVERS_SH:=$CURDIR/../sys/conf/newvers.sh}" || exit 1
set -e
cat > $tmpfile <<EOF
$COPYRIGHT
#ifdef _KERNEL
#error "<osreldate.h> cannot be used in the kernel, use <sys/param.h>"
#else
#undef __FreeBSD_version
#define __FreeBSD_version $RELDATE
#endif
EOF
chmod 644 $tmpfile
mv $tmpfile osreldate.h
|
jrobhoward/SCADAbase
|
include/mk-osreldate.sh
|
Shell
|
bsd-3-clause
| 1,875 |
#!/bin/bash
if [ $# != 1 ]
then
echo "Usage: $0 <Server-IP>"
exit 1
fi;
IP=$1
LOG_DIR=./log/`hostname`
mkdir -p $LOG_DIR
function clean_up {
# Perform program exit housekeeping
kill %1 %2 %3 %4 # kill any running child
exit
}
trap clean_up SIGINT SIGTERM SIGHUP
function run_test()
{
echo -e "\n# >> running test for $RUNTIME; LOG_FILE is: $LOG_DIR/$LOG_FILE\n"
echo "# $MSG" | tee -a $LOG_DIR/$LOG_FILE
./avner-test.sh $IP | tee -a $LOG_DIR/$LOG_FILE &
sleep $RUNTIME
kill %1
echo -e \\a #beep
}
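# Each block below selects a libvma build and offload flags, then runs the
# same ping-pong test for $RUNTIME; blocks whose run_test line is commented
# out are skipped.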
#export VMA_PATH=/volt/avnerb/vma/libvma.so-4.5
#export VMA_PATH=/volt/avnerb/vma/libvma.so-nosend-norecv
#export VMA_PATH=/volt/avnerb/vma/libvma.so-nosocket
#export VMA_PATH=/volt/avnerb/vma/libvma.so-norecv
#export VMA_PATH=/volt/avnerb/vma/libvma.so-nosend
#export VMA_PATH=/volt/avnerb/vma/libvma.so-nosend-norecv-nobind
#export BASE_ARG="-c -t 4 -m 14"
export VMA_RX_OFFLOAD=0 VMA_UC_OFFLOAD=1
export BASE_ARG="-c -t 4 -m 14 --ping-pong --pps=max"
export VMA_PATH=/volt/avnerb/vma/libvma.so-4.5
RUNTIME=300s
LOG_FILE=0.eth_server_ucoff-0.pingmax.rx0ff-0.ucoff-1.server-vma-custom-mc-reply-uc-client-vma4.5
MSG="ETH Server-Command: VMA_IGMP=0 VMA_RX_OFFLOAD=1 VMA_UC_OFFLOAD=0 LD_PRELOAD=/volt/avnerb/vma/libvma.so-4.5 ./sockperf -s -i 226.8.8.8 --force_unicast_reply"
#run_test
export VMA_RX_OFFLOAD=1 VMA_UC_OFFLOAD=1
export BASE_ARG="-c -t 4 -m 14 --ping-pong --pps=max"
export VMA_PATH=/volt/avnerb/vma/libvma.so-4.5
RUNTIME=300s
LOG_FILE=1.eth_server_ucoff-0.pingmax.rx0ff-1.ucoff-1.server-vma-custom-mc-reply-uc-client-vma4.5
MSG="ETH Server-Command: VMA_IGMP=0 VMA_RX_OFFLOAD=1 VMA_UC_OFFLOAD=0 LD_PRELOAD=/volt/avnerb/vma/libvma.so-4.5 ./sockperf -s -i 226.8.8.8 --force_unicast_reply"
#run_test
export VMA_RX_OFFLOAD=0 VMA_UC_OFFLOAD=1
export BASE_ARG="-c -t 4 -m 14 --ping-pong --pps=max"
export VMA_PATH=/volt/avnerb/vma/libvma.so-norecv
RUNTIME=300s
LOG_FILE=2.eth_server_ucoff-0.pingmax.rx0ff-0.ucoff-1.server-vma-custom-mc-reply-uc-client-vma-norecv
MSG="ETH Server-Command: VMA_IGMP=0 VMA_RX_OFFLOAD=1 VMA_UC_OFFLOAD=0 LD_PRELOAD=/volt/avnerb/vma/libvma.so-4.5 ./sockperf -s -i 226.8.8.8 --force_unicast_reply"
#run_test
export VMA_RX_OFFLOAD=0 VMA_UC_OFFLOAD=1
export BASE_ARG="-c -t 4 -m 14 --ping-pong --pps=max"
export VMA_PATH=/volt/avnerb/vma/libvma.so-4.5
RUNTIME=300s
LOG_FILE=3.eth_server_ucoff-1.pingmax.rx0ff-0.ucoff-1.server-vma-custom-mc-reply-uc-client-vma4.5
MSG="ETH Server-Command: VMA_IGMP=0 VMA_RX_OFFLOAD=1 VMA_UC_OFFLOAD=1 LD_PRELOAD=/volt/avnerb/vma/libvma.so-4.5 ./sockperf -s -i 226.8.8.8 --force_unicast_reply"
run_test
export VMA_RX_OFFLOAD=1 VMA_UC_OFFLOAD=1
export BASE_ARG="-c -t 4 -m 14 --ping-pong --pps=max"
export VMA_PATH=/volt/avnerb/vma/libvma.so-4.5
RUNTIME=300s
LOG_FILE=4.eth_server_ucoff-1.pingmax.rx0ff-1.ucoff-1.server-vma-custom-mc-reply-uc-client-vma4.5
MSG="ETH Server-Command: VMA_IGMP=0 VMA_RX_OFFLOAD=1 VMA_UC_OFFLOAD=1 LD_PRELOAD=/volt/avnerb/vma/libvma.so-4.5 ./sockperf -s -i 226.8.8.8 --force_unicast_reply"
run_test
export VMA_RX_OFFLOAD=0 VMA_UC_OFFLOAD=1
export BASE_ARG="-c -t 4 -m 14 --ping-pong --pps=max"
export VMA_PATH=/volt/avnerb/vma/libvma.so-norecv
RUNTIME=300s
LOG_FILE=5.eth_server_ucoff-1.pingmax.rx0ff-0.ucoff-1.server-vma-custom-mc-reply-uc-client-vma-norecv
MSG="ETH Server-Command: VMA_IGMP=0 VMA_RX_OFFLOAD=1 VMA_UC_OFFLOAD=1 LD_PRELOAD=/volt/avnerb/vma/libvma.so-4.5 ./sockperf -s -i 226.8.8.8 --force_unicast_reply"
run_test
|
shekkbuilder/sockperf
|
tests/avner-master-test.sh
|
Shell
|
bsd-3-clause
| 3,461 |
#!/bin/sh
#
# VieleRETS upgrade to 1.1.6 script for Mac OS X, Unix, and Linux
#
mkdir logs
chmod 777 logs
|
dkullmann/vielerets
|
upgrade.sh
|
Shell
|
mit
| 108 |
#
# AUR aliases
#
# download and build AUR package
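# usage: aurb <pkgname>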
aurb() {
git clone https://aur.archlinux.org/${1}.git && cd ${1} && makepkg --clean --install --syncdeps
}
# only download aur package; do not build
aurd() {
git clone https://aur.archlinux.org/${1}.git
}
# remove old package, rebuild, and install.
# NOTE: this will remove any unstashed/uncommitted changes.
# due to how makepkg will update the PKGBUILD, a git pull alone will not suffice.
auru() {
git reset HEAD --hard && git pull && makepkg --clean --force --install --syncdeps --cleanbuild
}
|
Eriner/zim
|
modules/pacman/helper_aur.zsh
|
Shell
|
mit
| 565 |
#!/bin/bash
transitd-cli --set gateway.enabled=yes
/start.cli.sh
|
transitd/transitd
|
docker/start.gateway.cli.sh
|
Shell
|
mit
| 65 |