| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
| stringlengths 2-1.05M | stringlengths 5-110 | stringlengths 3-922 | stringclasses (1 value) | stringclasses (15 values) | int64 2-1.05M |
#!/bin/sh
# Set the desired state (on|off) for ILTO 400M.
# See usage below.
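# Example invocations (illustrative; the install path is an assumption):
#   set_status_ILTO400M.sh on auto       # cron-driven update, respects manual mode
#   set_status_ILTO400M.sh off manual    # operator override, always applied
# A crontab entry could look like:
#   */5 * * * * /usr/bin/set_status_ILTO400M.sh on auto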
STATUS_FILE="/tmp/ILTO400M.status"
if [ -f $STATUS_FILE ]; then
mode=$(cat $STATUS_FILE | sed -n 's/.\?mode\ \?=\ \?\(manual\|auto\)/\1/p')
state=$(cat $STATUS_FILE | sed -n 's/.\?state\ \?=\ \?\(on\|off\)/\1/p')
else
echo "Error: can't read $STATUS_FILE. does it exist?" >&2
echo "Creating empty $STATUS_FILE."
touch $STATUS_FILE
exit 1
fi
case "$1$2" in
on|onauto|autoon)
if ! [ "$mode" = "manual" ]; then
(echo `date`; echo "mode=auto"; echo state="on") > $STATUS_FILE
fi
;;
off|offauto|autooff)
if ! [ "$mode" = "manual" ]; then
(echo `date`; echo "mode=auto"; echo state="off") > $STATUS_FILE
fi
;;
onmanual|manualon)
(echo `date`; echo "mode=manual"; echo state="on") > $STATUS_FILE
;;
offmanual|manualoff)
(echo `date`; echo "mode=manual"; echo state="off") > $STATUS_FILE
;;
onswitch|switchon|offswitch|switchoff|switch)
(echo `date`; echo "mode=auto"; echo "state=${state}") > $STATUS_FILE
;;
*)
(echo "Usage: set_status_ILT400M.sh on|off [auto|manual|switch]"
echo "\"manual\" has higher priority and it always overwrites the state."
echo "\"auto\" cann't reset the state if it was set in \"manual\" mode."
echo "\"switch\" does not reset the state. it sets the mode to \"auto\"."
echo "Use cron daemon or other scripts to call this scripts."
echo "GMT time is 2 hrs (7200s) behind the local time."
) >&2
result="1"
;;
esac
exit $result
| vkonst/tele-dacha | toledo/usr/bin/set_status_ILTO400M.sh | Shell | gpl-3.0 | 1,584 |
docker build -t henfri/owfs ./
| henfri/docker | knx/owfs/build.sh | Shell | gpl-3.0 | 31 |
#!/bin/sh
#
# get filesize, last modified and number of points for each LAS/LAZ file
#
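# Example output row (illustrative values only):
#   /home/rawdata/project/las/tile_01.laz;52428800;2015-03-02 10:11:12.000000000 +0100;regular;1234567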
CSV=/home/institut/rawdata/maintenance/scripts/als/logs/list_las_files.csv
echo "path;bytes;lastmod;point_type;points" > $CSV
for LASPATH in `find /home/rawdata/ -wholename "*/las/*.la[sz]" | grep -v "/raw/"`
do
# set LAS/LAZ name and meta directory
LASNAME=`basename $LASPATH`
METADIR=`dirname $LASPATH | sed s/las$/meta/`
# preset points and point type
POINTS=-
PTYPE=-
# get size and last modified date of file
BYTES_LASTMOD=`stat --printf "%s;%y" $LASPATH`
# get number of points from lasinfo output file if any
if [ -f "$METADIR/$LASNAME.info.txt" ]; then
# try to get "regular" number of points
POINTS=`grep "^ number of point records:" $METADIR/$LASNAME.info.txt | awk '{print $5}'`
if [ $POINTS -eq 0 ]; then
# try to get "extended" number of points
POINTS=`grep "^ extended number of point records:" $METADIR/$LASNAME.info.txt | awk '{print $6}'`
if [ -z $POINTS ]; then
echo "WARNING: no point count for $LASPATH"
else
PTYPE=extended
fi
else
PTYPE=regular
fi
else
echo "WARNING: $METADIR/$LASNAME.info.txt not found"
fi
# write attributes to CSV
echo "$LASPATH;$BYTES_LASTMOD;$PTYPE;$POINTS" >> $CSV
done
echo
echo "created $CSV"
| openwebcc/ba | maintenance/list_las_files.sh | Shell | gpl-3.0 | 1,444 |
!Enter privileged EXEC mode
enable
!Enter global configuration mode
configure terminal
!Enable IPv6 unicast routing globally
ipv6 unicast-routing
!Create subinterface 10
!(Note: a router-on-a-stick subinterface normally also needs an
!"encapsulation dot1Q 10" command before IPv6 addressing.)
interface GigabitEthernet 0/0.10
!Configure the IPv6 global unicast address
ipv6 address 2001:DB8:ACAD:10::1/64
!Configure the IPv6 link-local address
ipv6 address FE80::1 link-local
!Leave interface configuration mode
exit
!Create subinterface 20
interface GigabitEthernet 0/1.20
!Configure the IPv6 global unicast address
ipv6 address 2001:DB8:ACAD:20::1/64
!Configure the IPv6 link-local address
ipv6 address FE80::1 link-local
!Exit all configuration levels
end
!Save the configuration
copy running-config startup-config
!Commands for inspecting the result:
!Show the running configuration held in RAM
show running-config
!Summary of IPv6 addressing and interface status
show ipv6 interface brief
!Detailed interface information
show interface GigabitEthernet 0/0.10
show interface GigabitEthernet 0/1.20
!Check the local IPv6 routing table
show ipv6 route
| vaamonde/netacad | modulo-02/capitulo-04/09-Router-interfaces-ipv6.sh | Shell | gpl-3.0 | 1,299 |
#!/bin/sh
startdir="$1"
[ -n "$startdir" ] && [ -d "$startdir" ] || exit 1
cd "$(dirname "$0")"
HDIR="$startdir/liveenv"
PKGSDIR="$startdir/PKGS"
RDIR="$HDIR"/root
doinst="$HDIR"/doinst
modtxt="$HDIR"/MODIFICATIONS
cat <<EOF >> "$modtxt"
user-settings-slint
------------------
- user 'one' with common user-settings for Xfce
EOF
USERSETTINGS_SLINT="$(readlink -f "$(ls -1 "$PKGSDIR"/user-settings-slint-[0-9]*.txz 2>/dev/null)")"
if [ -n "$USERSETTINGS_SLINT" ]; then
tar -C "$RDIR" -xf "$USERSETTINGS_SLINT" etc/skel
else
echo "Cannot add $0 because package user-settings-slint is missing in PKGS." >&2
exit 1
fi
cp -r "$RDIR"/etc/skel/* "$RDIR"/etc/skel/.??* "$RDIR"/home/one/ 2>/dev/null
| djemos/slint-scripts | liveenv/modules/30-user-settings-slint/add.sh | Shell | gpl-3.0 | 701 |
#!/bin/bash
#qNoise: A generator of non-Gaussian colored noise
#Copyright © 2021, Juan Ignacio Deza
#email: [email protected]
# Licence
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
ERROR=0
H=0.01
N=$1
if [ "$N" = "" ]; then
N=10
fi
tau=0.1
run_case() {
# $1 = q value passed to test_qNoise, $2 = q as it appears in the output file
# name (test_qNoise writes q=1.0 to a file named with "1" rather than "1.0").
q=$1
qfile=$2
echo "-----------------------------------------"
echo "tau=$tau; H=$H; q=$q; N=$N"
echo "-----------------------------------------"
./test_qNoise $tau $H $q $N
ERROR=$(($ERROR+$?))
cat 'qNoise_'"$tau"'_'"$H"'_'"$qfile"'_'"$N"'.txt'
ERROR=$(($ERROR+$?))
}
for q in 0.1 0.2 0.3 0.4 0.5 0.6 0.7 0.8 0.9 1.0 1.1 1.2 1.3; do
qfile=$q
[ "$q" = "1.0" ] && qfile=1
run_case $q $qfile
done
# Propagate the accumulated error count as the exit status.
exit $ERROR
| ignaciodeza/qNoise | test/runTest.sh | Shell | gpl-3.0 | 4,187 |
#!/bin/sh
MYSQLROOTPWD="${1:-root}" # default to "root" when no password argument is given
f () {
cd /root
curl -s https://raw.githubusercontent.com/openemr/openemr-devops/master/packages/lightsail/launch.sh | bash -s -- -s 0
docker exec -it $(docker ps | grep mysql | cut -f 1 -d " ") mysql --password="$MYSQLROOTPWD" -e "update openemr.users set active=0 where id=1;"
cp openemr-devops/packages/express/ami/ami-rekey.sh /etc/init.d/ami-rekey
update-rc.d ami-rekey defaults
rm /root/.ssh/authorized_keys
rm /home/ubuntu/.ssh/authorized_keys
#rm /home/ubuntu/.bash_history
sync
shutdown -h now
exit 0
}
f
echo failure?
exit 1
| openemr/openemr-devops | packages/express/ami/ami-config.sh | Shell | gpl-3.0 | 621 |
#!/bin/bash
create()
{
crte=./create/
isrt=./insert/
dbuser=release
dbname=release
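# NOTE: every psql call below authenticates as $dbuser; unless peer/trust
# auth, a ~/.pgpass entry, or PGPASSWORD is configured (a deployment
# assumption), psql will prompt for a password on each file.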
#users and groups
psql -U $dbuser -d $dbname -f ${crte}groups.sql
psql -U $dbuser -d $dbname -f ${crte}users.sql
psql -U $dbuser -d $dbname -f ${crte}groups_users.sql
#psql -U $dbuser -d $dbname -f ${isrt}admin.sql
# app configuration
psql -U $dbuser -d $dbname -f ${crte}configurations.sql
# formats for responses
psql -U $dbuser -d $dbname -f ${crte}formats.sql
psql -U $dbuser -d $dbname -f ${isrt}formats.sql
# corpuses
psql -U $dbuser -d $dbname -f ${crte}corpuses.sql
psql -U $dbuser -d $dbname -f ${isrt}corpuses.sql
# accounts for request
psql -U $dbuser -d $dbname -f ${crte}accounts.sql
#visiblis supported languages
psql -U $dbuser -d $dbname -f ${crte}languages.sql
psql -U $dbuser -d $dbname -f ${isrt}languages.sql
#categories for semantic request (url title text)
psql -U $dbuser -d $dbname -f ${crte}categories.sql
psql -U $dbuser -d $dbname -f ${isrt}categories.sql
#keyword send by the api
psql -U $dbuser -d $dbname -f ${crte}keywords.sql
#the semantic request
psql -U $dbuser -d $dbname -f ${crte}semantic_requests.sql
#the results for keyword to a request
psql -U $dbuser -d $dbname -f ${crte}keyword_link_requests.sql
#some metrics send by the api
psql -U $dbuser -d $dbname -f ${crte}semantic_responses.sql
#if new install (to do 0.1->0.2)
#Request for command & http status code for doc http codes
psql -U $dbuser -d $dbname -f ${crte}notification_texts.sql
psql -U $dbuser -d $dbname -f ${crte}notifications.sql
#Request for command & http status code for doc http codes
psql -U $dbuser -d $dbname -f ${crte}request_for_comments.sql
psql -U $dbuser -d $dbname -f ${isrt}request_for_comments.sql
psql -U $dbuser -d $dbname -f ${crte}http_status_codes.sql
psql -U $dbuser -d $dbname -f ${isrt}http_status_codes.sql
psql -U $dbuser -d $dbname -f ${crte}cocoon_categories.sql
psql -U $dbuser -d $dbname -f ${isrt}cocoon_categories.sql
psql -U $dbuser -d $dbname -f ${crte}semantic_cocoons.sql
psql -U $dbuser -d $dbname -f ${crte}queue_elements.sql
psql -U $dbuser -d $dbname -f ${crte}semantic_cocoon_responses.sql
psql -U $dbuser -d $dbname -f ${crte}semantic_cocoon_links.sql
psql -U $dbuser -d $dbname -f ${crte}semantic_cocoon_urls.sql
#if new install (to do 0.2->0.3)
}
create
| WildTurtles/illumination | scripts/sql/0.0.0/db.sh | Shell | gpl-3.0 | 2,325 |
#!/bin/sh
# Makes the MLV App icon from scratch, you must have Blender for this to work
# Remove old stuff
rm icon.icns; rm icon.png;
# Render the icon. THIS WILL TAKE MINUTES.
/Applications/Blender.app/Contents/MacOS/blender -b icon.blend -o icon# -f 1;
# NOTE: I have left the blend file set on CPU render, but switch to GPU if you have an NVIDIA card
# Quick rename
mv icon1.png icon.png;
# Run png2icns
./png2icns.sh icon.png icon.icns;
| ilia3101/MLV-App | src/icon/makeicon.sh | Shell | gpl-3.0 | 443 |
#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# If this script is run out of /usr/bin or some other system bin directory
# it should be linked to and not copied. Things like java jar files are found
# relative to the canonical path of this script.
#
# USE the trap if you need to also do manual cleanup after the service is stopped,
# or need to start multiple services in the one container
# Set environment variables.
JETTY_PREFIX=${JETTY_PREFIX:-/opt/jetty}
JETTY_HTTP_PORT=${JETTY_HTTP_PORT:-5601}
# Show environment variables.
echo "JETTY_PREFIX=${JETTY_PREFIX}"
echo "JETTY_HTTP_PORT=${JETTY_HTTP_PORT}"
# Start function
function start() {
# Change http port.
sed -e "s/^# jetty.http.port=.*/jetty.http.port=${JETTY_HTTP_PORT}/" ${JETTY_PREFIX}/start.ini > ${JETTY_PREFIX}/start.ini.tmp
mv ${JETTY_PREFIX}/start.ini.tmp ${JETTY_PREFIX}/start.ini
# Start Jetty.
${JETTY_PREFIX}/bin/jetty.sh start
}
trap "docker-stop.sh; exit 1" TERM KILL INT QUIT
# Start
start
# Start infinite loop
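# Blocking in 'wait' on a cheap background child (tail -F of /dev/null never
# produces output) lets the trapped signals above interrupt the shell promptly,
# which a foreground child would delay.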
while true
do
tail -F /dev/null & wait ${!}
done
| bigcontainer/bigcont | banana/docker-run.sh | Shell | gpl-3.0 | 1,824 |
#!/bin/bash
## Variables
CODE=$(bash <(cat /etc/os-release; echo 'echo ${UBUNTU_CODENAME/*, /}'))
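# e.g. CODE=bionic on Ubuntu 18.04 (example value); it feeds the VirtualBox
# repository line below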
## Shell tweaks
bash common/shell-tweaks.sh
## Packages and updates
# Atom editor repo
sudo add-apt-repository --yes ppa:webupd8team/atom
# Nextcloud client repo
sudo apt-add-repository --yes ppa:nextcloud-devs/client
# Node.js repo
curl -sL https://deb.nodesource.com/setup_8.x | sudo -E bash -
# Noobslab theme repos
sudo add-apt-repository --yes ppa:noobslab/themes
sudo add-apt-repository --yes ppa:snwh/pulp
# Opera repo
echo 'deb https://deb.opera.com/opera-stable/ stable non-free' | sudo tee -a /etc/apt/sources.list.d/opera-stable.list
wget -qO - https://deb.opera.com/archive.key | sudo apt-key add -
# Runescape Unix Client repo
sudo add-apt-repository --yes ppa:hikariknight/unix-runescape-client
# Sublime Text 3 repo
echo "deb https://download.sublimetext.com/ apt/stable/" | sudo tee /etc/apt/sources.list.d/sublime-text.list
wget -qO - https://download.sublimetext.com/sublimehq-pub.gpg | sudo apt-key add -
# Virtualbox repo
echo "deb http://download.virtualbox.org/virtualbox/debian $CODE contrib" | sudo tee -a /etc/apt/sources.list.d/virtualbox.list
wget -q https://www.virtualbox.org/download/oracle_vbox_2016.asc -O- | sudo apt-key add -
# Vivaldi repo
echo 'deb http://repo.vivaldi.com/stable/deb/ stable main' | sudo tee -a /etc/apt/sources.list.d/vivaldi.list
wget -qO - http://repo.vivaldi.com/stable/linux_signing_key.pub | sudo apt-key add -
# Update our repositories and make sure the system is up-to-date
sudo apt update && sudo apt --yes upgrade
# install our packages
sudo apt -y install \
audacity \
arc-theme \
atom \
chromium-browser \
cinnamon-desktop-environment \
clementine \
clusterssh \
composer \
dark-aurora \
dkms \
filezilla \
fonts-roboto \
gcc \
gimp \
git \
gnome-shell \
gnome-tweak-tool \
golang \
guvcview \
nextcloud-client \
nmap \
nodejs \
openjdk-8-jre \
openjdk-8-jdk \
opera-stable \
paper-cursor-theme \
paper-gtk-theme \
paper-icon-theme \
php7.0-cli \
php7.0-ldap \
php7.0-mbstring \
php7.0-mysql \
php7.0-pgsql \
python-pip \
python3 \
python3-pip \
remmina \
ruby \
ruby-dev \
rustc \
sqlitebrowser \
steam \
sublime-text \
synapse \
thunderbird \
transmission \
unix-runescape-client \
vim \
virt-manager \
virtualbox-5.1 \
vivaldi-stable \
vlc \
wireshark \
xclip \
zlib1g-dev
## Install extra, language-specific packages
bash common/extra-packages.sh
## Set up my git directory
bash common/git-setup.sh
## Other system tweaks
# keep the hardware clock (RTC) in local time
sudo timedatectl set-local-rtc 1
# Remove any unneeded packages
sudo apt autoremove
| Ascendings/startup-scripts | ubuntu-setup.sh | Shell | mpl-2.0 | 2,846 |
#!/bin/bash
# Usage:
## 1. Modify the TRIGGERED_COMMAND strings at the bottom of this script.
## 2. Run 'crontab -e' and add an entry like the following to trigger the
##    commands every 5 minutes:
##    */5 * * * * path/to/opencog/docker/clone/buildbot/cronjobs.sh ~
# Environment Variables
## Name of script.
SELF_NAME=$(basename $0)
## Path to a repo clone
CLONE_PATH=""
## Path from which the script is invoked
CWD=$(pwd)
## File name where the output of this script is logged to. It would be in the
## same directory from which the script is invoked.
LOG_FILE="$SELF_NAME.log"
# Functions
## Check if the given repo has been cloned to the `cronjobs` directory. If it
## hasn't been, make a shallow clone from GitHub into the `cronjobs` directory.
## $1 : github opencog repository name. For eg. ocpkg for opencog/ocpkg repo.
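## Example (illustrative): 'set_workspace ocpkg' shallow-clones
## https://github.com/opencog/ocpkg.git into $WORKSPACE/cronjobs/ocpkg.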
set_workspace(){
CLONE_PATH=$WORKSPACE/cronjobs/$1
REPO_URL=https://github.com/opencog/$1.git
if [ -d $CLONE_PATH ]; then
printf "Changing directory to %s \n" "$CLONE_PATH"
cd $CLONE_PATH
if [ "$(git rev-parse --is-inside-work-tree)" == true ] ; then
printf "%s contains a git repository \n" "$CLONE_PATH"
# Just b/c it is named as the repo doesn't mean it has the given
# repo
REMOTE_ORIGIN="https:$(git remote show origin \
| grep -i "Fetch URL" | cut -d ":" -f 3)"
if [ $REMOTE_ORIGIN == $REPO_URL ]; then
printf "%s already cloned to %s \n" "$REPO_URL" "$CLONE_PATH"
else
printf "The repository in %s is not from %s \n" \
"$CLONE_PATH" "$REPO_URL"
fi
else
printf "%s does not contain a git repository \n" "$CLONE_PATH"
cd -
rm -rf $CLONE_PATH
printf "cloning %s to %s \n" "$1" "$CLONE_PATH"
git clone --depth 1 $REPO_URL $CLONE_PATH
cd $CWD
fi
else
printf "cloning %s to %s \n" "$1" "$CLONE_PATH"
git clone --depth 1 $REPO_URL $CLONE_PATH
cd $CWD
fi
}
## Check if the given repo's remote master has changed. If it has run the given
## command.
## $1 : github opencog repository name. For eg. ocpkg for opencog/ocpkg repo.
## $2 : a string of the command to be triggered for the repo.
trigger_command(){
# If the workspace hasn't been set, set it
set_workspace $1
cd $CLONE_PATH
#REPO_NAME="$(basename \"$(git rev-parse --show-toplevel)\")"
# fetch origin/upstream depending on the repo being dealt with
git fetch origin
ORIGIN_MASTER_HEAD=$(git log -1 --pretty="%H" origin/master)
# Only check the state of the master branch
git stash; git checkout master
CURRENT_HEAD=$(git log -1 --pretty="%H")
if [ $ORIGIN_MASTER_HEAD != $CURRENT_HEAD ] ; then
# Trigger the command
eval $2
# Log every trigger
printf "
********************************************************
%s repository: triggered on origin/master commit-hash = %s
******************************************************** \n" \
"$1" "$ORIGIN_MASTER_HEAD"
# update the origin
git pull origin
else
printf "Did nothing b/c their hasn't been any change to %s repo \n" "$1"
fi
printf ">>>> %s repository: completed \n\n\n" "$1"
cd $CWD
}
# Main Execution
## Redirect all stdout and stderr outputs to the $LOG_FILE.
exec &>>$LOG_FILE
printf "%s [%s] Starting ----------------------\n" "$(date)" "$SELF_NAME"
## Prevent the script from running unless a path is passed to it.
if [ -z "$1" ]; then
printf "No argument passed \n"
printf "%s [%s] Finished ----------------------\n" "$(date)" "$SELF_NAME"
exit 1
fi
## Convert the argument passed to an absolute path
cd "$1"
WORKSPACE=$(pwd)
cd $CWD
## Check that the given directory is suitable as a workspace: it must not
## itself be a git repository.
cd $WORKSPACE
if [ "$(git rev-parse --is-inside-work-tree)" == true ] ; then
cd $WORKSPACE
printf "%s contains a git repository. Use another directory \n" "$(pwd)"
cd $CWD
printf "%s [%s] Finished ----------------------\n" "$(date)" "$SELF_NAME"
exit 1
fi
## Command to be run when condition are met. Modify it to suit your case.
## For updating docker images go to the `Build Settings` tab on hub.docker.com
## and use one of the curl `Build Trigger` options listed in the examples.
## For opencog/opencog-deps docker image an example trigger command is,
## curl -H "Content-Type: application/json" --data '{"build": true}' -X POST https://registry.hub.docker.com/u/opencog/opencog-deps/trigger/45dfbf0e-9412-4c6b-b3fd-a864e92ee9f6/
## For opencog/opencog-deps docker image
TRIGGERED_COMMAND='echo replace with the command for ocpkg repo'
trigger_command ocpkg "$TRIGGERED_COMMAND"
## For opencog/cogutils docker image
TRIGGERED_COMMAND="echo replace with the command for cogutils repo"
trigger_command cogutils "$TRIGGERED_COMMAND"
## For opencog/opencog-dev:cli docker image
TRIGGERED_COMMAND="echo replace with the command for atomspace repo"
trigger_command atomspace "$TRIGGERED_COMMAND"
printf "%s [%s] Finished ----------------------\n\n" "$(date)" "$SELF_NAME"
exit 0
| linas/opencog-docker | buildbot/cronjobs.sh | Shell | agpl-3.0 | 5,285 |
#!/bin/sh
tar -zxf cplex.tar.gz -C /
make clean
make tour
| yuguang/tsp | setup.sh | Shell | agpl-3.0 | 58 |
#!/bin/bash
echo -e "Sample\tRead pairs in millions\tRead pairs after QC\t% Read pairs retained\t% Read Aligned\tgenome coverage at >= 1x\tgenome coverage at >= 2x\tgenome coverage at >= 5x\tgenome coverage at >= 10x\tgenome coverage at >= 15x\tBases not covered\tAverage depth\tInsert Size Range" > QC_final_complete.txt
#COUNTER=0
line=$1
name=$(echo $line | sed 's/:.*//g')
min_ins=$(echo $line | sed 's/:/\t/g'| awk '{print $2}')
max_ins=$(echo $line | sed 's/:/\t/g'| awk '{print $3}')
#if [ ! -d "$name" ]; then
# echo "Folder: $name not found!"
# exit 1
#fi
mkdir $name\_QC_complete
cd $name\_QC_complete
#cp ../$name*.gz .
ln -s ../$name*R1*fastq.gz Read1.gz
ln -s ../$name*R2*fastq.gz Read2.gz
#ln -s `ls | grep '.*R1.*.fastq.gz$'` Read1.gz
#ln -s `ls | grep '.*R2.*.fastq.gz$'` Read2.gz
#sed -i "s/\(MAX_INSERT_SIZE=\).*/\\1$max_ins/" $QCBIN/complete_qc.bpipe
#sed -i "s/\(MIN_INSERT_SIZE=\).*/\\1$min_ins/" $QCBIN/complete_qc.bpipe
$QCBIN/bpipe/bin/bpipe run $QCBIN/complete_qc.bpipe > log
rea1=`grep -n '^File.*Read1.gz$' log | sed 's/:/ /' | awk '{print $1}'`
rea11=`expr $rea1 + 15`
rea2=`grep -n '^File.*Read2.gz$' log | sed 's/:/ /' | awk '{print $1}'`
rea21=`expr $rea2 + 15`
afrea1=`grep -n '^File.*Read1.gz.qtrim' log | sed 's/:/ /' | awk '{print $1}'`
afrea11=`expr $afrea1 + 15`
afrea2=`grep -n '^File.*Read1.gz.2.qtrim' log | sed 's/:/ /' | awk '{print $1}'`
afrea21=`expr $afrea2 + 15`
afrea3=`grep -n '^File.*Read1.gz.3.qtrim' log | sed 's/:/ /' | awk '{print $1}'`
afrea31=`expr $afrea3 + 15`
align=`grep 'overall alignment rate' log | awk '{print $1}'`
g1=`grep 'genome coverage at >= 1x' log | sed 's/.*:/:/' | awk '{print $2}'`
g2=`grep 'genome coverage at >= 2x' log | sed 's/.*:/:/' | awk '{print $2}'`
g5=`grep 'genome coverage at >= 5x' log | sed 's/.*://' | awk '{print $1}'`
g10=`grep 'genome coverage at >= 10x' log | sed 's/.*:/:/' | awk '{print $2}'`
g15=`grep 'genome coverage at >= 15x' log | sed 's/.*:/:/' | awk '{print $2}'`
gbsn=`grep 'bases not covered' log | sed 's/.*:/:/' | awk '{print $2}'`
avg_dep=`grep 'Avg depth' log | sed 's/.*:/:/' | awk '{print $2}'`
head -$rea11 log | tail -15 | sed 's/\(.*\):.*/\1/' > filewa
head -$rea11 log | tail -15 | sed 's/^/\t/' | sed 's/.*:\(.*\)/\1/' > file1
head -$rea21 log | tail -15 | sed 's/^/\t/' | sed 's/.*:\(.*\)/\1/' > file2
head -$afrea11 log | tail -15 | sed 's/^/\t/' | sed 's/.*:\(.*\)/\1/' > file3
head -$afrea21 log | tail -15 | sed 's/^/\t/' | sed 's/.*:\(.*\)/\1/' > file4
head -$afrea31 log | tail -15 | sed 's/^/\t/' | sed 's/.*:\(.*\)/\1/' > file5
paste filewa file1 file2 file3 file4 file5 > file
echo -e "$name\tRead1\tRead2\tRead1 after QC\tRead2 after QC\tSingleton" >> detailed_qc_complete.txt
cat file >> detailed_qc_complete.txt
cp detailed_qc_complete.txt ../$name\_detailed_qc_complete.txt
rm file*
p1=`head -3 detailed_qc_complete.txt | tail -1 | awk '{print $3}'`
p2=`head -3 detailed_qc_complete.txt | tail -1 | awk '{print $5}'`
#COUNTER=$(expr $COUNTER '+' 1)
#echo -e "$COUNTER \n"
#rm ./*.fastq.gz ./Read*.gz
#gzip Read1.gz.qtrim
#gzip Read1.gz.2.qtrim
#gzip Read1.gz.3.qtrim
cic=`echo "$p2 * 100 / $p1 " | bc -l`
p3=`echo "scale=2;$cic * 10/10 " | bc -l`
ne1=`echo "scale=3;$p1 / 1000000 " | bc -l`
ne2=`echo "scale=3;$p2 / 1000000 " | bc -l`
echo -e "$name\t$ne1\t$ne2\t$p3\t$align\t$g1\t$g2\t$g5\t$g10\t$g15\t$gbsn\t$avg_dep\t$min_ins-$max_ins" >> ../QC_final_complete.txt
cd ../
| CEG-ICRISAT/NGS-QCbox | qcbin/batchqc_complete.sh | Shell | agpl-3.0 | 3,416 |
#!/bin/bash -e
source "$HOME/.bashrc"
cd "$HOME/workspace/factotum/"
(cd tokyo ; bash startup.sh)
MEM="-Xms2g -Xmx2g"
GC="-XX:+UseConcMarkSweepGC -XX:+UseParNewGC -XX:CMSInitiatingOccupancyFraction=70"
export MAVEN_OPTS="-ea -esa -server -XX:+DoEscapeAnalysis $MEM $GC"
mvn "-Djetty.port=4277" "-Dfactotum.mode=$1" jetty:run
| dnikulin/factotum | scripts/startup.sh | Shell | agpl-3.0 | 326 |
cmake -DCMAKE_BUILD_TYPE=Release -DBUILD_SHARED=ON -DCMAKE_INSTALL_PREFIX=/usr/local -DBUILD_TESTS=ON ../../
| semerlin/tlog | build/release/configure.sh | Shell | lgpl-2.1 | 109 |
#!/bin/sh
uniset2-start.sh -f ./uniset2-rtuexchange --confile test.xml \
--smemory-id SharedMemory \
--rs-dev /dev/cbsideA0 \
--rs-name RTUExchange \
--rs-speed 115200 \
--rs-filter-field rs \
--rs-filter-value 4 \
--dlog-add-levels info,crit,warn,level4,level3 \
--rs-force 0 \
--rs-force-out 0 \
--rs-polltime 500 \
--rs-set-prop-prefix
#,level3
# --rs-force 1 \
| Etersoft/uniset2 | extensions/ModbusMaster/start_fg_mbrtu.sh | Shell | lgpl-2.1 | 380 |
#!/bin/bash
lt_per_a_comparar="resultats/lt/diccionari.txt"
#lt_per_a_comparar="/home/jaume/github/spanish-dict-tools/tmp/mots-no-trobats.txt"
#lt_per_a_comparar="other-dicts/tagger-2/diccionari.txt"
#lt_per_a_comparar="other-dicts/tagger-2/verbs.txt"
dir_programes="test-lt-fdic-lt"
dir_intermedi="test-lt-fdic-lt/intermedi"
dir_resultat="resultats/test-lt-fdic-lt"
mkdir $dir_intermedi
echo "Separant i ordenant diccionari LT"
perl $dir_programes/separa-reordena-lt.pl $lt_per_a_comparar $dir_intermedi
for i in $dir_intermedi/ordenats-*.txt
do
export LC_ALL=C && sort $i -o $i
done
echo "Adjectius: de LT a FDIC"
perl lt-to-fdic/lt-to-fdic.pl adjectius $dir_intermedi
echo "Noms: de LT a FDIC"
perl lt-to-fdic/lt-to-fdic.pl noms $dir_intermedi
echo "Verbs: de LT a FDIC"
mkdir $dir_intermedi/models-verbals
rm $dir_intermedi/models-verbals/*
perl lt-to-fdic/extrau-verbs-i-models.pl lt-to-fdic $dir_intermedi
#exit
echo "Adjectius: de FDIC a LT..."
perl fdic-to-lt/flexiona.pl $dir_intermedi/noms-fdic.txt $dir_intermedi/noms-lt.txt
echo "Noms: de FDIC a LT..."
perl fdic-to-lt/flexiona.pl $dir_intermedi/adjectius-fdic.txt $dir_intermedi/adjectius-lt.txt
echo "Verbs: de FDIC a LT..."
perl fdic-to-lt/conjuga-verbs.pl $dir_intermedi/verbs-fdic.txt $dir_intermedi/verbs-lt.txt $dir_intermedi/models-verbals/
echo "Comprovant diferències"
echo "*** DIFERÈNCIES ***" > $dir_resultat/diff.txt
for i in noms adjectius verbs
do
echo "** Compara $i **" >> $dir_resultat/diff.txt
export LC_ALL=C && sort $dir_intermedi/$i.txt -o $dir_intermedi/$i.txt
export LC_ALL=C && sort $dir_intermedi/$i-lt.txt -o $dir_intermedi/$i-lt.txt
diff $dir_intermedi/$i.txt $dir_intermedi/$i-lt.txt >> $dir_resultat/diff.txt
done
echo "** Altres errors **" >> $dir_resultat/diff.txt
grep "#" $lt_per_a_comparar >> $dir_resultat/diff.txt
grep "ERROR" $lt_per_a_comparar >> $dir_resultat/diff.txt
#rm -rf $dir_intermedi
echo "Fet! Resultats en $dir_resultat"
| jaumeortola/spanish-dict-tools | make-test-lt-fdic.sh | Shell | lgpl-2.1 | 1,964 |
#!/bin/bash
if [ $# -lt 2 ]; then
echo "usage: $0 <data folder> <order file>"
echo
echo "data folder: should contain separate files, each containgin a"
echo " request/response pair in wireshark \"follow tcp stream\" format."
echo " files must be named 'x_action' where x is a number determining the order"
echo " of request and action is the soapAction found in the reqeust header, e.g. 1_logOn"
echo
echo "order file: a file containing a number with no new line."
echo " this number is used use to determine which file to use in the data folder."
echo
echo "hint: use script with socat to act as a http server. the script"
echo " will listen for requests on stdin and send a responses on stdout"
echo
echo "example: socat TCP4-LISTEN:8080,fork,tcpwrap=script EXEC:\"$0 logOn logon.order\""
echo
echo " test with: telnet localhost 8080"
echo
exit 1
fi
VERBOSE=1
debug()
{
if [ $VERBOSE = 1 ]; then
echo "$1" >&2
fi
}
debug ""
debug "PID:$$ Waiting for request"
soapaction=""
read -r -t 1
contentlength=0
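# Read header lines until the blank separator line: with CRLF line endings,
# 'read -r' strips the newline and leaves a lone carriage return, so a REPLY
# of length 1 marks the end of the header block. The 1s timeout guards
# against a client that stops sending.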
while [ $? -eq 0 ] && [ ${#REPLY} -ne 1 ]; do
debug "PID:$$ Request: $REPLY"
# If we get a soapaction, store it
case $REPLY in
SOAPAction*)
soapaction="$(echo $REPLY | cut -d ':' -f 2 | tr -d "\r\n\" ")"
;;
Content-Length*)
contentlength="$(echo $REPLY | cut -d ':' -f 2 | tr -d "\r\n\" ")"
;;
esac
HEADERS="${HEADERS}\r\n${REPLY}"
read -r -t 1
done
debug "PID:$$ Reading $contentlength bytes of request body $REPLY"
# read body char by char
while [ $contentlength -gt 0 ]; do
read -r -n 1 -t 1 || break
BODY="${BODY}${REPLY}"
contentlength=$(($contentlength - 1))
done
debug "PID:$$ Request-body: $BODY"
debug ""
debug "PID:$$ Request for \"$soapaction\" complete"
if [ "$soapaction" == "" ]; then
debug "PID:$$ Empty soapaction, cannot respond"
exit
fi
# internal server error response
internalServerError="HTTP/1.1 500 Internal Server Error
Date: Fri, 05 Aug 2011 08:32:58 GMT
Server: Apache/2.2.9 (Debian) PHP/5.2.6-1+lenny13 with Suhosin-Patch mod_ssl/2.2.9 OpenSSL/0.9.8g mod_perl/2.0.4 Perl/v5.10.0
X-Powered-By: PHP/5.2.6-1+lenny13
Cache-Control: no-store, no-cache, must-revalidate, post-check=0, pre-check=0
Pragma: no-cache
Content-Length: 411
Connection: close
Content-Type: text/xml; charset=utf-8
<?xml version=\"1.0\" encoding=\"UTF-8\"?>
<SOAP-ENV:Envelope xmlns:SOAP-ENV=\"http://schemas.xmlsoap.org/soap/envelope/\" xmlns:ns1=\"http://www.daisy.org/ns/daisy-online/\"><SOAP-ENV:Body><SOAP-ENV:Fault><faultcode>SOAP-ENV:Server</faultcode><faultstring>Internal Server Error</faultstring><faultactor></faultactor><detail><ns1:internalServerErrorFault/></detail></SOAP-ENV:Fault></SOAP-ENV:Body></SOAP-ENV:Envelope>"
# find response file
datafolder="$1"
if [ ! -d "$datafolder" ]; then
debug "PID:$$ Data folder '$datafolder' not found, returning InternalServerErrorFault"
echo "$internalServerError"
exit
fi
orderfile=$2
order=`cat $orderfile`
if [ $# -ge 3 ] && [ $3 = 'stresstest' ]; then
# compare body in request with body in datafolder
datafile="$datafolder/request"
if [ ! -f "$datafile" ]; then
debug "PID:$$ Data file '$datafile' not found, returning InternalServerErrorFault"
echo "$internalServerError"
exit
fi
data=$(cat $datafile)
if [ "$data" != "$BODY" ]; then
debug "PID:$$ body in request does not match with body in '$datafile', returning InternalServerErrorFault"
echo "$internalServerError"
exit
fi
order="1"
fi
operation=`echo $soapaction | tr -d '/'`
responsefile="${datafolder}/${order}_${operation}"
if [ ! -f "$responsefile" ]; then
debug "PID:$$ response file '$responsefile' not found, returning InternalServerErrorFault"
echo "$internalServerError"
exit
fi
# update order file for additional requests, if any?
neworder=`expr $order + 1`
echo -n $neworder > $orderfile
debug "PID:$$ Sending response from file $responsefile"
response=`sed "s/.*\(HTTP\/1.1 [0-9][0-9][0-9]\)/\n\1/g" "$responsefile" | awk '/^HTTP/ {f=1}f' | sed 's/\(.*\)/PID:'$$' Response: \1/g'`
debug "$response"
sed "s/.*\(HTTP\/1.1 [0-9][0-9][0-9]\)/\n\1/g" "$responsefile" | awk '/^HTTP/ {f=1}f'
| kolibre/libkolibre-daisyonline | tests/fakesoapresponder.sh | Shell | lgpl-2.1 | 4,319 |
#!/bin/bash
# Finds uninitialized memory, buffer overflows, memory leaks and discovered access to deallocated memory
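# Typical invocations (illustrative):
#   ./valgrind.sh                        # checks the default ../examples/.libs/ui
#   ./valgrind.sh ./myprog --some-flag   # checks another binary; extra args are passed through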
if [ -z "$1" ]
then
PROG="../examples/.libs/ui"
else
PROG="$1"
shift
fi
# Is the file executable?
test ! -x "$PROG" && echo "No executable file not found" && exit 1
# ELF executable file?
ELFMAGIC="$(echo -e "\\x7fELF")"
MAGIC="$(dd bs=1 count=4 if="$PROG" 2>/dev/null)"
test "$MAGIC" != "$ELFMAGIC" && echo "No ELF executable file" && exit 2
LD_LIBRARY_PATH=../final/.libs/ valgrind --tool=memcheck --suppressions=../doc/ncurses.supp --leak-check=yes --show-reachable=yes --num-callers=20 --track-fds=yes --track-origins=yes --log-file=./valgrind.txt "$PROG" "$@"
less ./valgrind.txt
rm -f ./valgrind.txt
| gansm/finalcut | scripts/valgrind.sh | Shell | lgpl-3.0 | 735 |
#!/bin/bash
if [[ "$1" != "RUN" ]]; then
echo "This script is designed to be run inside of the Docker container provided"\
"by the Dockerfile in this directory. You most probably don't want to be running"\
"this manually. Use the 'build_and_run_in_docker.sh' script instead."
exit 1
fi
set -xe
cat | sudo tee /etc/yum.repos.d/openvdc.repo << EOS
[openvdc]
name=OpenVDC
failovermethod=priority
baseurl=https://ci.openvdc.org/repos/${BRANCH}/${RELEASE_SUFFIX}
enabled=1
gpgcheck=0
EOS
sudo yum install -y openvdc-acceptance-test
/multibox/build.sh
wait_for_port_ready() {
local ip=$1
local port=$2
local started=$(date '+%s')
while ! (echo "" | nc $ip $port) > /dev/null; do
echo "Waiting for $ip:$port starts to listen ..."
sleep 1
if [[ $(($started + 60)) -le $(date '+%s') ]]; then
echo "Timed out for ${ip}:${port} becomes ready"
return 1
fi
done
return 0
}
# gRPC API port
wait_for_port_ready 10.0.100.12 5000
dump_logs() {
local node=""
. /multibox/config.source
for node in ${NODES[@]}
do
# Ignore errors so that logs are collected from all nodes.
set +e
cat <<TITLE
=======================
### ${node}: Result of journalctl
=======================
TITLE
echo "journalctl" | SSH_OPTS="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" /multibox/login.sh $node
done
}
systemctl_status_all(){
local node=""
. /multibox/config.source
for node in ${NODES[@]}
do
set +e
cat <<TITLE
=======================
### ${node}: Result of systemctl status
=======================
TITLE
echo "systemctl status" | SSH_OPTS="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" /multibox/login.sh $node
done
}
# Show Zookeeper cluster status
show_zk_status(){
cat <<TITLE
=======================
### ZooKeeper cluster status
=======================
TITLE
local addr=""
. /multibox/config.source
for addr in ${ZK_NODES[@]}
do
echo "=======> ${addr}:2181"
echo "srvr" | nc $addr 2181
done
}
(
trap dump_logs EXIT
systemctl_status_all
show_zk_status
# Run the actual tests as axsh user. Root should never be required to run the openvdc command
su axsh -c "/opt/axsh/openvdc/bin/openvdc-acceptance-test -test.v"
)
| axsh/openvdc | ci/citest/acceptance-test/run_tests.sh | Shell | lgpl-3.0 | 2,245 |
#!/bin/bash
# Tests Python module functions and types.
#
# Version: 20200705
EXIT_SUCCESS=0;
EXIT_FAILURE=1;
EXIT_IGNORE=77;
TEST_FUNCTIONS="support";
TEST_FUNCTIONS_WITH_INPUT="file";
OPTION_SETS="";
TEST_TOOL_DIRECTORY=".";
INPUT_GLOB="*";
test_python_function()
{
local TEST_FUNCTION=$1;
local TEST_DESCRIPTION="Testing Python-bindings functions: ${TEST_FUNCTION}";
local TEST_SCRIPT="${TEST_TOOL_DIRECTORY}/pyqcow_test_${TEST_FUNCTION}.py";
run_test_with_arguments "${TEST_DESCRIPTION}" "${TEST_SCRIPT}";
local RESULT=$?;
return ${RESULT};
}
test_python_function_with_input()
{
local TEST_FUNCTION=$1;
local TEST_DESCRIPTION="Testing Python-bindings functions: ${TEST_FUNCTION}";
local TEST_SCRIPT="${TEST_TOOL_DIRECTORY}/pyqcow_test_${TEST_FUNCTION}.py";
if ! test -d "input";
then
echo "Test input directory not found.";
return ${EXIT_IGNORE};
fi
local RESULT=`ls input/* | tr ' ' '\n' | wc -l`;
if test ${RESULT} -eq ${EXIT_SUCCESS};
then
echo "No files or directories found in the test input directory";
return ${EXIT_IGNORE};
fi
local TEST_PROFILE_DIRECTORY=$(get_test_profile_directory "input" "pyqcow");
local IGNORE_LIST=$(read_ignore_list "${TEST_PROFILE_DIRECTORY}");
RESULT=${EXIT_SUCCESS};
for TEST_SET_INPUT_DIRECTORY in input/*;
do
if ! test -d "${TEST_SET_INPUT_DIRECTORY}";
then
continue;
fi
if check_for_directory_in_ignore_list "${TEST_SET_INPUT_DIRECTORY}" "${IGNORE_LIST}";
then
continue;
fi
local TEST_SET_DIRECTORY=$(get_test_set_directory "${TEST_PROFILE_DIRECTORY}" "${TEST_SET_INPUT_DIRECTORY}");
local OLDIFS=${IFS};
# IFS="\n"; is not supported by all platforms.
IFS="
";
if test -f "${TEST_SET_DIRECTORY}/files";
then
for INPUT_FILE in `cat ${TEST_SET_DIRECTORY}/files | sed "s?^?${TEST_SET_INPUT_DIRECTORY}/?"`;
do
run_test_on_input_file_with_options "${TEST_SET_DIRECTORY}" "${TEST_DESCRIPTION}" "default" "${OPTION_SETS}" "${TEST_SCRIPT}" "${INPUT_FILE}";
RESULT=$?;
if test ${RESULT} -ne ${EXIT_SUCCESS};
then
break;
fi
done
else
for INPUT_FILE in `ls -1d ${TEST_SET_INPUT_DIRECTORY}/${INPUT_GLOB}`;
do
run_test_on_input_file_with_options "${TEST_SET_DIRECTORY}" "${TEST_DESCRIPTION}" "default" "${OPTION_SETS}" "${TEST_SCRIPT}" "${INPUT_FILE}";
RESULT=$?;
if test ${RESULT} -ne ${EXIT_SUCCESS};
then
break;
fi
done
fi
IFS=${OLDIFS};
if test ${RESULT} -ne ${EXIT_SUCCESS};
then
break;
fi
done
return ${RESULT};
}
if test -n "${SKIP_PYTHON_TESTS}";
then
exit ${EXIT_IGNORE};
fi
TEST_RUNNER="tests/test_runner.sh";
if ! test -f "${TEST_RUNNER}";
then
TEST_RUNNER="./test_runner.sh";
fi
if ! test -f "${TEST_RUNNER}";
then
echo "Missing test runner: ${TEST_RUNNER}";
exit ${EXIT_FAILURE};
fi
source ${TEST_RUNNER};
RESULT=${EXIT_IGNORE};
for TEST_FUNCTION in ${TEST_FUNCTIONS};
do
test_python_function "${TEST_FUNCTION}";
RESULT=$?;
if test ${RESULT} -ne ${EXIT_SUCCESS};
then
break;
fi
done
if test ${RESULT} -ne ${EXIT_SUCCESS} && test ${RESULT} -ne ${EXIT_IGNORE};
then
exit ${RESULT};
fi
for TEST_FUNCTION in ${TEST_FUNCTIONS_WITH_INPUT};
do
if test -d "input";
then
test_python_function_with_input "${TEST_FUNCTION}";
RESULT=$?;
else
test_python_function "${TEST_FUNCTION}";
RESULT=$?;
fi
if test ${RESULT} -ne ${EXIT_SUCCESS};
then
break;
fi
done
exit ${RESULT};
| libyal/libqcow | tests/test_python_module.sh | Shell | lgpl-3.0 | 3,414 |
#!/bin/bash
multimarkdown --to=latex content.mmd > content.tex &&
omgtex.rb -o content.tex
| flecno/bugfree-octo-ironman | architecture/ArchitekturBeschreibung/build.sh | Shell | unlicense | 96 |
#!/bin/bash
# Script to download all .sra files given an experiment number.
# This is because the University of Minnesota Minnesota Supercomputing
# Institute (MSI) does not allow remote access for the SRA Toolkit.
# Requires the SRA Toolkit and lftp be installed.
# Thomas Kono
# Saint Paul, MN
set -e
set -u
set -o pipefail
USAGE="Usage:
$0 <Option> <Accession> [ -d DIR ] [ -v ] [ -h ]
Will fetch all .SRA files under the SRA accession number given by
<Accession>. If DIR is specified, then this script will create it if it does
not exist, and put everything in there. If DIR is not supplied, then the
current directory is used.
Pass -h to see available options."
HELP="Usage:
$0 <Option> <Accession> [ -d DIR ] [ -v ] [ -h ]
Available options:
Required:
-e Provided <Accession> is an Experiment number.
-r Provided <Accession> is a Run number.
-p Provided <Accession> is a Sample number.
-s Provided <Accession> is a Study number.
Optional:
-d DIR Output all .SRA files into DIR.
Switches:
-v Don't actually make directories and download, just print what is
going to happen
-h Show this message and exit.
"
# If there are no arguments passed to the script, drop the usage message and
# exit.
if [ $# == 0 ]
then echo "$USAGE"
exit 1
fi
# Parse the options
DRY_RUN="false"
DIR=$(pwd)
BASE_URL="ftp://ftp-trace.ncbi.nih.gov/sra/sra-instant/reads"
while [[ $# -gt 0 ]]
do
FLAG="$1"
case $FLAG in
-e)
ACC="$2"
BASE_URL="${BASE_URL}/ByExp/sra"
shift
;;
-r)
ACC="$2"
BASE_URL="${BASE_URL}/ByRun/sra"
shift
;;
-p)
ACC="$2"
BASE_URL="${BASE_URL}/BySample/sra"
shift
;;
-s)
ACC="$2"
BASE_URL="${BASE_URL}/ByStudy/sra"
shift
;;
-d)
DIR="$2"
shift
;;
-v)
DRY_RUN="true"
;;
-h)
echo "$HELP"
exit 2
;;
*)
echo "$USAGE"
exit 1
;;
esac
shift
done
# Now that we have decided what type of accession number we have, we finish
# building the rest of the URL
# As of 2015-09-17, it follow this format:
# ftp://ftp-trace.ncbi.nih.gov/sra/sra-instant/reads/[Type]/sra/[SR_/ER_/DR_]
# /[First 6 of Acc]/[Acc]/[Acc].sra
QUERY_URL="${BASE_URL}/${ACC:0:3}/${ACC:0:6}/${ACC}/"
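# Example (hypothetical run accession SRR390728 fetched with -r):
# QUERY_URL=ftp://ftp-trace.ncbi.nih.gov/sra/sra-instant/reads/ByRun/sra/SRR/SRR390/SRR390728/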
# if we are doing a dry-run, just print out these values and exit
if [[ "${DRY_RUN}" == "true" ]]
then
echo "The following operations will be performed:"
echo "mkdir -p ${DIR}"
echo "cd ${DIR}"
echo "lftp -c mirror ${QUERY_URL} ."
echo "chmod -R +w ${DIR}*"
exit 3
else
# Make the directory and cd into it
mkdir -p ${DIR}
cd ${DIR}
# get the contents with lftp
lftp -c "mirror ${QUERY_URL} ."
chmod -R +w ${DIR}*
fi
| TomJKono/Misc_Utils | SRA_Fetch.sh | Shell | unlicense | 3,157 |
#!/usr/bin/env bash
set -ex;
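# The sed calls below pick apart a trailing semver in GITHUB_REF; e.g. a
# (hypothetical) GITHUB_REF=refs/tags/1.2.3 yields MAJOR=1, MINOR=2, PATCH=3.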
MAJOR="$(echo ${GITHUB_REF} | sed -E 's/.*([0-9]+)\.([0-9]+)\.([0-9]+)$/\1/')";
MINOR="$(echo ${GITHUB_REF} | sed -E 's/.*([0-9]+)\.([0-9]+)\.([0-9]+)$/\2/')";
PATCH="$(echo ${GITHUB_REF} | sed -E 's/.*([0-9]+)\.([0-9]+)\.([0-9]+)$/\3/')";
VERSION="${MAJOR}.${MINOR}.${PATCH}";
| ofer987/danofer | .github/scripts/create_version.sh | Shell | unlicense | 310 |
#!/bin/bash
#/usr/bin/env > /tmp/who
#ulimit -m 131072 -t 60
PATH=$PATH:"/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
while getopts "p:" arg
do
case $arg in
p)
/home/rails-deploy/Mechempire/compile/RobotAppearanceReader $OPTARG/libmyAI.so stdout
;;
?)
echo "unkonw argument"
exit 1
;;
esac
done
| MechEmpire/Mechempire | compile/get_info.sh | Shell | apache-2.0 | 379 |
#!/bin/bash
# Colors schemes for echo:
RD='\033[0;31m' # Red
BL='\033[1;34m' # Blue
GN='\033[0;32m' # Green
MG='\033[0;95m' # Magenta
NC='\033[0m' # No Color
TERM_COLS="$(tput cols)"
ERROR_STRING="Installation error. Exiting"
CURRENT_PATH=$(pwd)
DEFAULT_PHP_VERSION="php7.4"
CURRENT_KERNEL=$(grep -w ID /etc/os-release | cut -d "=" -f 2 | tr -d '"')
CURRENT_OS=$(grep -e VERSION_ID /etc/os-release | cut -d "=" -f 2 | cut -d "." -f 1 | tr -d '"')
echo_with_color() {
case $1 in
Red | RED | red)
echo -e "${NC}${RD} $2 ${NC}"
;;
Green | GREEN | green)
echo -e "${NC}${GN} $2 ${NC}"
;;
Magenta | MAGENTA | magenta)
echo -e "${NC}${MG} $2 ${NC}"
;;
Blue | BLUE | blue)
echo -e "${NC}${BL} $2 ${NC}"
;;
*)
echo -e "${NC} $2 ${NC}"
;;
esac
}
## Puts text in the center of the terminal, just for layout / making things pretty
print_centered() {
[[ $# == 0 ]] && return 1
declare -i TERM_COLS="$(tput cols)"
declare -i str_len="${#1}"
[[ $str_len -ge $TERM_COLS ]] && {
echo "$1";
return 0;
}
declare -i filler_len="$(( (TERM_COLS - str_len) / 2 ))"
[[ $# -ge 2 ]] && ch="${2:0:1}" || ch=" "
filler=""
for (( i = 0; i < filler_len; i++ )); do
filler="${filler}${ch}"
done
printf "%s%s%s" "$filler" "$1" "$filler"
[[ $(( (TERM_COLS - str_len) % 2 )) -ne 0 ]] && printf "%s" "${ch}"
printf "\n"
return 0
}
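## e.g. print_centered "Welcome" "-" centers the word padded with dashes,
## while print_centered "-" "-" (used below) draws a full-width rule.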
## Used for each of the individual components to be installed
run_process () {
while true; do echo -n . >&5; sleep 1; done &
trap 'kill $BGPID; exit' INT
BGPID=$!
echo -n "$1" >&5
$2
echo done >&5
kill $!
}
clear
# Make sure script run as sudo
if ((EUID != 0)); then
echo -e "${RD}\nPlease run script with root privileges: sudo ./dfsetup.run \n"
exit 1
fi
#### Check Current OS is compatible with the installer ####
case $CURRENT_KERNEL in
ubuntu)
if ((CURRENT_OS != 18)) && ((CURRENT_OS != 20)); then
echo_with_color red "The installer only supports Ubuntu 18 and 20. Exiting...\n"
exit 1
fi
;;
debian)
if ((CURRENT_OS != 9)) && ((CURRENT_OS != 10)); then
echo_with_color red "The installer only supports Debian 9 and 10. Exiting...\n"
exit 1
fi
;;
centos | rhel)
if ((CURRENT_OS != 7)) && ((CURRENT_OS != 8)); then
echo_with_color red "The installer only supports Rhel (Centos) 7 and 8. Exiting...\n"
exit 1
fi
;;
fedora)
if ((CURRENT_OS != 32)) && ((CURRENT_OS != 33)) && ((CURRENT_OS != 34)); then
echo_with_color red "The installer only supports Fedora 32, 33, and 34. Exiting...\n"
exit 1
fi
;;
*)
echo_with_color red "Installer only supported on Ubuntu, Debian, Rhel (Centos) and Fedora. Exiting...\n"
exit 1
;;
esac
print_centered "-" "-"
print_centered "-" "-"
print_centered "Welcome to DreamFactory!"
print_centered "-" "-"
print_centered "-" "-"
print_centered "Thank you for choosing DreamFactory. By default this installer will install the latest version of DreamFactory with a preconfigured Nginx web server. Additional options are available in the menu below:"
print_centered "-" "-"
echo -e ""
echo -e "[0] Default Installation (latest version of DreamFactory with Nginx Server)"
echo -e "[1] Install driver and PHP extensions for Oracle DB"
echo -e "[2] Install driver and PHP extensions for IBM DB2"
echo -e "[3] Install driver and PHP extensions for Cassandra DB"
echo -e "[4] Install Apache2 web server for DreamFactory (Instead of Nginx)"
echo -e "[5] Install MariaDB as the default system database for DreamFactory"
echo -e "[6] Install a specfic version of DreamFactory"
echo -e "[7] Run Installation in debug mode (logs will output to /tmp/dreamfactory_installer.log)\n"
print_centered "-" "-"
echo_with_color magenta "Input '0' and press Enter to run the default installation. To install additional options, type the corresponding number (e.g. '1,5' for Oracle and a MySql system database) from the menu above and press Enter"
read -r INSTALLATION_OPTIONS
print_centered "-" "-"
if [[ $INSTALLATION_OPTIONS == *"1"* ]]; then
ORACLE=TRUE
echo_with_color green "Oracle selected."
fi
if [[ $INSTALLATION_OPTIONS == *"2"* ]]; then
DB2=TRUE
echo_with_color green "DB2 selected."
fi
if [[ $INSTALLATION_OPTIONS == *"3"* ]]; then
CASSANDRA=TRUE
echo_with_color green "Cassandra selected."
fi
if [[ $INSTALLATION_OPTIONS == *"4"* ]]; then
APACHE=TRUE
echo_with_color green "Apache selected."
fi
if [[ $INSTALLATION_OPTIONS == *"5"* ]]; then
MYSQL=TRUE
echo_with_color green "MariaDB System Database selected."
fi
if [[ $INSTALLATION_OPTIONS == *"6"* ]]; then
echo_with_color magenta "What version of DreamFactory would you like to install? (E.g. 4.9.0)"
read -r -p "DreamFactory Version: " DREAMFACTORY_VERSION_TAG
echo_with_color green "DreamFactory Version ${DREAMFACTORY_VERSION_TAG} selected."
fi
if [[ $INSTALLATION_OPTIONS == *"7"* ]]; then
DEBUG=TRUE
echo_with_color green "Running in debug mode. Run this command: tail -f /tmp/dreamfactory_installer.log in a new terminal session to follow logs during installation"
fi
if [[ ! $DEBUG == TRUE ]]; then
exec 5>&1 # Save a copy of STDOUT
exec >/dev/null 2>&1 # Redirect STDOUT to Null
else
exec 5>&1 # Save a copy of STDOUT. Used because all echo redirects output to 5.
exec >/tmp/dreamfactory_installer.log 2>&1
fi
# Retrieve executing user's username
CURRENT_USER=$(logname)
if [[ -z $SUDO_USER ]] && [[ -z $CURRENT_USER ]]; then
echo_with_color red "Enter username for installation DreamFactory:" >&5
read -r CURRENT_USER
if [[ $CURRENT_KERNEL == "debian" ]]; then
su "${CURRENT_USER}" -c "echo 'Checking user availability'" >&5
if (($? >= 1)); then
echo 'Please provide another user' >&5
exit 1
fi
fi
fi
if [[ -n $SUDO_USER ]]; then
CURRENT_USER=${SUDO_USER}
fi
# Sudo should be used to run the script, but CURRENT_USER themselves should not be root (i.e should be another user running with sudo),
# otherwise composer will get annoyed. If the user wishes to continue as root, then an environment variable will be set when 'composer install' is run later on in the script.
if [[ $CURRENT_USER == "root" ]]; then
echo -e "WARNING: Although this script must be run with sudo, it is not recommended to install DreamFactory as root (specifically 'composer' commands) Would you like to:\n [1] Continue as root\n [2] Provide username for installing DreamFactory" >&5
read -r INSTALL_AS_ROOT
if [[ $INSTALL_AS_ROOT == 1 ]]; then
echo -e "Continuing installation as root" >&5
else
echo -e "Enter username for installing DreamFactory" >&5
read -r CURRENT_USER
echo -e "User: ${CURRENT_USER} selected. Continuing" >&5
fi
fi
echo -e "${CURRENT_KERNEL^} ${CURRENT_OS} detected. Installing DreamFactory...\n" >&5
#Go into the individual scripts here
case $CURRENT_KERNEL in
ubuntu)
source ./ubuntu.sh
;;
debian)
source ./debian.sh
;;
centos | rhel)
source ./centos.sh
;;
fedora)
source ./fedora.sh
;;
esac
#### INSTALLER ####
### STEP 1. Install system dependencies
echo_with_color blue "Step 1: Installing system dependencies...\n" >&5
run_process " Updating System" system_update
run_process " Installing System Dependencies" install_system_dependencies
echo_with_color green "\nThe system dependencies have been successfully installed.\n" >&5
### Step 2. Install PHP
echo_with_color blue "Step 2: Installing PHP...\n" >&5
run_process " Installing PHP" install_php
echo_with_color green "\nPHP installed.\n" >&5
### Step 3. Install Apache
if [[ $APACHE == TRUE ]]; then ### Only with key --apache
echo_with_color blue "Step 3: Installing Apache...\n" >&5
# Check Apache installation status
check_apache_installation_status
if ((CHECK_APACHE_PROCESS == 0)) || ((CHECK_APACHE_INSTALLATION == 0)); then
echo_with_color red "Apache2 detected. Skipping installation. Configure Apache2 manually.\n" >&5
else
# Install Apache
# Check if running web server on port 80
lsof -i :80 | grep LISTEN
if (($? == 0)); then
echo_with_color red "Port 80 taken.\n " >&5
echo_with_color red "Skipping installation Apache2. Install Apache2 manually.\n " >&5
else
run_process " Installing Apache" install_apache
run_process " Restarting Apache" restart_apache
echo_with_color green "\nApache2 installed.\n" >&5
fi
fi
else
echo_with_color blue "Step 3: Installing Nginx...\n" >&5 ### Default choice
# Check nginx installation in the system
check_nginx_installation_status
if ((CHECK_NGINX_PROCESS == 0)) || ((CHECK_NGINX_INSTALLATION == 0)); then
echo_with_color red "Nginx detected. Skipping installation. Configure Nginx manually.\n" >&5
else
# Install nginx
# Checking running web server
lsof -i :80 | grep LISTEN
if (($? == 0)); then
echo_with_color red "Port 80 taken.\n " >&5
echo_with_color red "Skipping Nginx installation. Install Nginx manually.\n " >&5
else
run_process " Installing Nginx" install_nginx
run_process " Restarting Nginx" restart_nginx
echo_with_color green "\nNginx installed.\n" >&5
fi
fi
fi
### Step 4. Configure PHP development tools
echo_with_color blue "Step 4: Configuring PHP Extensions...\n" >&5
## Install PHP PEAR
run_process " Installing PHP PEAR" install_php_pear
echo_with_color green " PHP PEAR installed\n" >&5
### Install ZIP
if [[ $CURRENT_KERNEL == "fedora" ]]; then
php -m | grep -E "^zip"
if (($? >= 1)); then
run_process " Installing zip" install_zip
php -m | grep -E "^zip"
if (($? >= 1)); then
echo_with_color red "\nExtension Zip has errors..." >&5
else
echo_with_color green " Zip installed\n" >&5
fi
fi
fi
### Install MCrypt
php -m | grep -E "^mcrypt"
if (($? >= 1)); then
run_process " Installing Mcrypt" install_mcrypt
php -m | grep -E "^mcrypt"
if (($? >= 1)); then
echo_with_color red "\nMcrypt installation error." >&5
else
echo_with_color green " Mcrypt installed\n" >&5
fi
fi
### Install MongoDB drivers
php -m | grep -E "^mongodb"
if (($? >= 1)); then
run_process " Installing Mongodb" install_mongodb
php -m | grep -E "^mongodb"
if (($? >= 1)); then
echo_with_color red "\nMongoDB installation error." >&5
else
echo_with_color green " MongoDB installed\n" >&5
fi
fi
### Install MS SQL Drivers
php -m | grep -E "^sqlsrv"
if (($? >= 1)); then
run_process " Installing MS SQL Server" install_sql_server
run_process " Installing pdo_sqlsrv" install_pdo_sqlsrv
php -m | grep -E "^sqlsrv"
if (($? >= 1)); then
echo_with_color red "\nMS SQL Server extension installation error." >&5
else
echo_with_color green " MS SQL Server extension installed\n" >&5
fi
php -m | grep -E "^pdo_sqlsrv"
if (($? >= 1)); then
echo_with_color red "\nCould not install pdo_sqlsrv extension" >&5
else
echo_with_color green " pdo_sqlsrv installed\n" >&5
fi
fi
### DRIVERS FOR ORACLE ( ONLY WITH KEY --with-oracle )
php -m | grep -E "^oci8"
if (($? >= 1)); then
if [[ $ORACLE == TRUE ]]; then
echo_with_color magenta "Enter absolute path to the Oracle drivers, complete with trailing slash: [/] " >&5
read -r DRIVERS_PATH
if [[ -z $DRIVERS_PATH ]]; then
DRIVERS_PATH="."
fi
if [[ $CURRENT_KERNEL == "ubuntu" || $CURRENT_KERNEL == "debian" ]]; then
unzip "$DRIVERS_PATH/instantclient-*.zip" -d /opt/oracle
else
ls -f $DRIVERS_PATH/oracle-instantclient19.*.rpm
fi
if (($? == 0)); then
run_process " Drivers Found. Installing Oracle Drivers" install_oracle
php -m | grep -E "^oci8"
if (($? >= 1)); then
echo_with_color red "\nCould not install oci8 extension." >&5
else
echo_with_color green " Oracle drivers and oci8 extension installed\n" >&5
fi
else
echo_with_color red "Drivers not found. Skipping...\n" >&5
fi
unset DRIVERS_PATH
fi
fi
### DRIVERS FOR IBM DB2 PDO ( ONLY WITH KEY --with-db2 )
php -m | grep -E "^pdo_ibm"
if (($? >= 1)); then
if [[ $DB2 == TRUE ]]; then
echo_with_color magenta "Enter absolute path to the IBM DB2 drivers, complete with trailing slash: [/] " >&5
read -r DRIVERS_PATH
if [[ -z $DRIVERS_PATH ]]; then
DRIVERS_PATH="."
fi
tar xzf $DRIVERS_PATH/ibm_data_server_driver_package_linuxx64_v11.5.tar.gz -C /opt/
if (($? == 0)); then
run_process " Drivers Found. Installing DB2" install_db2
php -m | grep pdo_ibm
if (($? >= 1)); then
echo_with_color red "\nCould not install pdo_ibm extension." >&5
else
### DRIVERS FOR IBM DB2 ( ONLY WITH KEY --with-db2 )
php -m | grep -E "^ibm_db2"
if (($? >= 1)); then
run_process " Installing ibm_db2 extension" install_db2_extension
php -m | grep ibm_db2
if (($? >= 1)); then
echo_with_color red "\nCould not install ibm_db2 extension." >&5
else
echo_with_color green " IBM DB2 installed\n" >&5
fi
fi
fi
else
echo_with_color red "Drivers not found. Skipping...\n" >&5
fi
unset DRIVERS_PATH
cd "${CURRENT_PATH}" || exit 1
rm -rf /opt/PDO_IBM-1.3.4-patched
fi
fi
### DRIVERS FOR CASSANDRA ( ONLY WITH KEY --with-cassandra )
php -m | grep -E "^cassandra"
if (($? >= 1)); then
if [[ $CASSANDRA == TRUE ]]; then
run_process " Installing Cassandra" install_cassandra
php -m | grep cassandra
if (($? >= 1)); then
echo_with_color red "\nCould not install cassandra extension." >&5
else
echo_with_color green " Cassandra installed\n" >&5
fi
cd "${CURRENT_PATH}" || exit
rm -rf /opt/cassandra
fi
fi
### INSTALL IGBINARY EXT.
php -m | grep -E "^igbinary"
if (($? >= 1)); then
run_process " Installing igbinary" install_igbinary
php -m | grep igbinary
if (($? >= 1)); then
echo_with_color red "\nCould not install igbinary extension." >&5
else
echo_with_color green " igbinary installed\n" >&5
fi
fi
### INSTALL PYTHON BUNCH
run_process " Installing python2" install_python2
check_bunch_installation
if (($? >= 1)); then
run_process " Installing bunch" install_bunch
check_bunch_installation
if (($? >= 1)); then
echo_with_color red "\nCould not install python bunch extension." >&5
else
echo_with_color green " python2 installed\n" >&5
fi
fi
### INSTALL PYTHON3 MUNCH
run_process " Installing python3" install_python3
check_munch_installation
if (($? >= 1)); then
run_process " Installing munch" install_munch
check_munch_installation
if (($? >= 1)); then
echo_with_color red "\nCould not install python3 munch extension." >&5
else
echo_with_color green " python3 installed\n" >&5
fi
fi
### Install Node.js
node -v
if (($? >= 1)); then
run_process " Installing node" install_node
echo_with_color green " node installed\n" >&5
fi
### INSTALL PCS
php -m | grep -E "^pcs"
if (($? >= 1)); then
run_process " Installing pcs" install_pcs
php -m | grep pcs
if (($? >= 1)); then
echo_with_color red "\nCould not install pcs extension." >&5
else
echo_with_color green " pcs installed\n" >&5
fi
fi
### INSTALL Snowlake
if [[ $CURRENT_KERNEL == "debian" || $CURRENT_KERNEL == "ubuntu" ]]; then
if [[ $APACHE == TRUE ]]; then ### Only with key --apache
ls /etc/php/${PHP_VERSION_INDEX}/apache2/conf.d | grep "snowflake"
if (($? >= 1)); then
run_process " Installing snowflake" install_snowflake_apache
echo_with_color green " Snowflake installed\n" >&5
fi
else
ls /etc/php/${PHP_VERSION_INDEX}/fpm/conf.d | grep "snowflake"
if (($? >= 1)); then
run_process " Installing snowflake" install_snowflake_nginx
echo_with_color green " Snowflake installed\n" >&5
fi
fi
else
#fedora / centos
ls /etc/php.d | grep "snowflake"
if (($? >= 1)); then
if ((CURRENT_OS == 7)); then
# pdo_snowflake requires gcc 5.2 to install, centos7 only has 4.8 available
echo_with_color red "Snowflake only supported on CentOS / RHEL 8. Skipping...\n" >&5
else
run_process " Installing Snowflake" install_snowflake
echo_with_color green " snowflake installed\n" >&5
fi
fi
fi
### INSTALL Hive ODBC Driver
php -m | grep -E "^odbc"
if (($? >= 1)); then
run_process " Installing hive odbc" install_hive_odbc
if [[ "$HIVE_ODBC_INSTALLED" != "odbc" ]]; then
echo_with_color red "\nCould not build hive odbc driver." >&5
else
echo_with_color green " hive odbc installed\n" >&5
fi
fi
if [[ $APACHE == TRUE ]]; then
if [[ $CURRENT_KERNEL == "ubuntu" || $CURRENT_KERNEL == "debian" ]]; then
service apache2 reload
else
#fedora / centos
service httpd restart
fi
else
if [[ $CURRENT_KERNEL == "ubuntu" || $CURRENT_KERNEL == "debian" ]]; then
service ${PHP_VERSION}-fpm reload
else
#fedora / centos
service php-fpm restart
fi
fi
echo_with_color green "PHP Extensions configured.\n" >&5
### Step 5. Installing Composer
echo_with_color blue "Step 5: Installing Composer...\n" >&5
run_process " Installing Composer" install_composer
echo_with_color green "Composer installed.\n" >&5
### Step 6. Installing MySQL
if [[ $MYSQL == TRUE ]]; then ### Only with key --with-mysql
echo_with_color blue "Step 6: Installing System Database for DreamFactory...\n" >&5
run_process " Checking for existing MySqlDatabase" check_mysql_exists
if ((CHECK_MYSQL_PROCESS == 0)) || ((CHECK_MYSQL_INSTALLATION == 0)) || ((CHECK_MYSQL_PORT == 0)); then
echo_with_color red "MySQL Database detected in the system. Skipping installation. \n" >&5
DB_FOUND=TRUE
else
if [[ $CURRENT_KERNEL == "ubuntu" || $CURRENT_KERNEL == "debian" ]]; then
run_process " Adding mariadb repo" add_mariadb_repo
run_process " Updating System" system_update
fi
echo_with_color magenta "Please choose a strong MySQL root user password: " >&5
read -r -s DB_PASS
if [[ -z $DB_PASS ]]; then
until [[ -n $DB_PASS ]]; do
echo_with_color red "The password can't be empty!" >&5
read -r -s DB_PASS
done
fi
echo_with_color green "\nPassword accepted.\n" >&5
if [[ $CURRENT_KERNEL == "ubuntu" || $CURRENT_KERNEL == "debian" ]]; then
# Disable interactive mode in installation mariadb. Set generated above password.
export DEBIAN_FRONTEND="noninteractive"
debconf-set-selections <<<"mariadb-server mysql-server/root_password password $DB_PASS"
debconf-set-selections <<<"mariadb-server mysql-server/root_password_again password $DB_PASS"
fi
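# Optional check (illustrative; requires the debconf-utils package): inspect
# the preseeded answers before the installer runs:
#   debconf-get-selections | grep mysql-server/root_password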
run_process " Installing MariaDB" install_mariadb
fi
echo_with_color green "Database for DreamFactory installed.\n" >&5
### Step 7. Configuring DreamFactory system database
echo_with_color blue "Step 7: Configure DreamFactory system database.\n" >&5
DB_INSTALLED=FALSE
# The MySQL database has already been installed, so let's configure
# the DreamFactory system database.
if [[ $DB_FOUND == TRUE ]]; then
echo_with_color magenta "Is DreamFactory MySQL system database already configured? [Yy/Nn] " >&5
read -r DB_ANSWER
if [[ -z $DB_ANSWER ]]; then
DB_ANSWER=Y
fi
if [[ $DB_ANSWER =~ ^[Yy]$ ]]; then
DB_INSTALLED=TRUE
# MySQL system database is not installed, but MySQL is, so let's
# prompt the user for the root password.
else
echo_with_color magenta "\n Enter MySQL root password: " >&5
read -r DB_PASS
# Test DB access
mysql -h localhost -u root "-p$DB_PASS" -e"quit"
if (($? >= 1)); then
ACCESS=FALSE
TRYS=0
until [[ $ACCESS == TRUE ]]; do
echo_with_color red "\nPassword incorrect!\n " >&5
echo_with_color magenta "Enter root user password:\n " >&5
read -r -s DB_PASS
mysql -h localhost -u root "-p$DB_PASS" -e"quit"
if (($? == 0)); then
ACCESS=TRUE
fi
TRYS=$((TRYS + 1))
if ((TRYS == 3)); then
echo_with_color red "\nExit.\n" >&5
exit 1
fi
done
fi
fi
fi
# If the DreamFactory system database not already installed,
# let's install it.
if [[ $DB_INSTALLED == FALSE ]]; then
# Test DB access
mysql -h localhost -u root "-p$DB_PASS" -e"quit"
if (($? >= 1)); then
echo_with_color red "Connection to Database failed. Exit \n" >&5
exit 1
fi
echo_with_color magenta "\n What would you like to name your system database? (e.g. dreamfactory) " >&5
read -r DF_SYSTEM_DB
if [[ -z $DF_SYSTEM_DB ]]; then
until [[ -n $DF_SYSTEM_DB ]]; do
echo_with_color red "\nThe name can't be empty!" >&5
read -r DF_SYSTEM_DB
done
fi
echo "CREATE DATABASE ${DF_SYSTEM_DB};" | mysql -u root "-p${DB_PASS}" 2>&5
if (($? >= 1)); then
echo_with_color red "\nCreating database error. Exit" >&5
exit 1
fi
echo_with_color magenta "\n Please create a MySQL DreamFactory system database user name (e.g. dfadmin): " >&5
read -r DF_SYSTEM_DB_USER
if [[ -z $DF_SYSTEM_DB_USER ]]; then
until [[ -n $DF_SYSTEM_DB_USER ]]; do
echo_with_color red "The name can't be empty!" >&5
read -r DF_SYSTEM_DB_USER
done
fi
echo_with_color magenta "\n Please create a secure MySQL DreamFactory system database user password: " >&5
read -r -s DF_SYSTEM_DB_PASSWORD
if [[ -z $DF_SYSTEM_DB_PASSWORD ]]; then
until [[ -n $DF_SYSTEM_DB_PASSWORD ]]; do
echo_with_color red "The password can't be empty!" >&5
read -r -s DF_SYSTEM_DB_PASSWORD
done
fi
# Create the system database user and grant privileges (password set via IDENTIFIED BY)
echo "GRANT ALL PRIVILEGES ON ${DF_SYSTEM_DB}.* to \"${DF_SYSTEM_DB_USER}\"@\"localhost\" IDENTIFIED BY \"${DF_SYSTEM_DB_PASSWORD}\";" | mysql -u root "-p${DB_PASS}" 2>&5
if (($? >= 1)); then
echo_with_color red "\nCreating new user error. Exit" >&5
exit 1
fi
echo "FLUSH PRIVILEGES;" | mysql -u root "-p${DB_PASS}"
echo -e "\nDatabase configuration finished.\n" >&5
else
echo_with_color green "Skipping...\n" >&5
fi
else
echo_with_color green "Step 6: Skipping DreamFactory system database installation.\n" >&5
echo_with_color green "Step 7: Skipping DreamFactory system database configuration.\n" >&5
fi
### Step 8. Install DreamFactory
echo_with_color blue "Step 8: Installing DreamFactory...\n " >&5
ls -d /opt/dreamfactory
if (($? >= 1)); then
run_process " Cloning DreamFactory repository" clone_dreamfactory_repository
else
echo_with_color red "DreamFactory detected.\n" >&5
DF_CLEAN_INSTALLATION=FALSE
fi
if [[ $DF_CLEAN_INSTALLATION == FALSE ]]; then
ls /opt/dreamfactory/composer.{json,lock,json-dist}
if (($? == 0)); then
echo_with_color red "Would you like to upgrade your instance? [Yy/Nn]" >&5
read -r LICENSE_FILE_ANSWER
if [[ -z $LICENSE_FILE_ANSWER ]]; then
LICENSE_FILE_ANSWER=N
fi
LICENSE_FILE_EXIST=TRUE
fi
fi
if [[ $LICENSE_FILE_EXIST == TRUE ]]; then
if [[ $LICENSE_FILE_ANSWER =~ ^[Yy]$ ]]; then
echo_with_color magenta "\nEnter absolute path to license files, complete with trailing slash: [/]" >&5
read -r LICENSE_PATH
if [[ -z $LICENSE_PATH ]]; then
LICENSE_PATH="."
fi
ls $LICENSE_PATH/composer.{json,lock,json-dist}
if (($? >= 1)); then
echo_with_color red "\nLicenses not found. Skipping.\n" >&5
else
cp $LICENSE_PATH/composer.{json,lock,json-dist} /opt/dreamfactory/
LICENSE_INSTALLED=TRUE
echo_with_color green "\nLicenses file installed. \n" >&5
echo_with_color blue "Installing DreamFactory...\n" >&5
fi
else
echo_with_color red "\nSkipping...\n" >&5
fi
else
echo_with_color magenta "Do you have a commercial DreamFactory license? [Yy/Nn] " >&5
read -r LICENSE_FILE_ANSWER
if [[ -z $LICENSE_FILE_ANSWER ]]; then
LICENSE_FILE_ANSWER=N
fi
if [[ $LICENSE_FILE_ANSWER =~ ^[Yy]$ ]]; then
echo_with_color magenta "\nEnter absolute path to license files, complete with trailing slash: [/]" >&5
read -r LICENSE_PATH
if [[ -z $LICENSE_PATH ]]; then
LICENSE_PATH="."
fi
ls $LICENSE_PATH/composer.{json,lock,json-dist}
if (($? >= 1)); then
echo_with_color red "\nLicenses not found. Skipping.\n" >&5
echo_with_color red "Installing DreamFactory OSS version...\n" >&5
else
cp $LICENSE_PATH/composer.{json,lock,json-dist} /opt/dreamfactory/
LICENSE_INSTALLED=TRUE
echo_with_color green "\nLicense files installed. \n" >&5
echo_with_color blue "Installing DreamFactory...\n" >&5
fi
else
echo_with_color red "\nInstalling DreamFactory OSS version.\n" >&5
fi
fi
chown -R "$CURRENT_USER" /opt/dreamfactory && cd /opt/dreamfactory || exit 1
run_process " Installing DreamFactory" run_composer_install
### Turn off silent mode because php artisan df:setup and df:env have trouble with prompts.
exec 1>&5 5>&-
if [[ $DB_INSTALLED == FALSE ]]; then
sudo -u "$CURRENT_USER" bash -c "php artisan df:env -q \
--db_connection=mysql \
--db_host=127.0.0.1 \
--db_port=3306 \
--db_database=${DF_SYSTEM_DB} \
--db_username=${DF_SYSTEM_DB_USER} \
--db_password=${DF_SYSTEM_DB_PASSWORD//\'/}"
sed -i 's/\#DB\_CHARSET\=/DB\_CHARSET\=utf8/g' .env
sed -i 's/\#DB\_COLLATION\=/DB\_COLLATION\=utf8\_unicode\_ci/g' .env
echo -e "\n"
MYSQL_INSTALLED=TRUE
elif [[ ! $MYSQL == TRUE && $DF_CLEAN_INSTALLATION == TRUE ]] || [[ $DB_INSTALLED == TRUE ]]; then
sudo -u "$CURRENT_USER" bash -c "php artisan df:env"
if [[ $DB_INSTALLED == TRUE ]]; then
sed -i 's/\#DB\_CHARSET\=/DB\_CHARSET\=utf8/g' .env
sed -i 's/\#DB\_COLLATION\=/DB\_COLLATION\=utf8\_unicode\_ci/g' .env
fi
fi
if [[ $DF_CLEAN_INSTALLATION == TRUE ]]; then
sudo -u "$CURRENT_USER" bash -c "php artisan df:setup"
fi
if [[ $LICENSE_INSTALLED == TRUE || $DF_CLEAN_INSTALLATION == FALSE ]]; then
php artisan migrate --seed
sudo -u "$CURRENT_USER" bash -c "php artisan config:clear -q"
if [[ $LICENSE_INSTALLED == TRUE ]]; then
grep DF_LICENSE_KEY .env >/dev/null 2>&1 # Check for existing key.
if (($? == 0)); then
echo_with_color red "\nThe license key is already installed. Do you want to install a new key? [Yy/Nn]"
read -r KEY_ANSWER
if [[ -z $KEY_ANSWER ]]; then
KEY_ANSWER=N
fi
NEW_KEY=TRUE
fi
if [[ $NEW_KEY == TRUE ]]; then
if [[ $KEY_ANSWER =~ ^[Yy]$ ]]; then #Install new key
CURRENT_KEY=$(grep DF_LICENSE_KEY .env)
echo_with_color magenta "\nPlease provide your new license key:"
read -r LICENSE_KEY
size=${#LICENSE_KEY}
if [[ -z $LICENSE_KEY ]]; then
until [[ -n $LICENSE_KEY ]]; do
echo_with_color red "\nThe field can't be empty!"
read -r LICENSE_KEY
size=${#LICENSE_KEY}
done
elif ((size != 32)); then
until ((size == 32)); do
echo_with_color red "\nInvalid License Key provided"
echo_with_color magenta "\nPlease provide your license key:"
read -r LICENSE_KEY
size=${#LICENSE_KEY}
done
fi
###Change license key in .env file
sed -i "s/$CURRENT_KEY/DF_LICENSE_KEY=$LICENSE_KEY/" .env
else
echo_with_color red "\nSkipping..." #Skip if key found in .env file and no need to update
fi
else
echo_with_color magenta "\nPlease provide your license key:" #Install key if not found existing key.
read -r LICENSE_KEY
size=${#LICENSE_KEY}
if [[ -z $LICENSE_KEY ]]; then
until [[ -n $LICENSE_KEY ]]; do
echo_with_color red "The field can't be empty!"
read -r LICENSE_KEY
size=${#LICENSE_KEY}
done
elif ((size != 32)); then
until ((size == 32)); do
echo_with_color red "\nInvalid License Key provided"
echo_with_color magenta "\nPlease provide your license key:"
read -r LICENSE_KEY
size=${#LICENSE_KEY}
done
fi
###Add license key to .env file
echo -e "\nDF_LICENSE_KEY=${LICENSE_KEY}" >>.env
fi
fi
fi
if [[ $APACHE == TRUE ]]; then
chmod -R 2775 /opt/dreamfactory/
if [[ $CURRENT_KERNEL == "debian" || $CURRENT_KERNEL == "ubuntu" ]]; then
chown -R "www-data:$CURRENT_USER" /opt/dreamfactory/
else
chown -R "apache:$CURRENT_USER" /opt/dreamfactory/
fi
fi
### Uncomment nodejs in .env file
grep -E "^#DF_NODEJS_PATH" .env >/dev/null
if (($? == 0)); then
sed -i "s,\#DF_NODEJS_PATH=/usr/local/bin/node,DF_NODEJS_PATH=$NODE_PATH," .env
fi
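# After the substitution above, the .env line reads e.g. (illustrative path):
#   DF_NODEJS_PATH=/usr/bin/node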
### Ubuntu 20, CentOS 8, and Fedora use the python2 command instead of python, so we need to update our .env
if [[ ! $CURRENT_KERNEL == "debian" ]]; then
sed -i "s,\#DF_PYTHON_PATH=/usr/local/bin/python,DF_PYTHON_PATH=$(which python2)," .env
fi
sudo -u "$CURRENT_USER" bash -c "php artisan cache:clear -q"
#Add rules if SELinux enabled, redhat systems only
if [[ $CURRENT_KERNEL == "centos" || $CURRENT_KERNEL == "rhel" || $CURRENT_KERNEL == "fedora" ]]; then
sestatus | grep SELinux | grep enabled >/dev/null
if (($? == 0)); then
setsebool -P httpd_can_network_connect_db 1
chcon -t httpd_sys_content_t storage -R
chcon -t httpd_sys_content_t bootstrap/cache/ -R
chcon -t httpd_sys_rw_content_t storage -R
chcon -t httpd_sys_rw_content_t bootstrap/cache/ -R
fi
fi
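# A quick way to confirm the SELinux changes took effect (illustrative):
#   getsebool httpd_can_network_connect_db   # expect: --> on
#   ls -Zd storage bootstrap/cache           # expect httpd_sys_rw_content_t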
### Add Permissions and Ownerships
if [[ ! $APACHE == TRUE ]]; then
echo_with_color blue "Adding Permissions and Ownerships...\n"
echo_with_color blue " Creating user 'dreamfactory'"
useradd dreamfactory
if [[ $CURRENT_KERNEL == "ubuntu" || $CURRENT_KERNEL == "debian" ]]; then
PHP_VERSION_NUMBER=$(php --version 2>/dev/null | head -n 1 | cut -d " " -f 2 | cut -c 1,2,3)
fi
echo_with_color blue " Updating php-fpm user, group, and owner"
if [[ $CURRENT_KERNEL == "ubuntu" || $CURRENT_KERNEL == "debian" ]]; then
sed -i "s,www-data,dreamfactory," /etc/php/$PHP_VERSION_NUMBER/fpm/pool.d/www.conf
else
# centos, fedora
sed -i "s,;listen.owner = nobody,listen.owner = dreamfactory," /etc/php-fpm.d/www.conf
sed -i "s,;listen.group = nobody,listen.group = dreamfactory," /etc/php-fpm.d/www.conf
sed -i "s,;listen.mode = 0660,listen.mode = 0660\nuser = dreamfactory\ngroup = dreamfactory," /etc/php-fpm.d/www.conf
sed -i "s,listen.acl_users,;listen.acl_users," /etc/php-fpm.d/www.conf
fi
if (($? == 0)); then
if [[ $CURRENT_KERNEL == "ubuntu" || $CURRENT_KERNEL == "debian" ]]; then
usermod -a -G dreamfactory www-data
else
# centos, fedora
usermod -a -G dreamfactory nginx
fi
echo_with_color blue " Changing ownership and permission of /opt/dreamfactory to 'dreamfactory' user"
chown -R dreamfactory:dreamfactory /opt/dreamfactory
chmod -R u=rwX,g=rX,o= /opt/dreamfactory
echo_with_color blue " Restarting nginx and php-fpm"
service nginx restart
if (($? >= 1)); then
echo_with_color red "nginx failed to restart\n"
exit 1
else
if [[ $CURRENT_KERNEL == "ubuntu" || $CURRENT_KERNEL == "debian" ]]; then
service php$PHP_VERSION_NUMBER-fpm restart
else
# centos, fedora
service php-fpm restart
fi
if (($? >= 1)); then
echo_with_color red "php-fpm failed to restart\n"
exit 1
fi
echo_with_color green "Done! Ownership and Permissions changed to user 'dreamfactory'\n"
fi
else
echo_with_color red "Unable to update php-fpm www.conf file. Please check the file location of www.conf"
fi
fi
echo_with_color green "Installation finished! DreamFactory has been installed in /opt/dreamfactory "
if [[ $DEBUG == TRUE ]]; then
echo_with_color red "\nThe log file saved in: /tmp/dreamfactory_installer.log "
fi
### Summary table
if [[ $MYSQL_INSTALLED == TRUE ]]; then
echo -e "\n "
echo_with_color magenta "******************************"
echo -e " DB for system table: mysql "
echo -e " DB host: 127.0.0.1 "
echo -e " DB port: 3306 "
if [[ ! $DB_FOUND == TRUE ]]; then
echo -e " DB root password: $DB_PASS"
fi
echo -e " DB name: $DF_SYSTEM_DB"
echo -e " DB user: $DF_SYSTEM_DB_USER"
echo -e " DB password: $DF_SYSTEM_DB_PASSWORD"
echo -e "******************************\n"
fi
exit 0
|
dreamfactorysoftware/dreamfactory
|
installers/source/setup.sh
|
Shell
|
apache-2.0
| 32,708 |
#!/bin/bash
# Define a bunch of functions and set a bunch of variables
TEST_DIR=$(readlink -f `dirname "${BASH_SOURCE[0]}"` | grep -o '.*/oshinko-s2i/test/e2e')
source $TEST_DIR/common
SCRIPT_DIR=$(readlink -f `dirname "${BASH_SOURCE[0]}"`)
source $SCRIPT_DIR/../../builddc
SPARKLYR_DIR=$(readlink -f `dirname "${BASH_SOURCE[0]}"` | grep -o '.*/oshinko-s2i')/templates
RESOURCE_DIR=$TEST_DIR/resources
cp $SPARKLYR_DIR/sparklyrbuilddc.json $RESOURCE_DIR/sparklyrbuilddc.json
fix_template $RESOURCE_DIR/sparklyrbuilddc.json radanalyticsio/radanalytics-r-spark $S2I_TEST_IMAGE_SPARKLYR
set_template $RESOURCE_DIR/sparklyrbuilddc.json
set_git_uri https://github.com/tmckayus/r-openshift-ex.git
set_worker_count $S2I_TEST_WORKERS
set_fixed_app_name sparklyr-build
os::test::junit::declare_suite_start "$MY_SCRIPT"
echo "++ check_image"
check_image $S2I_TEST_IMAGE_SPARKLYR
# Do this first after check_image, because it involves deleting all the existing buildconfigs
echo "++ test_no_app_name"
test_no_app_name
echo "++ test_app_lang"
test_app_lang r
echo "++ run_complete"
test_run_complete
echo "++ test_exit"
test_exit
echo "++ test_cluster_name"
test_cluster_name
echo "++ test_del_cluster"
test_del_cluster
echo "++ test_app_args"
test_app_args
echo "++ test_pod_info"
test_podinfo
echo "++ test_named_config"
test_named_config
echo "++ test_driver_config"
test_driver_config
echo "++ test_spark_options"
test_spark_options
echo "++ test_driver_host"
test_driver_host
echo "++ test_no_source_or_image"
test_no_source_or_image
echo "++ test_app_file app.R"
test_app_file app.R
echo "++ test_git_ref"
test_git_ref $GIT_URI 4acf0e83a8817ff4bc9922584d9cec689748305f
os::test::junit::declare_suite_end
|
rimolive/oshinko-s2i
|
test/e2e/templates/sparklyr/builddc/sparklyrbuilddc.sh
|
Shell
|
apache-2.0
| 1,720 |
#!/bin/bash
# Copyright 2019 The gVisor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -xeo pipefail
declare -r CONTAINERD_VERSION=${1:-1.3.0}
declare -r CONTAINERD_MAJOR="$(echo ${CONTAINERD_VERSION} | awk -F '.' '{ print $1; }')"
declare -r CONTAINERD_MINOR="$(echo ${CONTAINERD_VERSION} | awk -F '.' '{ print $2; }')"
declare -r CRITOOLS_VERSION=${CRITOOLS_VERSION:-1.18.0}
if [[ "${CONTAINERD_MAJOR}" -eq 1 ]] && [[ "${CONTAINERD_MINOR}" -le 4 ]]; then
# We're running Go 1.16, but using pre-module containerd and cri-tools.
export GO111MODULE=off
fi
# containerd < 1.4 doesn't work with cgroupv2 setup, so we check for that here
if [[ "$(stat -f -c %T /sys/fs/cgroup 2>/dev/null)" == "cgroup2fs" && "${CONTAINERD_MAJOR}" -eq 1 && "${CONTAINERD_MINOR}" -lt 4 ]]; then
echo "containerd < 1.4 does not work with cgroup2"
exit 1
fi
# Helper for Go packages below.
install_helper() {
declare -r PACKAGE="${1}"
declare -r TAG="${2}"
# Clone the repository.
mkdir -p "${GOPATH}"/src/$(dirname "${PACKAGE}") && \
git clone https://"${PACKAGE}" "${GOPATH}"/src/"${PACKAGE}"
# Checkout and build the repository.
(cd "${GOPATH}"/src/"${PACKAGE}" && \
git checkout "${TAG}" && \
make && \
make install)
}
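# Example invocation (mirrors the real calls further down):
#   install_helper github.com/containerd/containerd "v${CONTAINERD_VERSION}"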
# Figure out where the btrfs headers are.
#
# Ubuntu 16.04 has only btrfs-tools, while 18.04 has a transitional package,
# and later versions no longer have the transitional package.
source /etc/os-release
declare BTRFS_DEV
if [[ "${VERSION_ID%.*}" -le "18" ]]; then
BTRFS_DEV="btrfs-tools"
else
BTRFS_DEV="libbtrfs-dev"
fi
readonly BTRFS_DEV
# Install dependencies for the crictl tests.
export DEBIAN_FRONTEND=noninteractive
while true; do
if (apt-get update && apt-get install -y \
"${BTRFS_DEV}" \
libseccomp-dev); then
break
fi
result=$?
if [[ $result -ne 100 ]]; then
exit $result
fi
done
# Install containerd & cri-tools.
declare -rx GOPATH=$(mktemp -d --tmpdir gopathXXXXX)
install_helper github.com/containerd/containerd "v${CONTAINERD_VERSION}"
install_helper github.com/kubernetes-sigs/cri-tools "v${CRITOOLS_VERSION}"
# Configure containerd-shim.
declare -r shim_config_path=/etc/containerd/runsc/config.toml
mkdir -p $(dirname ${shim_config_path})
cat > ${shim_config_path} <<-EOF
log_path = "/tmp/shim-logs/"
log_level = "debug"
[runsc_config]
debug = "true"
debug-log = "/tmp/runsc-logs/"
strace = "true"
file-access = "shared"
EOF
# Configure CNI.
(cd "${GOPATH}" && src/github.com/containerd/containerd/script/setup/install-cni)
cat <<EOF | sudo tee /etc/cni/net.d/10-bridge.conf
{
"cniVersion": "0.3.1",
"name": "bridge",
"type": "bridge",
"bridge": "cnio0",
"isGateway": true,
"ipMasq": true,
"ipam": {
"type": "host-local",
"ranges": [
[{"subnet": "10.200.0.0/24"}]
],
"routes": [{"dst": "0.0.0.0/0"}]
}
}
EOF
cat <<EOF | sudo tee /etc/cni/net.d/99-loopback.conf
{
"cniVersion": "0.3.1",
"type": "loopback"
}
EOF
# Configure crictl.
cat <<EOF | sudo tee /etc/crictl.yaml
runtime-endpoint: unix:///run/containerd/containerd.sock
EOF
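# Once a containerd daemon is running, a minimal smoke test looks like
# (illustrative):
#   crictl info   # should report runtime status over the socket configured above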
# Cleanup.
rm -rf "${GOPATH}"
|
google/gvisor
|
tools/install_containerd.sh
|
Shell
|
apache-2.0
| 3,647 |
#!/bin/bash -e
# Spin indefinitely until our mgt net and subnet show up in neutron. This will
# be timed out by deployment_tasks if it does not succeed.
source /root/openrc
source $(dirname $0)/functions
if ! which neutron; then
sudo apt-get -y install python-neutronclient
fi
mgt_name=${1:-"astara_mgmt"}
mgt_prefix=${2:-"fdca:3ba5:a17a:acda::/64"}
while [[ -z "$net_id" ]]; do
net_id="$(neutron net-list | grep " $mgt_name " | awk '{ print $2 }')"
if [[ -z "$net_id" ]]; then
echo "Still waiting on mgt net"
sleep 1
else
echo "Found astara mgt net: $net_id"
break
fi
done
while [[ -z "$subnet_id" ]]; do
subnet_id="$(neutron subnet-list | grep " $mgt_prefix" | awk '{ print $2 }')"
if [[ -z "$subnet_id" ]]; then
echo "Still waiting on mgt subnet"
sleep 1
else
echo "Found astara mgt subnet: $subnet_id"
break
fi
done
iniset /etc/astara/orchestrator.ini DEFAULT management_network_id $net_id
iniset /etc/astara/orchestrator.ini DEFAULT management_subnet_id $subnet_id
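# The resulting orchestrator.ini entries look like (illustrative UUIDs):
#   [DEFAULT]
#   management_network_id = 3f8a1c2e-0000-0000-0000-0000000000e1
#   management_subnet_id = 9b27d4f0-0000-0000-0000-0000000000c4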
|
akanda/fuel-plugin-astara
|
deployment_scripts/scripts/set_neutron_networks_config.sh
|
Shell
|
apache-2.0
| 1,005 |
#! /bin/sh
set -e
export HERE=$(dirname $(readlink -f $0))
. ${HERE}/settings.sh
CONTINENTS=
CONTINENTS="${CONTINENTS} africa"
CONTINENTS="${CONTINENTS} antarctica"
CONTINENTS="${CONTINENTS} asia"
CONTINENTS="${CONTINENTS} australia-oceania"
CONTINENTS="${CONTINENTS} central-america"
CONTINENTS="${CONTINENTS} europe"
CONTINENTS="${CONTINENTS} north-america"
CONTINENTS="${CONTINENTS} south-america"
extract_data() {
continent=$1
type=$2
religion=$3
# if [ ${type} == "all" ]
# then
# tag="*"
# else
# tag=${type}
# fi
EXTRACT=${TMPDIR}/${continent}-religion-${religion}-${type}.osm
${HOME}/bin/osmconvert ${WORLD_FILE} -B=${POLY_FILE} -o=${EXTRACT}
mkdir -p ${STORAGE}/by-continent/${continent}/${MONTH}/${DAY}
mkdir -p ${WEBDATA}/by-continent
cp -f \
${EXTRACT} \
${STORAGE}/by-continent/${continent}/${MONTH}/${DAY}/${continent}-religion-${religion}-${type}.osm
cp -f \
${EXTRACT} \
${WEBDATA}/by-continent/${continent}-all-religion-${religion}-${type}.osm
# Keep a copy without the continent name prefix too
cp -f \
${EXTRACT} \
${WEBDATA}/by-continent/all-religion-${religion}-${type}.osm
}
# HERE=${PWD}/$(dirname $0)
HERE=$(dirname $0)
mkdir -p ${TMPDIR}
cd ${TMPDIR}
# Assume the world file is already downloaded and is less than 4 hours old
for type in node way relation
do
WORLD_FILE=${TMPDIR}/world-religion-muslim-${type}.osm
# wget "http://www.overpass-api.de/api/interpreter" \
# --post-file=${HERE}/data/query-${type}-religion-muslim.xml \
# -O ${WORLD_FILE} \
# > ${WORLD_FILE}.out 2> ${WORLD_FILE}.err
MONTH=$(date +%Y%m --reference ${WORLD_FILE})
DAY=$(date +%Y%m%d --reference ${WORLD_FILE})
# mkdir -p ${STORAGE}/world/${MONTH}/${DAY}
# cp ${WORLD_FILE} ${STORAGE}/world/${MONTH}/${DAY}
done
WORLD_FILE=${TMPDIR}/world-religion-muslim-node.osm
for continent in ${CONTINENTS}
do
POLY_URL=http://download.geofabrik.de/${continent}.poly
POLY_FILE=${TMPDIR}/${continent}.poly
wget "${POLY_URL}" -O ${POLY_FILE} \
> ${POLY_FILE}.out 2> ${POLY_FILE}.err
MONTH=$(date +%Y%m --reference ${POLY_FILE})
DAY=$(date +%Y%m%d --reference ${POLY_FILE})
mkdir -p ${STORAGE}/by-continent/${continent}/${MONTH}/${DAY}
cp ${POLY_FILE} ${STORAGE}/by-continent/${continent}/${MONTH}/${DAY}
for type in node # way relation
do
WORLD_FILE=${TMPDIR}/world-religion-muslim-${type}.osm
if [ -e ${WORLD_FILE} ]
then
if [ -s ${WORLD_FILE} ]
then
extract_data ${continent} ${type} muslim
fi
fi
done
find ${STORAGE}/by-continent/${continent} -type f -a -mtime +2 | xargs --no-run-if-empty rm
find ${STORAGE}/by-continent/${continent} -type d -a -empty | xargs --no-run-if-empty rmdir
done
# FINI
|
osmmosques/osm-mosques-scripts
|
antique/extract-places-osm-continents.sh
|
Shell
|
apache-2.0
| 2,946 |
#
# Copyright (c) 2004, 2011, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
# @test
# @bug 4990825 7092186
# @run shell/timeout=90 jstatdExternalRegistry.sh
# @summary Test functionality of 'jstatd -p<port>&' with an external RMI registry
. ${TESTSRC-.}/../../jvmstat/testlibrary/utils.sh
setup
verify_os
cleanup() {
kill_proc ${RMIREGISTRY_PID}
kill_proc ${JSTATD_PID}
}
trap 'cleanup' 0 HUP INT QUIT TERM
RMIREGISTRY="${TESTJAVA}/bin/rmiregistry"
JSTATD="${TESTJAVA}/bin/jstatd"
JPS="${TESTJAVA}/bin/jps"
JSTAT="${TESTJAVA}/bin/jstat"
HOSTNAME=`uname -n`
PORT=2099
RMIREGISTRY_OUT="rmiregistry_$$.out"
JSTATD_OUT="jstatd_$$.out"
${RMIREGISTRY} ${PORT} > ${RMIREGISTRY_OUT} 2>&1 &
RMIREGISTRY_PID=$!
echo "rmiregistry started on port ${PORT} as pid ${RMIREGISTRY_PID}"
sleep 3
${JSTATD} -J-Djava.security.policy=${TESTSRC}/all.policy -p ${PORT} > ${JSTATD_OUT} 2>&1 &
JSTATD_PID=$!
echo "jstatd started as pid ${JSTATD_PID}"
sleep 3
${JPS} ${HOSTNAME}:${PORT} 2>&1 | awk -f ${TESTSRC}/jpsOutput1.awk
if [ $? -ne 0 ]
then
echo "Output of jps differs from expected output. Failed."
exit 1
fi
# get the process id for the target app (jstatd). note, don't rely
# on JSTATD_PID as mks interposes a shell when starting a process in
# the background
TARGET_PID=`${JPS} | grep "Jstatd" | cut -d" " -f1`
${JSTAT} -gcutil ${TARGET_PID}@${HOSTNAME}:${PORT} 250 5 2>&1 | awk -f ${TESTSRC}/jstatGcutilOutput1.awk
RC=$?
if [ ${RC} -ne 0 ]
then
echo "jstat output differs from expected output"
fi
if [ -s ${JSTATD_OUT} ]
then
echo "jstatd generated unexpected output: see ${JSTATD_OUT}"
RC=1
fi
if [ -s ${RMIREGISTRY_OUT} ]
then
echo "rmiregistry generated unexpected output: see ${RMIREGISTRY_OUT}"
RC=1
fi
exit ${RC}
|
andreagenso/java2scala
|
test/J2s/java/openjdk-6-src-b27/jdk/test/sun/tools/jstatd/jstatdExternalRegistry.sh
|
Shell
|
apache-2.0
| 2,715 |
#!/bin/sh
#
# More details at
# http://www.osehra.org/wiki/importing-osehra-code-base-gtm
#
# This script is no longer needed, given that CMake perform this importing
# while it is running a Dashboard build.
#
export VistADir=$HOME/VistA-Instance
export gtm_dist=/usr/lib/fis-gtm/V5.5-000_x86_64
export gtmprofilefile=$gtm_dist/gtmprofile
source $gtmprofilefile
export gtmgbldir=$VistADir/database
export gtmroutines="$VistADir/o($VistADir/r) $gtm_dist/libgtmutil.so"
$gtm_dist/gtm
#
# DO ^%RI
#
# Formfeed delimited <No>? No
# Input device: <terminal>: $HOME/OSEHRA/VistA-FOIA/routines.ro
# Output directory : $HOME/VistA/r/
#
#
# DO LIST^ZGI("$HOME/OSEHRA/VistA-FOIA/globals.lst","$HOME/OSEHRA/VistA-FOIA/")
#
# D ^ZTMGRSET
#
# ZTMGRSET Version 8.0 Patch level **34,36,69,94,121,127,136,191,275,355,446**
# HELLO! I exist to assist you in correctly initializing the current account.
# Which MUMPS system should I install?
#
# 1 = VAX DSM(V6), VAX DSM(V7)
# 2 = MSM-PC/PLUS, MSM for NT or UNIX
# 3 = Cache (VMS, NT, Linux), OpenM-NT
# 4 = Datatree, DTM-PC, DT-MAX
# 5 =
# 6 =
# 7 = GT.M (VMS)
# 8 = GT.M (Unix)
# System: 8
#
#
# NAME OF MANAGER'S UCI,VOLUME SET: VAH,ROU// PLA,PLA
# The value of PRODUCTION will be used in the GETENV api.
# PRODUCTION (SIGN-ON) UCI,VOLUME SET: VAH,ROU// PLA,PLA
# The VOLUME name must match the one in PRODUCTION.
# NAME OF VOLUME SET: PLA//PLA
# The temp directory for the system: '/tmp/'//
# Want to rename the FileMan routines: No//Y
#
|
luisibanez/VistA-installation-scripts
|
Scripts/importingVistAFOIAintoGTM.sh
|
Shell
|
apache-2.0
| 1,494 |
#!/bin/sh
scriptPos=${0%/*}
if [ -z "$JAVACMD" ] ; then
if [ -n "$JAVA_HOME" ] ; then
# IBM's JDK on AIX uses strange locations for the executables
if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
JAVACMD="$JAVA_HOME/jre/sh/java"
elif [ -x "$JAVA_HOME/jre/bin/java" ] ; then
JAVACMD="$JAVA_HOME/jre/bin/java"
else
JAVACMD="$JAVA_HOME/bin/java"
fi
else
JAVACMD=`which java 2> /dev/null `
if [ -z "$JAVACMD" ] ; then
JAVACMD=java
fi
fi
fi
if [ ! -x "$JAVACMD" ] ; then
echo "Error: JAVA_HOME is not defined correctly."
echo " We cannot execute $JAVACMD"
exit 1
fi
args=
for arg in "$@";
do
args="$args \"$arg\""
done
exec_command="exec \"$JAVACMD\" -cp \"$scriptPos/lib/*\" \"-Dlogback.configurationFile=$W4A_IMP_HOME/logback.xml\" de.oth.keycloak.InitKeycloakServer"
eval $exec_command $args
|
OkieOth/keycloakTests
|
InitKeycloakServer/src/main/resources/bin/InitKeycloakServer.sh
|
Shell
|
apache-2.0
| 863 |
#!/bin/bash -e
usage() {
echo "Usage: $0 %number_of_coreos_nodes%"
}
if [ "$1" == "" ]; then
echo "Cluster size is empty"
usage
exit 1
fi
if ! [[ $1 =~ ^[0-9]+$ ]]; then
echo "'$1' is not a number"
usage
exit 1
fi
LIBVIRT_PATH=/var/lib/libvirt/images/coreos
USER_DATA_TEMPLATE=$LIBVIRT_PATH/user_data
ETCD_DISCOVERY=$(curl -s "https://discovery.etcd.io/new?size=$1")
CHANNEL=stable
RELEASE=current
RAM=1024
CPUs=1
IMG_NAME="coreos_${CHANNEL}_${RELEASE}_qemu_image.img"
if [ ! -d $LIBVIRT_PATH ]; then
mkdir -p $LIBVIRT_PATH || (echo "Can not create $LIBVIRT_PATH directory" && exit 1)
fi
if [ ! -f $USER_DATA_TEMPLATE ]; then
echo "$USER_DATA_TEMPLATE template doesn't exist"
exit 1
fi
for SEQ in $(seq 1 $1); do
COREOS_HOSTNAME="coreos$SEQ"
if [ ! -d $LIBVIRT_PATH/$COREOS_HOSTNAME/openstack/latest ]; then
mkdir -p $LIBVIRT_PATH/$COREOS_HOSTNAME/openstack/latest || (echo "Can not create $LIBVIRT_PATH/$COREOS_HOSTNAME/openstack/latest directory" && exit 1)
fi
if [ ! -f $LIBVIRT_PATH/$IMG_NAME ]; then
wget http://${CHANNEL}.release.core-os.net/amd64-usr/${RELEASE}/coreos_production_qemu_image.img.bz2 -O - | bzcat > $LIBVIRT_PATH/$IMG_NAME || (rm -f $LIBVIRT_PATH/$IMG_NAME && echo "Failed to download image" && exit 1)
fi
if [ ! -f $LIBVIRT_PATH/$COREOS_HOSTNAME.qcow2 ]; then
qemu-img create -f qcow2 -b $LIBVIRT_PATH/$IMG_NAME $LIBVIRT_PATH/$COREOS_HOSTNAME.qcow2
fi
sed "s#%HOSTNAME%#$COREOS_HOSTNAME#g;s#%DISCOVERY%#$ETCD_DISCOVERY#g" $USER_DATA_TEMPLATE > $LIBVIRT_PATH/$COREOS_HOSTNAME/openstack/latest/user_data
virt-install --connect qemu:///system --import --name $COREOS_HOSTNAME --ram $RAM --vcpus $CPUs --os-type=linux --os-variant=virtio26 --disk path=$LIBVIRT_PATH/$COREOS_HOSTNAME.qcow2,format=qcow2,bus=virtio --filesystem $LIBVIRT_PATH/$COREOS_HOSTNAME/,config-2,type=mount,mode=squash --vnc --noautoconsole
done
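# To attach to a node's serial console afterwards (illustrative):
#   virsh --connect qemu:///system console coreos1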
|
mjg59/docs
|
os/deploy_coreos_libvirt.sh
|
Shell
|
apache-2.0
| 2,048 |
# creates the dev cluster
gcloud container clusters create "dev-cluster" \
--machine-type "g1-small" \
--image-type "GCI" \
--disk-size "10" \
--scopes "https://www.googleapis.com/auth/compute","https://www.googleapis.com/auth/devstorage.read_write","https://www.googleapis.com/auth/datastore","https://www.googleapis.com/auth/logging.write","https://www.googleapis.com/auth/servicecontrol","https://www.googleapis.com/auth/service.management.readonly","https://www.googleapis.com/auth/trace.append" \
--num-nodes "1" \
--network "default" \
--enable-cloud-logging \
--no-enable-cloud-monitoring
# config kubectl to point to the cluster
gcloud container clusters get-credentials dev-cluster
|
cilliemalan/naked-node
|
scripts/provision-cluster.sh
|
Shell
|
apache-2.0
| 735 |
#!/bin/bash
echo "community default cpu_allocation_ratio is 16.0"
function query() {
openstack hypervisor show -c vcpus -c vcpus_used -f json $HYPERVISOR 2>/dev/null | tee /tmp/out
echo
echo "scale=2; $(cat /tmp/out | jq -r '.vcpus_used') / $(cat /tmp/out | jq -r '.vcpus')" | bc
echo
}
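# Example output per hypervisor (illustrative numbers):
#   {"vcpus": 16, "vcpus_used": 24}
#   1.50   <- vcpus_used / vcpus, the effective allocation ratio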
for HYPERVISOR in 1 2 3; do
echo $HYPERVISOR
query
done
|
jinsenglin/openstack
|
sa/cpu_allocation_ratio_by_hypervisor.sh
|
Shell
|
apache-2.0
| 349 |
set -e
BUNDLE=${BUNDLE-"bundle exec"}
MVN=${MVN-"mvn"}
if [ 'GNU' != "$(tar --help | grep GNU | head -1 | awk '{print $1}')" ]; then
echo 'Unable to release: make sure to use GNU tar'
exit 1
fi
if $(ruby -e'require "java"'); then
# Good
echo 'Detected JRuby'
else
echo 'Unable to release: make sure to use JRuby'
exit 1
fi
VERSION=`grep -E '<version>([0-9]+\.[0-9]+\.[0-9]+)</version>' pom.xml | sed 's/[\t \n]*<version>\(.*\)<\/version>[\t \n]*/\1/'`
if [[ -z "$NO_RELEASE" && "$VERSION" != "$(cat $PWD/VERSION)" ]]; then
echo 'Unable to release: make sure the versions in pom.xml and VERSION match'
exit 1
fi
echo 'Cleaning up'
$BUNDLE rake killbill:clean
echo 'Building gem'
$BUNDLE rake build
if [[ -z "$NO_RELEASE" ]]; then
echo 'Pushing the gem to Rubygems'
$BUNDLE rake release
fi
echo 'Building artifact'
$BUNDLE rake killbill:package
ARTIFACT="$PWD/pkg/killbill-braintree_blue-$VERSION.tar.gz"
echo "Pushing $ARTIFACT to Maven Central"
if [[ -z "$NO_RELEASE" ]]; then
GOAL=gpg:sign-and-deploy-file
REPOSITORY_ID=ossrh-releases
URL=https://oss.sonatype.org/service/local/staging/deploy/maven2/
else
GOAL=deploy:deploy-file
REPOSITORY_ID=sonatype-nexus-snapshots
URL=https://oss.sonatype.org/content/repositories/snapshots/
VERSION="$VERSION-SNAPSHOT"
fi
$MVN $GOAL \
-DgroupId=org.kill-bill.billing.plugin.ruby \
-DartifactId=braintree_blue-plugin \
-Dversion=$VERSION \
-Dpackaging=tar.gz \
-DrepositoryId=$REPOSITORY_ID \
-Durl=$URL \
-Dfile=$ARTIFACT \
-DpomFile=pom.xml
|
killbill/killbill-braintree-blue-plugin
|
release.sh
|
Shell
|
apache-2.0
| 1,567 |
#!/bin/bash -x
set -eu -o pipefail
CF_RELEASE=${CF_RELEASE:-false}
AWS_KEY_NAME=${AWS_KEY_NAME:-}
if [[ $CF_RELEASE == "true" ]]; then
git checkout v$OPENVIDU_PRO_VERSION
fi
export AWS_DEFAULT_REGION=eu-west-1
DATESTAMP=$(date +%s)
TEMPJSON=$(mktemp -t cloudformation-XXX --suffix .json)
# Get Latest Ubuntu AMI id from specified region
# Parameters
# $1 Aws region
getUbuntuAmiId() {
local AMI_ID=$(
aws --region ${1} ec2 describe-images \
--filters "Name=name,Values=*ubuntu/images/hvm-ssd/ubuntu-bionic-18.04-amd64-server-*" \
--query "sort_by(Images, &CreationDate)" \
| jq -r 'del(.[] | select(.ImageOwnerAlias != null)) | .[-1].ImageId'
)
echo $AMI_ID
}
AMIEUWEST1=$(getUbuntuAmiId 'eu-west-1')
AMIUSEAST1=$(getUbuntuAmiId 'us-east-1')
# Copy templates to feed
cp cfn-mkt-kms-ami.yaml.template cfn-mkt-kms-ami.yaml
cp cfn-mkt-ov-ami.yaml.template cfn-mkt-ov-ami.yaml
## Setting Openvidu Version and Ubuntu Latest AMIs
if [[ ! -z ${AWS_KEY_NAME} ]]; then
sed -i "s/ KeyName: AWS_KEY_NAME/ KeyName: ${AWS_KEY_NAME}/g" cfn-mkt-ov-ami.yaml
sed -i "s/ KeyName: AWS_KEY_NAME/ KeyName: ${AWS_KEY_NAME}/g" cfn-mkt-kms-ami.yaml
else
sed -i '/ KeyName: AWS_KEY_NAME/d' cfn-mkt-ov-ami.yaml
sed -i '/ KeyName: AWS_KEY_NAME/d' cfn-mkt-kms-ami.yaml
fi
sed -i "s/AWS_KEY_NAME/${AWS_KEY_NAME}/g" cfn-mkt-ov-ami.yaml
sed -i "s/USE_MASTER_DOCKER_IMAGE/${USE_MASTER_DOCKER_IMAGE}/g" cfn-mkt-ov-ami.yaml
sed -i "s/OPENVIDU_VERSION/${OPENVIDU_PRO_VERSION}/g" cfn-mkt-ov-ami.yaml
sed -i "s/AWS_DOCKER_TAG/${AWS_DOCKER_TAG}/g" cfn-mkt-ov-ami.yaml
sed -i "s/OPENVIDU_RECORDING_DOCKER_TAG/${OPENVIDU_RECORDING_DOCKER_TAG}/g" cfn-mkt-ov-ami.yaml
sed -i "s/AMIEUWEST1/${AMIEUWEST1}/g" cfn-mkt-ov-ami.yaml
sed -i "s/AMIUSEAST1/${AMIUSEAST1}/g" cfn-mkt-ov-ami.yaml
sed -i "s/AWS_KEY_NAME/${AWS_KEY_NAME}/g" cfn-mkt-kms-ami.yaml
sed -i "s/USE_MASTER_DOCKER_IMAGE/${USE_MASTER_DOCKER_IMAGE}/g" cfn-mkt-kms-ami.yaml
sed -i "s/OPENVIDU_VERSION/${OPENVIDU_PRO_VERSION}/g" cfn-mkt-kms-ami.yaml
sed -i "s/OPENVIDU_RECORDING_DOCKER_TAG/${OPENVIDU_RECORDING_DOCKER_TAG}/g" cfn-mkt-kms-ami.yaml
sed -i "s/AMIEUWEST1/${AMIEUWEST1}/g" cfn-mkt-kms-ami.yaml
sed -i "s/AMIUSEAST1/${AMIUSEAST1}/g" cfn-mkt-kms-ami.yaml
## KMS AMI
# Copy template to S3
aws s3 cp cfn-mkt-kms-ami.yaml s3://aws.openvidu.io
TEMPLATE_URL=https://s3-eu-west-1.amazonaws.com/aws.openvidu.io/cfn-mkt-kms-ami.yaml
# Update installation script
if [[ ${UPDATE_INSTALLATION_SCRIPT} == "true" ]]; then
# Avoid overriding existing versions
# Only master and non existing versions can be overriden
if [[ ${OPENVIDU_PRO_VERSION} != "master" ]]; then
INSTALL_SCRIPT_EXISTS=true
aws s3api head-object --bucket aws.openvidu.io --key install_media_node_$OPENVIDU_PRO_VERSION.sh || INSTALL_SCRIPT_EXISTS=false
if [[ ${INSTALL_SCRIPT_EXISTS} == "true" ]]; then
echo "Aborting updating s3://aws.openvidu.io/install_media_node_${OPENVIDU_PRO_VERSION}.sh. File actually exists."
exit 1
fi
fi
aws s3 cp ../docker-compose/media-node/install_media_node.sh s3://aws.openvidu.io/install_media_node_$OPENVIDU_PRO_VERSION.sh --acl public-read
fi
# Pass --disable-rollback only for non-nightly builds; quoting an empty
# command substitution would hand the aws CLI a bogus empty argument.
DISABLE_ROLLBACK=''
if [ "$NIGHTLY" == "false" ]; then
DISABLE_ROLLBACK='--disable-rollback'
fi
aws cloudformation create-stack \
--stack-name kms-${DATESTAMP} \
--template-url ${TEMPLATE_URL} \
${DISABLE_ROLLBACK}
aws cloudformation wait stack-create-complete --stack-name kms-${DATESTAMP}
echo "Getting instance ID"
INSTANCE_ID=$(aws ec2 describe-instances --filters "Name=tag:Name,Values=kms-${DATESTAMP}" | jq -r ' .Reservations[] | .Instances[] | .InstanceId')
echo "Stopping the instance"
aws ec2 stop-instances --instance-ids ${INSTANCE_ID}
echo "wait for the instance to stop"
aws ec2 wait instance-stopped --instance-ids ${INSTANCE_ID}
echo "Creating AMI"
KMS_RAW_AMI_ID=$(aws ec2 create-image --instance-id ${INSTANCE_ID} --name KMS-ov-${OPENVIDU_PRO_VERSION}-${DATESTAMP} --description "Kurento Media Server" --output text)
echo "Cleaning up"
aws cloudformation delete-stack --stack-name kms-${DATESTAMP}
## OpenVidu AMI
# Copy template to S3
aws s3 cp cfn-mkt-ov-ami.yaml s3://aws.openvidu.io
TEMPLATE_URL=https://s3-eu-west-1.amazonaws.com/aws.openvidu.io/cfn-mkt-ov-ami.yaml
# Update installation script
if [[ ${UPDATE_INSTALLATION_SCRIPT} == "true" ]]; then
# Avoid overriding existing versions
# Only master and non existing versions can be overriden
if [[ ${OPENVIDU_PRO_VERSION} != "master" ]]; then
INSTALL_SCRIPT_EXISTS=true
aws s3api head-object --bucket aws.openvidu.io --key install_openvidu_pro_$OPENVIDU_PRO_VERSION.sh || INSTALL_SCRIPT_EXISTS=false
if [[ ${INSTALL_SCRIPT_EXISTS} == "true" ]]; then
echo "Aborting updating s3://aws.openvidu.io/install_openvidu_pro_${OPENVIDU_PRO_VERSION}.sh. File actually exists."
exit 1
fi
fi
aws s3 cp ../docker-compose/openvidu-server-pro/install_openvidu_pro.sh s3://aws.openvidu.io/install_openvidu_pro_$OPENVIDU_PRO_VERSION.sh --acl public-read
fi
aws cloudformation create-stack \
--stack-name openvidu-${DATESTAMP} \
--template-url ${TEMPLATE_URL} \
${DISABLE_ROLLBACK}
aws cloudformation wait stack-create-complete --stack-name openvidu-${DATESTAMP}
echo "Getting instance ID"
INSTANCE_ID=$(aws ec2 describe-instances --filters "Name=tag:Name,Values=openvidu-${DATESTAMP}" | jq -r ' .Reservations[] | .Instances[] | .InstanceId')
echo "Stopping the instance"
aws ec2 stop-instances --instance-ids ${INSTANCE_ID}
echo "wait for the instance to stop"
aws ec2 wait instance-stopped --instance-ids ${INSTANCE_ID}
echo "Creating AMI"
OV_RAW_AMI_ID=$(aws ec2 create-image --instance-id ${INSTANCE_ID} --name OpenViduServerPro-${OPENVIDU_PRO_VERSION}-${DATESTAMP} --description "Openvidu Server Pro" --output text)
echo "Cleaning up"
aws cloudformation delete-stack --stack-name openvidu-${DATESTAMP}
# Wait for the instance
# Unfortunately, aws cli does not have a way to increase timeout
WAIT_RETRIES=0
WAIT_MAX_RETRIES=3
until [ "${WAIT_RETRIES}" -ge "${WAIT_MAX_RETRIES}" ]
do
aws ec2 wait image-available --image-ids ${OV_RAW_AMI_ID} && break
WAIT_RETRIES=$((WAIT_RETRIES+1))
sleep 5
done
# Updating the template
sed "s/OV_AMI_ID/${OV_RAW_AMI_ID}/" cfn-openvidu-server-pro-no-market.yaml.template > cfn-openvidu-server-pro-no-market-${OPENVIDU_PRO_VERSION}.yaml
sed -i "s/KMS_AMI_ID/${KMS_RAW_AMI_ID}/g" cfn-openvidu-server-pro-no-market-${OPENVIDU_PRO_VERSION}.yaml
sed -i "s/AWS_DOCKER_TAG/${AWS_DOCKER_TAG}/g" cfn-openvidu-server-pro-no-market-${OPENVIDU_PRO_VERSION}.yaml
# Update CF template
if [[ ${UPDATE_CF} == "true" ]]; then
# Avoid overriding existing versions
# Only master and non existing versions can be overriden
if [[ ${OPENVIDU_PRO_VERSION} != "master" ]]; then
CF_EXIST=true
aws s3api head-object --bucket aws.openvidu.io --key CF-OpenVidu-Pro-${OPENVIDU_PRO_VERSION}.yaml || CF_EXIST=false
if [[ ${CF_EXIST} == "true" ]]; then
echo "Aborting updating s3://aws.openvidu.io/CF-OpenVidu-Pro-${OPENVIDU_PRO_VERSION}.yaml. File actually exists."
exit 1
fi
fi
aws s3 cp cfn-openvidu-server-pro-no-market-${OPENVIDU_PRO_VERSION}.yaml s3://aws.openvidu.io/CF-OpenVidu-Pro-${OPENVIDU_PRO_VERSION}.yaml --acl public-read
fi
rm $TEMPJSON
rm cfn-mkt-kms-ami.yaml
rm cfn-mkt-ov-ami.yaml
|
OpenVidu/openvidu
|
openvidu-server/deployments/pro/aws/createAMIs.sh
|
Shell
|
apache-2.0
| 7,387 |
#!/bin/sh
set -ex
# no-op while upx in alpine appears to be broken
#upx dist/**/*
|
glassechidna/stackit
|
upx.sh
|
Shell
|
apache-2.0
| 83 |
set -o errexit;
echo "restarting postgresql";
service postgresql restart;
echo "changing into firstdraft/projfd";
cd firstdraft/projfd;
echo "making migrations";
python3 manage.py makemigrations;
echo "migrating";
python3 manage.py migrate;
echo "restarting postgresql";
service postgresql restart;
|
FirstDraftGIS/firstdraft
|
bash_scripts/setup_database_tables.sh
|
Shell
|
apache-2.0
| 303 |
#!/usr/bin/env bash
#shellcheck disable=SC2155
set -e
# This should be executed from top-level directory not from `tests` directory
# Script needs one variable to be set before execution
# 1) PULL_SECRET_PATH - path to pull secret file
set -eo pipefail
BACKEND="${1}"
LEAVE_RUNNING="${LEAVE_RUNNING:-n}" # do not teardown after successful initialization
SMOKE_TEST_OUTPUT="Never executed. Problem with one of previous stages"
[ -z ${PULL_SECRET_PATH+x} ] && (echo "Please set PULL_SECRET_PATH"; exit 1)
[ -z ${DOMAIN+x} ] && DOMAIN="tectonic-ci.de"
[ -z ${JOB_NAME+x} ] && PREFIX="${USER:-test}" || PREFIX="ci-${JOB_NAME#*/}"
CLUSTER_NAME=$(echo "${PREFIX}-$(uuidgen -r | cut -c1-5)" | tr '[:upper:]' '[:lower:]')
TECTONIC="${PWD}/tectonic-dev/installer/tectonic"
exec &> >(tee -ai "$CLUSTER_NAME.log")
function destroy() {
echo -e "\\e[34m Exiting... Destroying Tectonic...\\e[0m"
"${TECTONIC}" destroy --dir="${CLUSTER_NAME}" --continue-on-error
echo -e "\\e[36m Finished! Smoke test output:\\e[0m ${SMOKE_TEST_OUTPUT}"
echo -e "\\e[34m So Long, and Thanks for All the Fish\\e[0m"
}
echo -e "\\e[36m Starting build process...\\e[0m"
bazel build tarball smoke_tests
# In future bazel build could be extracted to another job which could be running in docker container like this:
# docker run --rm -v $PWD:$PWD:Z -w $PWD quay.io/coreos/tectonic-builder:bazel-v0.3 bazel build tarball smoke_tests
echo -e "\\e[36m Unpacking artifacts...\\e[0m"
tar -zxf bazel-bin/tectonic-dev.tar.gz
cp bazel-bin/tests/smoke/linux_amd64_pure_stripped/go_default_test tectonic-dev/smoke
chmod 755 tectonic-dev/smoke
cd tectonic-dev
### HANDLE SSH KEY ###
if [ ! -f ~/.ssh/id_rsa.pub ]; then
echo -e "\\e[36m Generating SSH key-pair...\\e[0m"
ssh-keygen -qb 2048 -t rsa -f ~/.ssh/id_rsa -N "" </dev/zero
fi
case "${BACKEND}" in
aws)
if test -z "${AWS_REGION+x}"
then
echo -e "\\e[36m Calculating the AWS region...\\e[0m"
AWS_REGION="$(aws configure get region)" ||
AWS_REGION="${AWS_REGION:-us-east-1}"
fi
export AWS_DEFAULT_REGION="${AWS_REGION}"
unset AWS_SESSION_TOKEN
### ASSUME ROLE ###
echo -e "\\e[36m Setting up AWS credentials...\\e[0m"
ACCOUNT_ID=$(aws sts get-caller-identity --query Account --output text)
ROLE_ARN="arn:aws:iam::${ACCOUNT_ID}:role/tf-tectonic-installer"
RES="$(aws sts assume-role --role-arn="${ROLE_ARN}" --role-session-name="jenkins-${CLUSTER_NAME}" --query Credentials --output json)" &&
export AWS_SECRET_ACCESS_KEY="$(echo "${RES}" | jq --raw-output '.SecretAccessKey')" &&
export AWS_ACCESS_KEY_ID="$(echo "${RES}" | jq --raw-output '.AccessKeyId')" &&
export AWS_SESSION_TOKEN="$(echo "${RES}" | jq --raw-output '.SessionToken')" &&
CONFIGURE_AWS_ROLES=True ||
CONFIGURE_AWS_ROLES=False
;;
libvirt)
;;
*)
echo "unrecognized backend: ${BACKEND}" >&2
echo "Use ${0} BACKEND, where BACKEND is aws or libvirt" >&2
exit 1
esac
echo -e "\\e[36m Creating Tectonic configuration...\\e[0m"
python <<-EOF >"${CLUSTER_NAME}.yaml"
import datetime
import os.path
import sys
import yaml
with open('examples/${BACKEND}.yaml') as f:
config = yaml.safe_load(f)
config['name'] = '${CLUSTER_NAME}'
with open(os.path.expanduser(os.path.join('~', '.ssh', 'id_rsa.pub'))) as f:
config['admin']['sshKey'] = f.read()
config['baseDomain'] = '${DOMAIN}'
with open('${PULL_SECRET_PATH}') as f:
config['pullSecret'] = f.read()
if '${BACKEND}' == 'aws':
config['aws']['region'] = '${AWS_REGION}'
config['aws']['extraTags'] = {
'expirationDate': (
datetime.datetime.utcnow() + datetime.timedelta(hours=4)
).strftime('%Y-%m-%dT%H:%M+0000'),
}
if ${CONFIGURE_AWS_ROLES:-False}:
config['aws']['master']['iamRoleName'] = 'tf-tectonic-master-node'
config['aws']['worker']['iamRoleName'] = 'tf-tectonic-worker-node'
elif '${BACKEND}' == 'libvirt' and '${IMAGE_URL}':
config['libvirt']['image'] = '${IMAGE_URL}'
yaml.safe_dump(config, sys.stdout)
EOF
echo -e "\\e[36m Initializing Tectonic...\\e[0m"
"${TECTONIC}" init --config="${CLUSTER_NAME}".yaml
trap destroy EXIT
echo -e "\\e[36m Deploying Tectonic...\\e[0m"
"${TECTONIC}" install --dir="${CLUSTER_NAME}"
echo -e "\\e[36m Running smoke test...\\e[0m"
export SMOKE_KUBECONFIG="${PWD}/${CLUSTER_NAME}/generated/auth/kubeconfig"
export SMOKE_MANIFEST_PATHS="${PWD}/${CLUSTER_NAME}/generated"
case "${BACKEND}" in
aws)
export SMOKE_NODE_COUNT=5 # Sum of all nodes (boostrap + master + worker)
;;
libvirt)
export SMOKE_NODE_COUNT=4
;;
esac
exec 5>&1
if test "${LEAVE_RUNNING}" = y; then
echo "leaving running; tear down manually with: cd ${PWD} && installer/tectonic destroy --dir=${CLUSTER_NAME}"
trap - EXIT
fi
SMOKE_TEST_OUTPUT=$(./smoke -test.v --cluster | tee -i >(cat - >&5))
|
derekhiggins/installer
|
tests/run.sh
|
Shell
|
apache-2.0
| 4,803 |
#!/bin/bash
BASEDIR=$(dirname $0)
cd $BASEDIR
VERSION=$(<VERSION-PI)
FROM=$(<FROM-PI)
SPECIFIC_FILE=Dockerfile.specific-PI
IMAGE_NAME=homebridge-pi-v$VERSION
SED_COMMAND="sed -i \"/#####SPECIFIC#####/ r $SPECIFIC_FILE\" Dockerfile"
source homebridge-common.sh
|
patrickbusch/homebridge-docker
|
homebridge-pi.sh
|
Shell
|
apache-2.0
| 263 |
#!/usr/bin/env bash
set -e
print_script_name () {
local NAME="${1}"
if [ -z "${NAME}" ]; then
echo "Running '${0##*/}'..."
else
echo "Running '${NAME}'..."
fi
}
verify_bash_version () {
local REQUIRED_VERSION="${1}"
local REQUIRED_MAJOR="$(printf "${REQUIRED_VERSION}" | cut -d '.' -f 1)"
local REQUIRED_MINOR="$(printf "${REQUIRED_VERSION}" | cut -d '.' -f 2)"
local ACTUAL_MAJOR="${BASH_VERSINFO[0]}"
local ACTUAL_MINOR="${BASH_VERSINFO[1]}"
if [ "${ACTUAL_MAJOR}" -lt "${REQUIRED_MAJOR}" ]; then
printf "Bash ${REQUIRED_VERSION} is required, "
printf "but ${BASH_VERSION} is installed.\n"
exit 1
elif [ "${ACTUAL_MINOR}" -lt "${REQUIRED_MINOR}" ]; then
printf "Bash ${REQUIRED_VERSION} is required, "
printf "but ${BASH_VERSION} is installed.\n"
exit 1
fi
}
verify_variable_exists () {
# Note: This function uses 'nameref' variables, introduced in bash 4.3.
verify_bash_version "4.3"
local VARIABLE_NAME="${1}"
local -n VARIABLE_VALUE="${1}"
if [ -z "${VARIABLE_VALUE}" ]; then
echo "Variable '${VARIABLE_NAME}' is empty."
exit 1
fi
}
verify_network_connectivity () {
local RETRY_DELAY_SEC="10"
local RETRY_COUNT="60"
echo "Verifying network connectivity..."
local DNS_SERVER="$(systemd-resolve --status \
| grep -i -m 1 -o 'DNS Servers.*' \
| cut -d ' ' -f 3)"
if [ -z "${DNS_SERVER}" ]; then
echo "Unable to determine DNS server address."
exit 1
fi
until ping -c 1 -q -w 1 "${DNS_SERVER}" > /dev/null
do
if [ "${RETRY_COUNT}" -ge "1" ]; then
echo "Unable to ping DNS server ${DNS_SERVER}, pausing ${RETRY_DELAY_SEC}sec..."
echo "Retries remaining: ${RETRY_COUNT}"
sleep "${RETRY_DELAY_SEC}"
((RETRY_COUNT--))
else
echo "Network is still unavailable, aborting..."
exit 1
fi
done
}
get_ssm_parameter_value () {
local NAME="${1}"
aws ssm get-parameter \
--name "${NAME}" \
--query "Parameter.Value" \
--output "text" \
--with-decryption
}
set_aws_default_region () {
echo "Setting AWS_DEFAULT_REGION..."
local METADATA_URL="http://169.254.169.254/latest/dynamic/instance-identity/document"
export AWS_DEFAULT_REGION="$(curl -s "${METADATA_URL}" | jq -r .region)"
verify_variable_exists "AWS_DEFAULT_REGION"
}
create_key () {
local PARAMETER_NAME="${1}"
local FILE_NAME="${2}"
echo "Creating key '${FILE_NAME}' in '${DESTINATION_DIR}'..."
local VALUE="$(get_ssm_parameter_value "${PARAMETER_NAME}")"
sudo mkdir -p "${DESTINATION_DIR}/" &>/dev/null
sudo chown "${DESTINATION_OWNER}":"${DESTINATION_GROUP}" "${DESTINATION_DIR}/"
sudo chmod "${DESTINATION_DIR_PERMISSIONS}" "${DESTINATION_DIR}/"
printf "%s" "${VALUE}" | \
sed 's/- /-\n/g' | \
sed 's/ -/\n-/g' | \
sed '/^-/! s/ /\n/g' | \
sudo -u "${DESTINATION_OWNER}" \
tee "${DESTINATION_DIR}/${FILE_NAME}" &>/dev/null
sudo chmod "${DESTINATION_FILE_PERMISSIONS}" "${DESTINATION_DIR}/${FILE_NAME}"
}
main () {
DESTINATION_DIR="/home/teamcity/.ssh/"
DESTINATION_DIR_PERMISSIONS="700"
DESTINATION_FILE_PERMISSIONS="600"
DESTINATION_OWNER="teamcity"
DESTINATION_GROUP="teamcity"
print_script_name "create_ssh_keys.sh"
set_aws_default_region
create_key "/chef/keys/deploysvc" "deploysvc.pem"
create_key "/amazon/keys/go-aws-us-blu" "go_aws_us_blu.pem"
create_key "/amazon/keys/go-aws-us-gra" "go_aws_us_gra.pem"
create_key "/amazon/keys/go-aws-us-red" "go_aws_us_red.pem"
create_key "/amazon/keys/go-aws-us-grn" "go_aws_us_grn.pem"
create_key "/amazon/keys/go-aws-us-cicd" "go_aws_us_cicd.pem"
create_key "/amazon/keys/go-aws-us-cicd-cookbooks" "go_aws_us_cicd_cookbooks.pem"
}
main
|
daptiv/teamcity
|
packer/ubuntu_18.04_teamcity_agent/scripts/create_ssh_keys.sh
|
Shell
|
apache-2.0
| 3,704 |
sed -I 's/^PermitRootLogin yes/PermitRootLogin no/' /etc/ssh/sshd_config
kill -HUP `cat /var/run/sshd.pid`
|
upperstream/packer-templates
|
netbsd/provisioners/cleanup-7.0.sh
|
Shell
|
apache-2.0
| 107 |
#!/bin/bash
set -euxo pipefail
# drill installation paths and user & version details
readonly DRILL_USER=drill
readonly DRILL_USER_HOME=/var/lib/drill
readonly DRILL_HOME=/usr/lib/drill
readonly DRILL_LOG_DIR=${DRILL_HOME}/log
readonly DRILL_VERSION='1.15.0'
function err() {
echo "[$(date +'%Y-%m-%dT%H:%M:%S%z')]: $@" >&2
return 1
}
function print_err_logs() {
for i in ${DRILL_LOG_DIR}/*; do
echo ">>> $i"
cat "$i"
done
return 1
}
function create_hive_storage_plugin() {
# Create the hive storage plugin
cat >/tmp/hive_plugin.json <<EOF
{
"name": "hive",
"config": {
"type": "hive",
"enabled": true,
"configProps": {
"lazyInit": true,
"hive.metastore.uris": "${hivemeta}",
"hive.metastore.sasl.enabled": "false",
"fs.default.name": "${hdfs}"
}
}
}
EOF
curl -d@/tmp/hive_plugin.json -H 'Content-Type: application/json' -X POST http://localhost:8047/storage/hive.json
}
function create_gcs_storage_plugin() {
# Create GCS storage plugin
cat >/tmp/gcs_plugin.json <<EOF
{
"config": {
"connection": "${gs_plugin_bucket}",
"enabled": true,
"formats": {
"avro": {
"type": "avro"
},
"csv": {
"delimiter": ",",
"extensions": [
"csv"
],
"type": "text"
},
"csvh": {
"delimiter": ",",
"extensions": [
"csvh"
],
"extractHeader": true,
"type": "text"
},
"json": {
"extensions": [
"json"
],
"type": "json"
},
"parquet": {
"type": "parquet"
},
"psv": {
"delimiter": "|",
"extensions": [
"tbl"
],
"type": "text"
},
"sequencefile": {
"extensions": [
"seq"
],
"type": "sequencefile"
},
"tsv": {
"delimiter": "\t",
"extensions": [
"tsv"
],
"type": "text"
}
},
"type": "file",
"workspaces": {
"root": {
"defaultInputFormat": null,
"location": "/",
"writable": true
}
}
},
"name": "gs"
}
EOF
curl -d@/tmp/gcs_plugin.json -H 'Content-Type: application/json' -X POST http://localhost:8047/storage/gs.json
}
function create_hdfs_storage_plugin() {
# Create/Update hdfs storage plugin
cat >/tmp/hdfs_plugin.json <<EOF
{
"config": {
"connection": "${hdfs}",
"enabled": true,
"formats": {
"avro": {
"type": "avro"
},
"csv": {
"delimiter": ",",
"extensions": [
"csv"
],
"type": "text"
},
"csvh": {
"delimiter": ",",
"extensions": [
"csvh"
],
"extractHeader": true,
"type": "text"
},
"json": {
"extensions": [
"json"
],
"type": "json"
},
"parquet": {
"type": "parquet"
},
"psv": {
"delimiter": "|",
"extensions": [
"tbl"
],
"type": "text"
},
"sequencefile": {
"extensions": [
"seq"
],
"type": "sequencefile"
},
"tsv": {
"delimiter": "\t",
"extensions": [
"tsv"
],
"type": "text"
}
},
"type": "file",
"workspaces": {
"root": {
"defaultInputFormat": null,
"location": "/",
"writable": false
},
"tmp": {
"defaultInputFormat": null,
"location": "/tmp",
"writable": true
}
}
},
"name": "hdfs"
}
EOF
curl -d@/tmp/hdfs_plugin.json -H 'Content-Type: application/json' -X POST http://localhost:8047/storage/hdfs.json
}
function start_drillbit() {
# Start drillbit
sudo -u ${DRILL_USER} ${DRILL_HOME}/bin/drillbit.sh status ||
sudo -u ${DRILL_USER} ${DRILL_HOME}/bin/drillbit.sh start && sleep 60
create_hive_storage_plugin
create_gcs_storage_plugin
create_hdfs_storage_plugin
}
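# To verify a plugin registered correctly once the drillbit is up (illustrative):
#   curl http://localhost:8047/storage/hive.json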
function main() {
# Determine the cluster name
local cluster_name=$(/usr/share/google/get_metadata_value attributes/dataproc-cluster-name)
# Determine the cluster uuid
local cluster_uuid=$(/usr/share/google/get_metadata_value attributes/dataproc-cluster-uuid)
# Change these if you have a GCS bucket you'd like to use instead.
local dataproc_bucket=$(/usr/share/google/get_metadata_value attributes/dataproc-bucket)
# Use a GCS bucket for Drill profiles, partitioned by cluster name and uuid.
local profile_store="gs://${dataproc_bucket}/profiles/${cluster_name}/${cluster_uuid}"
local gs_plugin_bucket="gs://${dataproc_bucket}"
# intelligently generate the zookeeper string
readonly zookeeper_cfg="/etc/zookeeper/conf/zoo.cfg"
readonly zookeeper_client_port=$(grep 'clientPort' ${zookeeper_cfg} |
tail -n 1 |
cut -d '=' -f 2)
readonly zookeeper_list=$(grep '^server\.' ${zookeeper_cfg} |
tac |
sort -u -t '=' -k1,1 |
cut -d '=' -f 2 |
cut -d ':' -f 1 |
sed "s/$/:${zookeeper_client_port}/" |
xargs echo |
sed "s/ /,/g")
# Get hive metastore thrift and HDFS URIs
local hivemeta=$(bdconfig get_property_value \
--configuration_file /etc/hive/conf/hive-site.xml \
--name hive.metastore.uris 2>/dev/null)
local hdfs=$(bdconfig get_property_value \
--configuration_file /etc/hadoop/conf/core-site.xml \
--name fs.default.name 2>/dev/null)
# Create drill pseudo-user.
useradd -r -m -d ${DRILL_USER_HOME} ${DRILL_USER} || echo
# Create drill home
mkdir -p ${DRILL_HOME} && chown ${DRILL_USER}:${DRILL_USER} ${DRILL_HOME}
# Download and unpack Drill as the pseudo-user.
wget -nv --timeout=30 --tries=5 --retry-connrefused \
https://archive.apache.org/dist/drill/drill-${DRILL_VERSION}/apache-drill-${DRILL_VERSION}.tar.gz
tar -xzf apache-drill-${DRILL_VERSION}.tar.gz -C ${DRILL_HOME} --strip 1
# Replace default configuration with cluster-specific.
sed -i "s/drillbits1/${cluster_name}/" ${DRILL_HOME}/conf/drill-override.conf
sed -i "s/localhost:2181/${zookeeper_list}/" ${DRILL_HOME}/conf/drill-override.conf
# Make the log directory
mkdir -p ${DRILL_LOG_DIR} && chown ${DRILL_USER}:${DRILL_USER} ${DRILL_LOG_DIR}
# Symlink drill conf dir to /etc
mkdir -p /etc/drill && ln -sf ${DRILL_HOME}/conf /etc/drill/
# Point drill logs to $DRILL_LOG_DIR
echo DRILL_LOG_DIR=${DRILL_LOG_DIR} >>${DRILL_HOME}/conf/drill-env.sh
# Link GCS connector to drill 3rdparty jars
local connector_dir
if [[ -d /usr/local/share/google/dataproc/lib ]]; then
connector_dir=/usr/local/share/google/dataproc/lib
else
connector_dir=/usr/lib/hadoop/lib
fi
ln -sf ${connector_dir}/gcs-connector-*.jar ${DRILL_HOME}/jars/3rdparty
# Symlink core-site.xml to $DRILL_HOME/conf
ln -sf /etc/hadoop/conf/core-site.xml /etc/drill/conf
# Symlink hdfs-site.xml to $DRILL_HOME/conf
ln -sf /etc/hadoop/conf/hdfs-site.xml /etc/drill/conf
# Set ZK PStore to use a GCS Bucket
# Using GCS makes all Drill profiles available from any drillbit, and also
# persists the profiles past the lifetime of a cluster.
cat >>${DRILL_HOME}/conf/drill-override.conf <<EOF
drill.exec: { sys.store.provider.zk.blobroot: "${profile_store}" }
EOF
chown -R drill:drill /etc/drill/conf/*
chmod +rx /etc/drill/conf/*
chmod 777 ${DRILL_HOME}/log/
start_drillbit || err "Failed to start drill"
# Clean up
rm -f /tmp/*_plugin.json
}
main || print_err_logs
|
GoogleCloudDataproc/initialization-actions
|
drill/drill.sh
|
Shell
|
apache-2.0
| 8,382 |
#!/bin/bash
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Copyright 2010-2013 SourceGear, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# This script goes through a list of patterns and paths, checking for matches
# and failures. It's intended to "keep us honest" in the way Veracity handles
# wildcards.
# Usage: ./filespec.sh <path_to_source>
#
# The source is expected to be a tab-delimited data list of the form:
# [1|0] "quoted_pattern" "quoted_path"
#
# If the source file has a .c suffix, then it is assumed to be the actual
# test code, in which case it will be parsed, and the data list will
# be generated on the fly.
# NOTE: source_parse doesn't handle embedded spaces
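# A minimal sketch of two hypothetical input data lines (tab-delimited):
#   1<TAB>"a*c"<TAB>"abc"
#   0<TAB>"a*c"<TAB>"abd"
# i.e. the pattern "a*c" is expected to match "abc" but not "abd".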
function BuildDataFile
{
# $1 is input_file - $2 is output_file
echo "- Generating test data file: $2 from source: $1 ..."
[ -e "$2" ] && rm "$2"
# ensure Unixy EOLs; grep just the TRUE|FALSE lines; eliminate lines with '\' 'NULL' and '""'
# also eliminate lines with the keyword 'NOBASH'
tr -d '\r' <"${1}" | grep '{ SG_TRUE, ".*", 0u \|{ SG_FALSE, ".*", 0u ' | grep -v '\\\|""\|NULL\|NOBASH' > $TEMPFILE
echo "# The following lines were removed from the SOURCE file ($1)">>$SKIPLOG
tr -d '\r' <"${1}" | grep '{ SG_TRUE, ".*", 0u \|{ SG_FALSE, ".*", 0u ' | grep '\\\|""\|NULL\|NOBASH' >>$SKIPLOG
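    # For reference, a hypothetical C source line the grep above keeps:
    #   { SG_TRUE, "a*c", "abc", 0u },
    # which the read loop below turns into: 1<TAB>x<TAB>a*c<TAB>abc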
while read x a b c d y
do
MATCH="${a:3:$(( ${#a} - 4 ))}"
MASK="${b:1:$(( ${#b} - 3 ))}"
SAMPLE="${c:1:$(( ${#c} - 3 ))}"
[ "$MATCH" == "TRUE" ] && MATCH=1 || MATCH=0
if [ "$d" == "0u" ] ; then
echo -e "${MATCH}\tx\t${MASK}\t${SAMPLE}" >> "$2"
else
echo "## ERROR parsing: $x $a $b $c $d $y" >> "$ERRORLOG"
echo "## ERROR parsing: $x $a $b $c $d $y"
EXITCODE=1
fi
done < $TEMPFILE
}
function FilterInputFile
{
# $1 is input_file - $2 is output_file
echo "- Filtering test data file: $2 from input: $1 ..."
[ -e "$2" ] && rm "$2"
# ensure Unixy EOLs; eliminate comments and lines with '\' 'NULL' and '""'
# also eliminate lines with the keyword 'NOBASH'
if [ "${BASH_VERSINFO[0]}" -ge "4" ] ; then
tr -d '\r' <"${1}" | grep -v '^#\|\\\|""\|NULL\|NOBASH' > $TEMPFILE
echo "# The following lines were removed from the TEST DATA file ($1)">>$SKIPLOG
tr -d '\r' <"${1}" | grep '^#\|\\\|""\|NULL\|NOBASH' >> $SKIPLOG
else
# for older BASH versions, skip the star-star
tr -d '\r' <"${1}" | grep -v '^#\|\\\|""\|\*\*\|NULL\|NOBASH' > $TEMPFILE
echo "# The following lines were removed from the TEST DATA file ($1)">>$SKIPLOG
echo "# tests using globstar wildcards (/**/) are removed for pre-4 versions of BASH">>$SKIPLOG
echo "# Your bash version is $BASH_VERSION"
tr -d '\r' <"${1}" | grep '^#\|\\\|""\|\*\*\|NULL\|NOBASH' >> $SKIPLOG
fi
# remove surrounding quotes
IFS=$' \n'
while read line
do
if [ -n "$line" ] ; then
MATCH=$(echo $line | cut -f 1)
MASK=$(echo $line | cut -f 3)
SAMPLE=$(echo $line | cut -f 4)
overflow=$(echo $line | cut -f 5)
if [ -z "$SAMPLE" ] || [ -n "$overflow" ] && [ "${overflow:0:1}" != "#" ] ; then
echo "## ERROR parsing: MATCH=[$MATCH]" >> "$ERRORLOG"
echo " MASK=[$MASK]" >> "$ERRORLOG"
echo " SAMPLE=[$SAMPLE]" >> "$ERRORLOG"
echo " overflow=[$overflow]" >> "$ERRORLOG"
echo "## ERROR parsing: MATCH=[$MATCH]"
echo " MASK=[$MASK]"
echo " SAMPLE=[$SAMPLE]"
echo " overflow=[$overflow]"
EXITCODE=1
else
[ "${MATCH:0:1}" == "\"" ] && MATCH="${MATCH:1}"
[ "${MATCH:(-1)}" == "\"" ] && MATCH="${MATCH:0:$(( ${#MATCH} - 1 ))}"
[ "${MASK:0:1}" == "\"" ] && MASK="${MASK:1}"
[ "${MASK:(-1)}" == "\"" ] && MASK="${MASK:0:$(( ${#MASK} - 1 ))}"
[ "${SAMPLE:0:1}" == "\"" ] && SAMPLE="${SAMPLE:1}"
[ "${SAMPLE:(-1)}" == "\"" ] && SAMPLE="${SAMPLE:0:$(( ${#SAMPLE} - 1 ))}"
echo -e "${MATCH}\tx\t${MASK}\t${SAMPLE}" >> "$2"
fi
fi
done < $TEMPFILE
}
function ProcessDataFile
{
# $1 is test_data_file
# ensure Unixy EOLs
tr -d '\r' <"${1}" > $TEMPFILE
IFS=$' \n'
while read line
do
MATCH=$(echo $line | cut -f 1)
MASK=$(echo $line | cut -f 3)
SAMPLE=$(echo $line | cut -f 4)
overflow=$(echo $line | cut -f 5)
if [ -z "$SAMPLE" ] || [ -n "$overflow" ] ; then
echo "## ERROR parsing: MATCH=[$MATCH]" >> "$ERRORLOG"
echo " MASK=[$MASK]" >> "$ERRORLOG"
echo " SAMPLE=[$SAMPLE]" >> "$ERRORLOG"
echo " overflow=[$overflow]" >> "$ERRORLOG"
echo "## ERROR parsing: MATCH=[$MATCH]"
echo " MASK=[$MASK]"
echo " SAMPLE=[$SAMPLE]"
echo " overflow=[$overflow]"
EXITCODE=1
else
TestSample
fi
done < $TEMPFILE
IFS=$' \t\n'
}
function TestSample
{
if [ "$MATCH" == "1" ] ; then
expect="=="
else
expect="<>"
fi
mkdir -p "$SAMPLE"
if [ ! -d "$SAMPLE" ] ; then
LogFsError creating directory
return
fi
TestMask dir
rm -rf * .?* >/dev/null 2>&1
if [ -d "$SAMPLE" ] ; then
[ "$(basename $SAMPLE)" == "." ] || LogFsError deleting directory
return
fi
mkdir -p "$(dirname $SAMPLE)"
touch "$SAMPLE"
# if [ ! -f "$SAMPLE" ] ; then ### this reports an error when checking for foo/. - can't create as file
if [ ! -e "$SAMPLE" ] ; then
LogFsError creating file
return
fi
TestMask file
rm -rf * .?* >/dev/null 2>&1
if [ -f "$SAMPLE" ] ; then
LogFsError deleting file
return
fi
}
function LogFsError
{
echo "## ERROR $1 $2: $SAMPLE" >> "$ERRORLOG"
echo "## ERROR $1 $2: $SAMPLE"
EXITCODE=1
}
function TestMask
{
# set non-matching patterns to return null, rather than the pattern
shopt -s nullglob
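    # With nullglob set, an unmatched pattern expands to nothing: e.g.
    # (hypothetically) `for f in no-such-*` iterates zero times instead of
    # once over the literal string "no-such-*".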
localresult="0"
for f in $MASK
do
# ignore trailing "/"
[ "${f:(-1)}" == "/" ] && f="${f:0:$(( ${#f} - 1 ))}"
[ "$f" == "$SAMPLE" ] && localresult="1"
done
if [ "$localresult" == "$MATCH" ] ; then
# passed
echo "Passed: \"$MASK\" $expect \"$SAMPLE\" ($1)" >>"$OUTLOG"
echo "Passed: \"$MASK\" $expect \"$SAMPLE\" ($1)"
else
# failed
EXITCODE=1
echo "FAILED: \"$MASK\" $expect \"$SAMPLE\" ($1)" >>"$OUTLOG"
echo " (" ${MASK} ")" >>"$OUTLOG"
echo "FAILED: \"$MASK\" $expect \"$SAMPLE\" ($1)"
echo " (" ${MASK} ")"
echo "FAILED: \"$MASK\" $expect \"$SAMPLE\" ($1)" >>"$ERRORLOG"
echo " (" ${MASK} ")" >>"$ERRORLOG"
fi
shopt -u nullglob
}
################## main() ###################
HOMEDIR="${PWD}/wildbash"
TESTDIR="${HOMEDIR}/testdir"
INTERFILE="${HOMEDIR}/filespec_intermediate_data.txt"
DATAFILE="${HOMEDIR}/filespec_test_data.txt"
OUTLOG="${HOMEDIR}/fs_output.txt"
ERRORLOG="${HOMEDIR}/fs_errors.txt"
SKIPLOG="${HOMEDIR}/fs_skipped.txt"
TEMPFILE="${HOMEDIR}/filespec_temp.txt"
EXITCODE=0
if [ -z "$1" ] ; then
echo "# ERROR: filespec.sh was called with no source argument"
echo "#"
echo "# Usage: ./filespec.sh <path_to_source>"
echo "#"
echo "# The source is expected be a tab-delimited data list of the form:"
echo '# [1|0] "quoted_pattern" "quoted_path"'
echo "#"
echo "# If the source file has a .c suffix, then it is assumed to be the actual"
echo "# test code, in which case it will be parsed, and the data list will"
echo "# be generated on the fly."
exit 1
fi
# convert the input file to an absolute path
D=`dirname "$1"`
B=`basename "$1"`
INPUTFILE="`cd \"$D\" 2>/dev/null && pwd || echo \"$D\"`/$B"
# enable /**/
shopt -s globstar
[ -e "$HOMEDIR" ] && rm -rf "$HOMEDIR"
mkdir -p "$TESTDIR"
pushd "$TESTDIR" >/dev/null
if [ "${INPUTFILE:(-2)}" == ".c" ] ; then
BuildDataFile "$INPUTFILE" "$INTERFILE"
FilterInputFile "$INTERFILE" "$DATAFILE"
else
FilterInputFile "$INPUTFILE" "$DATAFILE"
fi
ProcessDataFile "$DATAFILE"
popd >/dev/null
echo "== SKIPPED LINES: =================================================="
[ -e "$SKIPLOG" ] && cat "$SKIPLOG" || echo "(none)"
echo "== ERROR SUMMARY: =================================================="
[ -e "$ERRORLOG" ] && cat "$ERRORLOG" || echo "(no errors)"
exit $EXITCODE
|
glycerine/vj
|
src/veracity/testsuite/filespec_bash_ref.sh
|
Shell
|
apache-2.0
| 8,535 |
#!/bin/sh
##########################################################################
# If not stated otherwise in this file or this component's Licenses.txt
# file the following copyright and licenses apply:
#
# Copyright 2016 RDK Management
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##########################################################################
if [ -f /etc/device.properties ];then
. /etc/device.properties
fi
if [ -d /etc/snmpv3/certs ]; then
mkdir -p /tmp/.snmp/tls/private
mkdir -p /tmp/.snmp/tls/ca-certs
mkdir -p /tmp/.snmp/tls/certs
cp /etc/snmpv3/certs/rdk-manager.crt /tmp/.snmp/tls/certs
cp /etc/snmpv3/certs/rdkv-snmpd.crt /tmp/.snmp/tls/certs
cp /etc/snmpv3/certs/RDK-SNMPV3-CA.crt /tmp/.snmp/tls/ca-certs
if [ -f /usr/bin/configparamgen ]; then
configparamgen jx /etc/snmpv3/certs/wekorwpap.jlg /tmp/.snmp/tls/private/rdkv-snmpd.key
if [ -f /tmp/.snmp/tls/private/rdkv-snmpd.key ]; then
chmod 600 /tmp/.snmp/tls/private/rdkv-snmpd.key
fi
fi
fi
|
rdkcmf/rdk-sysint
|
lib/rdk/prepare_snmpv3_env.sh
|
Shell
|
apache-2.0
| 1,569 |
#!/bin/bash
# Copyright 2015 Giuseppe Maxia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
cd $(dirname $0)
for N in $(seq 1 9)
do
if [ -d node$N ]
then
NODES[$N]=node$N
fi
done
COUNT=1
for NODE in ${NODES[*]}
do
$NODE/use test -e "drop table if exists test_$NODE"
$NODE/use test -e "create table test_$NODE( id int not null primary key, serverid int, dbport int, node varchar(100), ts timestamp)"
# Insert a random number of records
RECS=$(($RANDOM%20+1))
# RECS=$(shuf -i1-20 -n1)
[ -z "$RECS" ] && RECS=$COUNT
for REC in $(seq 1 $RECS)
do
$NODE/use test -e "insert into test_$NODE values ($REC, @@server_id, @@port, '$NODE', null)"
done
COUNT=$(($COUNT+1))
echo "# NODE $NODE created table test_$NODE - inserted $RECS rows"
done
sleep 3
echo "# Data in all nodes"
for NODE in ${NODES[*]}
do
$NODE/use -BN -e 'select @@server_id'
for TABLE_NAME in ${NODES[*]}
do
$NODE/use test -BN -e "select 'test_$TABLE_NAME' as table_name, count(*) from test_$TABLE_NAME"
done
done
|
datacharmer/mysql-replication-samples
|
multi_source/test_all_masters_replication.sh
|
Shell
|
apache-2.0
| 1,589 |
#!/usr/bin/env bash
# Script used to collect and upload test coverage (mostly by travis).
# Usage ./test_coverage_upload.sh [log_file]
set -o pipefail
LOG_FILE=${1:-test-coverage.log}
# We collect the coverage
COVERDIR=covdir PASSES='build build_cov cov' ./scripts/test.sh 2>&1 | tee "${LOG_FILE}"
test_success="$?"
# We try to upload whatever we have:
bash <(curl -s https://codecov.io/bash) -f ./covdir/all.coverprofile -cF all || exit 2
# Expose the original status of the test coverage execution.
exit ${test_success}
|
etcd-io/etcd
|
scripts/codecov_upload.sh
|
Shell
|
apache-2.0
| 528 |
#!/bin/bash
#
# Copyright (c) 2015 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# usage: water_launcher.sh <nodecount> <noderam>
if [ ! -z $1 ]; then
NODES_NUMBER=$1
else
NODES_NUMBER=2
fi
if [ ! -z $2 ]; then
NODE_MEMORY=$2
else
NODE_MEMORY="1g"
fi
UUID="`uuidgen`"
TMP_DIR="/tmp/h2o/$UUID"
OUTPUT_DIR="/h2o/$UUID"
NOTIFY_FILE="$TMP_DIR/h2o_notify.txt"
TIMEOUT="60"
H2OUSER="h2ouser"
H2OPASS="h2opass"
if [ -f .water-driver ]; then
. .water-driver
fi
mkdir -p $TMP_DIR && hadoop jar water-driver.jar -username $H2OUSER -password $H2OPASS -timeout $TIMEOUT -nodes $NODES_NUMBER -mapperXmx $NODE_MEMORY -output $OUTPUT_DIR -notify $NOTIFY_FILE -disown
|
trustedanalytics/platform-ansible
|
roles/h2o/files/water_launcher.sh
|
Shell
|
apache-2.0
| 1,184 |
#!/usr/bin/env bash
set -e
export BASE=$(pwd)
export PATH=/usr/local/ruby/bin:/usr/local/go/bin:$PATH
export GOPATH=${BASE}/gopath
semver=`cat ${BASE}/version-semver/number`
goversion_suffix=""
if [ ! -z "${GOVERSION}" ]; then
goversion_suffix="-${GOVERSION}"
fi
filename="bosh-agent-${semver}${goversion_suffix}-${GOOS}-${GOARCH}"
if [[ $GOOS = 'windows' ]]; then
filename="${filename}.exe"
fi
timestamp=`date -u +"%Y-%m-%dT%H:%M:%SZ"`
go_ver=`go version | cut -d ' ' -f 3`
cd gopath/src/github.com/cloudfoundry/bosh-agent
git_rev=`git rev-parse --short HEAD`
version="${semver}-${git_rev}-${timestamp}-${go_ver}"
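# For illustration, a hypothetical resulting version string:
#   271.0.0-ab12cd3-2015-06-01T12:00:00Z-go1.4.2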
sed -i 's/\[DEV BUILD\]/'"$version"'/' main/version.go
bin/build
shasum -a 256 out/bosh-agent
cp out/bosh-agent "${BASE}/${DIRNAME}/${filename}"
|
mattcui/bosh-agent
|
ci/tasks/build.sh
|
Shell
|
apache-2.0
| 776 |
#!/bin/sh
if [ $# -ne 2 ]
then
echo "USAGE: hivetomysql.sh scripttype mappersnum"
exit 1
fi
scripttype=$1 # script type: 0 full; 1 whitelist account; 2 regulation
mappersnum=$2
ls_date=$(date -d last-day +%Y%m%d)
temptable="ods_bhv_user_dynamic_expo_plat_filter"
# when scripttype is 2, you must set regulations; a regulation is required
regulations=" uid like \"%234567\" "
# set buckets' number
bucketsnum=2
# database connection settings for bucket 0
HOSTNAME[0]="10.77.113.59"
PORT[0]="4015"
USERNAME[0]="plat_recomment"
PASSWORD[0]="eb3d9517ee3381c"
DBNAME[0]="plat_recomment" #database
TABLENAME[0]="exposure" #table
# database connection settings for bucket 1
HOSTNAME[1]="10.77.113.59"
PORT[1]="4015"
USERNAME[1]="plat_recomment"
PASSWORD[1]="eb3d9517ee3381c"
DBNAME[1]="plat_recomment" #database
TABLENAME[1]="exposure" #table
echo $ls_date
echo "creating temptable name in hive is $temptable"
# create temptable
hive -e "create table if not exists $temptable(
id STRING,
auid STRING,
cuid STRING,
buid STRING,
scene STRING,
material STRING,
type STRING,
appkey STRING,
container STRING,
blank STRING,
time TIMESTAMP ) clustered by (auid) into $bucketsnum buckets
row format delimited fields terminated by '\t' stored as textfile"
# deal with datas into temptable
case $1 in
0 )
hive -hiveconf startdate=$ls_date -hiveconf temptable=$temptable -f full.sql ;;
1 )
hive -hiveconf startdate=$ls_date -hiveconf temptable=$temptable -f whitelist.sql ;;
2 )
hive -hiveconf startdate=$ls_date -hiveconf temptable=$temptable -hiveconf regulations="$regulations" -f regulation.sql ;;
* )
hive -hiveconf startdate=$ls_date -hiveconf temptable=$temptable -f whitelist.sql ;;
esac
# load datas of hive into mysql
for(( k=0 ; k < $bucketsnum ; k++))
do
/data0/weibo_plat/fenggong/etc/sqoop-1.4.6/bin/sqoop-export --connect jdbc:mysql://${HOSTNAME[$k]}:${PORT[$k]}/${DBNAME[$k]} --username ${USERNAME[$k]} --password ${PASSWORD[$k]} --table ${TABLENAME[$k]} --export-dir /user/weibo_plat/warehouse/$temptable/00000${k}_0 --fields-terminated-by '\t' --num-mappers $mappersnum
done
# truncate temptable
#hive -e "truncate table $temptable"
hive -e "drop table $temptable"
|
penetest/hivetomysql
|
hivetomysql.sh
|
Shell
|
apache-2.0
| 2,283 |
#!/bin/bash
#OLVer megabuilder
###############################################################################
# miss.c generator
###
generate_miss_c () {
cat >miss.c <<EOF
/*
* This file is generated by 'build_config.sh' script to handle missing functions under test.
*/
#include <stdio.h>
void sendExceptionFromCurrentThread(const char* buff);
void test_agent_recovery();
void __missing_function( const char* fname )
{
char buff[1024];
sprintf(buff, "Function not found in any library: %s", fname);
sendExceptionFromCurrentThread(buff );
test_agent_recovery();
}
EOF
}
###############################################################################
# Main
###
generate_miss_c
export OLVERTERM=$1
echo "Checking missing LSB symbols and libraries..."
# try making the package and collect the list of undefined library symbols
make >makelog 2>&1
cat makelog | grep "undefined reference to" |sed -n 's/\([^`]\+\)`\([a-zA-Z_][a-zA-Z0-9_]\+\)\([^$]\)/\2/p' > undefined_symbols.txt
#cat makelog | grep "ld: cannot find -l" | awk -F'ld: cannot find -l' '{print $2}' > undefined_libs.txt
rm -f makelog
echo missing
# create "miss.c" containing list of missing symbols
cat undefined_symbols.txt | sed -n 's/\([a-zA-Z0-9_]\)\+/void \0 ( void ) {__missing_function("\0");} /p' >>miss.c
gcc -I. -c miss.c -o miss.o
rm undefined_symbols.txt
|
levenkov/olver
|
src/agent/build_config.sh
|
Shell
|
apache-2.0
| 1,357 |
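# NOTE: this sampler assumes its caller has already defined these variables
# (hypothetical values shown for illustration): data_file=/tmp/pmic.csv,
# head="t1,t2,bat_v,", hw_dirs (a list of hwmon sysfs directories),
# hwmon_nodes_<i> (node names per directory) and battery_nodes (entries
# under /sys/class/power_supply/battery).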
if [ -f ${data_file} ]
then
rm ${data_file}
fi
echo ${head} >>${data_file}
while true
do
i=0
for dir in ${hw_dirs};
do
temp_var="hwmon_nodes_$i"
for t in $(eval "echo \$$temp_var")
do
echo -n "$(cat "${dir}/${t}")," >>${data_file}
done
((i++))
done
for bt_nd in ${battery_nodes}
do
echo -n "$(cat /sys/class/power_supply/battery/${bt_nd})," >>${data_file}
done
echo "" >>${data_file}
sleep 1
done
|
christingde/pmicplot
|
.target.sh
|
Shell
|
apache-2.0
| 429 |
#!/usr/bin/env bash
# Set colours
GREEN="\e[32m"
RED="\e[41m\e[37m\e[1m"
YELLOW="\e[33m"
WHITE="\e[0m"
USERNAME=$1
PASSWORD=$2
IS_PUBLIC_OS1=$3
if [ "x${IS_PUBLIC_OS1}" != "x" ]; then
echo -e $GREEN"Running on OS1 Public, updating DNS"$WHITE
sudo bash -c "cat >> /etc/sysconfig/network-scripts/ifcfg-eth0 <<EOT
DNS1=209.132.186.218
EOT"
sudo bash -c "cat > /etc/resolv.conf <<EOT
# Generated by NetworkManager
search osop.rhcloud.com
nameserver 209.132.186.218
EOT"
fi
echo -e $GREEN"Registering for: $USERNAME"$WHITE
sudo subscription-manager register --username $USERNAME --password $PASSWORD
sudo subscription-manager list --available --matches "Employee SKU"
# https://lists.openshift.redhat.com/openshift-archives/users/2015-April/msg00021.html
# Select the last employee SKU and hope its "good"
POOL_ID=$(sudo subscription-manager list --available | sed -n '/Employee SKU/,/System Type/p' | grep "Pool ID" | tail -1 | cut -d':' -f2 | xargs)
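# Sketch of what the pipeline above extracts, given hypothetical output:
#   Subscription Name: Employee SKU
#   ...
#   Pool ID:           1234567890abcdef01234567890abcde
#   System Type:       Virtual
# -> POOL_ID=1234567890abcdef01234567890abcde (from the last matching SKU)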
echo -e $GREEN"Trying PoolID: $POOL_ID"$WHITE
sudo subscription-manager attach --pool=$POOL_ID
sudo yum repolist
|
garethahealy/dune-world
|
install/openstack/1_rhelsubs.sh
|
Shell
|
apache-2.0
| 1,111 |
#!/bin/bash
set -e
set -x
which pkg-config
echo "PREFIX := $PREFIX" >> Makefile.conf
make install -j$CPU_COUNT
|
litex-hub/litex-conda-eda
|
syn/symbiflow-yosys-plugins/build.sh
|
Shell
|
apache-2.0
| 115 |
git clone https://github.com/robolectric/robolectric.git
|
android-opensource-library-56/android-opensource-library-56
|
09-04-Robolectric/setup.sh
|
Shell
|
apache-2.0
| 57 |
#!/usr/bin/env bash
# update parallels-tools
# /usr/lib/parallels-tools/install --install-unattended --restore-on-fail --progress
# install node & npm
curl --silent --location https://deb.nodesource.com/setup_0.12 | bash -
apt-get install --yes nodejs
# optional step for node
# apt-get install --yes build-essential
# install system packages
apt-get install -y htop git curl
# Run User Configurations
su -c "source /vagrant/user-bootstrap.sh" vagrant
|
publicarray/Real-time-CRUD
|
bootstrap.sh
|
Shell
|
apache-2.0
| 456 |
#!/bin/bash
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
ROOT_DIR="${SCRIPT_DIR}/../../"
# freeze the spec version to make SemanticAttributes generation reproducible
SPEC_VERSION=v1.8.0
OTEL_SEMCONV_GEN_IMG_VERSION=0.9.0
cd ${SCRIPT_DIR}
rm -rf opentelemetry-specification || true
mkdir opentelemetry-specification
cd opentelemetry-specification
git init
git remote add origin https://github.com/open-telemetry/opentelemetry-specification.git
git fetch origin "$SPEC_VERSION"
git reset --hard FETCH_HEAD
cd ${SCRIPT_DIR}
docker run --rm \
-v ${SCRIPT_DIR}/opentelemetry-specification/semantic_conventions/trace:/source \
-v ${SCRIPT_DIR}/templates:/templates \
-v ${ROOT_DIR}/opentelemetry-semantic-conventions/src/opentelemetry/semconv/trace/:/output \
otel/semconvgen:$OTEL_SEMCONV_GEN_IMG_VERSION \
-f /source code \
--template /templates/semantic_attributes.j2 \
--output /output/__init__.py \
-Dclass=SpanAttributes
docker run --rm \
-v ${SCRIPT_DIR}/opentelemetry-specification/semantic_conventions/resource:/source \
-v ${SCRIPT_DIR}/templates:/templates \
-v ${ROOT_DIR}/opentelemetry-semantic-conventions/src/opentelemetry/semconv/resource/:/output \
otel/semconvgen:$OTEL_SEMCONV_GEN_IMG_VERSION \
-f /source code \
--template /templates/semantic_attributes.j2 \
--output /output/__init__.py \
-Dclass=ResourceAttributes
cd "$ROOT_DIR"
|
open-telemetry/opentelemetry-python
|
scripts/semconv/generate.sh
|
Shell
|
apache-2.0
| 1,404 |
LOGNAME='soslogs/agent'$*'.log'
mkdir -p soslogs
classpath=.:lib/jsi-1.0b2p1.jar:lib/log4j-1.2.15.jar:lib/trove-0.1.8.jar:lib/uncommons-maths-1.2.jar:lib/javaml/ajt-1.20.jar:lib/javaml/commons-math-1.1.jar:lib/javaml/javaml-0.1.4.jar:lib/javaml/weka.jar:lib/genetic/commons-math3-3.1.1/commons-math3-3.1.1.jar:lib/mysql-connector-java-5.1.18-bin.jar
java -Xms3G -Dfile.encoding=UTF-8 -classpath $classpath sos.LaunchPrecompute -h $1 2>&1 | tee "$LOGNAME"
|
alim1369/sos
|
start-precompute.sh
|
Shell
|
apache-2.0
| 457 |
#!/bin/bash
# run this script giving it a single argument eg. ./docker-cli.sh ronbo-was-here
# that will attempt to create an empty file name ronbo-was-here in the current directory
docker run --volume $(pwd):/src --workdir=/src --user=$(id -u $(whoami)):$(id -g $(whoami)) --env RUNAS=$USER --rm --attach stdout --attach stderr --interactive toolbox "$@"
|
kurron/docker-experiment
|
docker-as-tool/docker-cli.sh
|
Shell
|
apache-2.0
| 360 |
sudo apt-get update
sudo apt-get install samba
sudo cp /etc/samba/smb.conf ~
sudo nano /etc/samba/smb.conf
sudo service smbd restart
testparm
|
bayvictor/distributed-polling-system
|
bin/samba_commandline.sh
|
Shell
|
apache-2.0
| 152 |
#!/bin/bash
#FILE=20140822Introduction
FILE=20140822PitchBugFarm
# CREATE HANDOUTS HERE
#Convert ArduinoCourseAll.pdf to handout format (4 columns, 2 rows)
java -cp Multivalent.jar tool.pdf.Impose -dim 4x2 -paper 84x29.7cm $FILE".pdf"
#Rename the resulting PDF
mv $FILE"-up.pdf" $FILE"Handouts.pdf"
#Negate the resulting PDF
convert $FILE"Handouts.pdf" -negate $FILE"HandoutsNegated.pdf"
# SIMPLY INVERT HERE
convert $FILE".pdf" -negate $FILE"Negated.pdf"
|
DIYbioGroningen/Makercourse
|
Presentations/ProjectPitches/CreateHandouts.sh
|
Shell
|
artistic-2.0
| 463 |
#!/bin/sh
# Use shell script here, because
# RAKUDO: Can`t modify %*ENV;
echo -n "foo=bar" |
REQUEST_METHOD='POST' \
SERVER_NAME='test.foo' \
TEST_RESULT='{"foo" => "bar"}' \
TEST_NAME='Post foo=bar' \
./t/cgi_post_test;
echo -n "foo=bar&boo=her" |
REQUEST_METHOD='POST' \
SERVER_NAME='test.foo' \
TEST_RESULT='{"foo" => "bar", "boo" => "her"}' \
TEST_NAME='Post foo=bar&boo=her' \
./t/cgi_post_test;
echo -n "test=foo&test=bar" |
REQUEST_METHOD='POST' \
SERVER_NAME='test.foo' \
TEST_RESULT='{"test" => ["foo", "bar"] }' \
TEST_NAME='Post test=foo&test=bar' \
./t/cgi_post_test;
echo -n "test=foo&test=bar&foo=bar" |
REQUEST_METHOD='POST' \
SERVER_NAME='test.foo' \
TEST_RESULT='{"test" => ["foo", "bar"], "foo" => "bar"}' \
TEST_NAME='Post test=foo&test=bar&foo=bar' \
./t/cgi_post_test;
echo -n "test=foo" |
REQUEST_METHOD='POST' \
SERVER_NAME='test.foo' \
QUERY_STRING="boom=bar" \
TEST_RESULT='{"test" => "foo", "boom" => "bar" }' \
TEST_NAME='Post test=foo Get boom=bar (test get and post mix)' \
./t/cgi_post_test;
echo -n "test=foo" |
REQUEST_METHOD='POST' \
SERVER_NAME='test.foo' \
QUERY_STRING="test=bar" \
TEST_RESULT='{"test" => ["bar", "foo"] }' \
TEST_NAME='Post test=foo Get test=bar (test get and post mix)' \
./t/cgi_post_test;
|
ab5tract/november
|
p6w/t/02-cgi_post.t.sh
|
Shell
|
artistic-2.0
| 1,270 |
#!/bin/sh
# Copyright (c) 2015, Robert T Dowling
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
list=$(cd tests; echo test*.ic)
for i in $list ; do
echo "===" $i "============================="
./intcas < tests/$i
done > regress.out
diff regress.ok regress.out
|
RobertTDowling/java-caslib
|
test/regress.sh
|
Shell
|
bsd-2-clause
| 1,532 |
#!/bin/sh
${WL_HOME}/common/bin/wlst.sh -loadProperties ./setDomainTLS.prop ./setDomainTLS.py
|
kapfenho/iam-deployer
|
lib/weblogic/wlst/setDomainTLS/setDomainTLS.sh
|
Shell
|
bsd-2-clause
| 95 |
# -*- sh -*-
function gvman()
{
local manpage=$1
local tempfile=$(mktemp -t gvman.XXXXXXXX)
local man_exit_code=0
man -Tps $manpage > $tempfile
man_exit_code=$?
if [ "$man_exit_code" == "0" ]; then
gv --spartan $tempfile
fi
rm $tempfile
return $man_exit_code
}
|
jtgans/dotbash
|
lib/gvman.sh
|
Shell
|
bsd-2-clause
| 309 |
perl Scripts/sort-Xcode-project-file DCTAuth.xcodeproj/project.pbxproj
|
danielctull/DCTAuth
|
sort-xcode-project.sh
|
Shell
|
bsd-3-clause
| 70 |
dropdb pycon2013; createdb pycon2013 && gondor sqldump primary |./manage.py dbshell && ./manage.py upgradedb -e
|
eldarion/pycon
|
refresh.sh
|
Shell
|
bsd-3-clause
| 113 |
#!/bin/bash
# This script is for Geoffrey; used for entropy.
while read line
do
echo "$line"
done < $1
|
GeoffreyUniversitySSI/CryptoCIRCLEAN
|
entropy/Morceau Script/readFile.sh
|
Shell
|
bsd-3-clause
| 107 |
#!/usr/bin/env bash
# Copyright (c) 2014-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
function add_repo() {
REPO=$1
echo "Adding repository: $REPO"
if [[ $DISTRO = "lucid" ]]; then
package python-software-properties
sudo add-apt-repository $REPO
else
sudo add-apt-repository -y $REPO
fi
}
function main_ubuntu() {
if [[ $DISTRO = "precise" ]]; then
add_repo ppa:ubuntu-toolchain-r/test
elif [[ $DISTRO = "lucid" ]]; then
add_repo ppa:lucid-bleed/ppa
fi
sudo apt-get update -y
if [[ $DISTRO = "lucid" ]]; then
package git-core
else
package git
fi
package wget
package unzip
package build-essential
package flex
package devscripts
package debhelper
package python-pip
package python-dev
# package linux-headers-generic
package ruby-dev
package gcc
package doxygen
package autopoint
package libssl-dev
package liblzma-dev
package uuid-dev
package libpopt-dev
package libdpkg-dev
package libudev-dev
package libblkid-dev
package libbz2-dev
package libreadline-dev
package libcurl4-openssl-dev
if [[ $DISTRO = "precise" ]]; then
package ruby1.9.3
sudo update-alternatives --set ruby /usr/bin/ruby1.9.1
sudo update-alternatives --set gem /usr/bin/gem1.9.1
fi
if [[ $DISTRO = "lucid" ]]; then
package libopenssl-ruby
package clang
package g++-multilib
install_gcc
elif [[ $DISTRO = "precise" ]]; then
# Need gcc 4.8 from ubuntu-toolchain-r/test to compile RocksDB/osquery.
package gcc-4.8
package g++-4.8
sudo update-alternatives \
--install /usr/bin/gcc gcc /usr/bin/gcc-4.8 150 \
--slave /usr/bin/g++ g++ /usr/bin/g++-4.8
package clang-3.4
package clang-format-3.4
fi
if [[ $DISTRO = "precise" || $DISTRO = "lucid" || $DISTRO = "wily" ]]; then
package rubygems
fi
if [[ $DISTRO = "precise" || $DISTRO = "lucid" ]]; then
# Temporary removes (so we can override default paths).
package autotools-dev
#remove_package pkg-config
remove_package autoconf
remove_package automake
remove_package libtool
#install_pkgconfig
package pkg-config
install_autoconf
install_automake
install_libtool
else
package clang-3.6
package clang-format-3.6
sudo ln -sf /usr/bin/clang-3.6 /usr/bin/clang
sudo ln -sf /usr/bin/clang++-3.6 /usr/bin/clang++
sudo ln -sf /usr/bin/clang-format-3.6 /usr/bin/clang-format
sudo ln -sf /usr/bin/llvm-config-3.6 /usr/bin/llvm-config
sudo ln -sf /usr/bin/llvm-symbolizer-3.6 /usr/bin/llvm-symbolizer
package pkg-config
package autoconf
package automake
package libtool
fi
if [[ $DISTRO = "xenial" ]]; then
# Ubuntu bug 1578006
package plymouth-label
fi
set_cc gcc #-4.8
set_cxx g++ #-4.8
install_cmake
if [[ $DISTRO = "lucid" ]]; then
gem_install fpm -v 1.3.3
install_openssl
install_bison
else
# No clang++ on lucid
set_cc clang
set_cxx clang++
gem_install fpm
package bison
fi
if [[ $DISTRO = "xenial" ]]; then
remove_package libunwind-dev
fi
install_boost
install_gflags
install_glog
install_google_benchmark
install_snappy
install_rocksdb
install_thrift
install_yara
install_asio
install_cppnetlib
install_sleuthkit
# Need headers and PC macros
if [[ $DISTRO = "vivid" || $DISTRO = "wily" || $DISRO = "xenial" ]]; then
package libgcrypt20-dev
else
package libgcrypt-dev
fi
package libdevmapper-dev
package libaudit-dev
package libmagic-dev
install_libaptpkg
install_iptables_dev
install_libcryptsetup
if [[ $DISTRO = "lucid" ]]; then
package python-argparse
package python-jinja2
package python-psutil
elif [[ $DISTRO = "xenial" ]]; then
package python-setuptools
fi
install_aws_sdk
}
|
PickmanSec/osquery
|
tools/provision/ubuntu.sh
|
Shell
|
bsd-3-clause
| 4,072 |
#!/bin/bash
set -exo pipefail
export DEBEMAIL="[email protected]"
export DEBFULLNAME="Automatic Builder (github-actions)"
export DEB_BUILD_OPTIONS="noddebs"
export DIST="$(echo "${TARGET}" | cut -d- -f2)"
MY_P="${PN}_${PV}"
ARCH="$(echo "${TARGET}" | cut -d- -f3)"
REV=
case ${DIST} in
bullseye)
REV="1~11bullseye"
;;
bookworm)
REV="1~12bookworm"
;;
sid)
REV="1~sid"
;;
focal)
REV="1~11.0focal"
;;
impish)
REV="1~11.3impish"
;;
*)
echo "error: unsupported dist: ${DIST}"
exit 1
;;
esac
download_pbuilder_chroot() {
local index="$(wget -q -O- https://distfiles.rgm.io/pbuilder-chroots/LATEST/)"
local archive="$(echo "${index}" | sed -n "s/.*\(pbuilder-chroot-${DIST}-${ARCH}-.*\)\.sha512.*/\1/p")"
local p="$(echo "${index}" | sed -n "s/.*pbuilder-chroot-${DIST}-${ARCH}-\(.*\)\.tar.*\.sha512.*/pbuilder-chroots-\1/p")"
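    # For example, a hypothetical index entry:
    #   pbuilder-chroot-bullseye-amd64-20220101.tar.xz.sha512
    # would give archive=pbuilder-chroot-bullseye-amd64-20220101.tar.xz
    # and p=pbuilder-chroots-20220101.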
pushd "${SRCDIR}" > /dev/null
wget -c "https://distfiles.rgm.io/pbuilder-chroots/${p}/${archive}"{,.sha512}
sha512sum --check --status "${archive}.sha512"
sudo rm -rf /tmp/pbuilder
mkdir /tmp/pbuilder
fakeroot tar --checkpoint=1000 -xf "${archive}" -C /tmp/pbuilder
popd > /dev/null
}
download_orig() {
local i=0
local out=0
local url="https://distfiles.rgm.io/${PN}/${P}/${P}.tar.xz"
while [[ $i -lt 20 ]]; do
set +ex
((i++))
echo "waiting for ${P}.tar.xz: $i/20"
wget -q --spider --tries 1 "${url}"
out=$?
set -ex
if [[ $out -eq 0 ]]; then
wget -c "${url}"
mv "${P}.tar.xz" "${BUILDDIR}/${MY_P}.orig.tar.xz"
return
fi
if [[ $out -ne 8 ]]; then
exit $out
fi
sleep 30
done
echo "failed to find orig distfile. please check if that task succeeded."
exit 1
}
create_reprepro_conf() {
echo "Origin: blogc"
echo "Label: blogc"
echo "Codename: ${DIST}"
echo "Architectures: source amd64"
echo "Components: main"
echo "Description: Apt repository containing blogc snapshots"
echo
}
download_orig
rm -rf "${BUILDDIR}/${P}"
tar -xf "${BUILDDIR}/${MY_P}.orig.tar.xz" -C "${BUILDDIR}"
cp -r "${SRCDIR}/debian" "${BUILDDIR}/${P}/"
pushd "${BUILDDIR}/${P}" > /dev/null
## skip build silently when new version is older than last changelog version (version bump)
if ! dch \
--distribution "${DIST}" \
--newversion "${PV}-${REV}" \
"Automated build for ${DIST}"
then
exit 0
fi
download_pbuilder_chroot
sudo cowbuilder \
--update \
--basepath "/tmp/pbuilder/${DIST}-${ARCH}/base.cow"
RES="${BUILDDIR}/deb/${DIST}"
mkdir -p "${RES}"
pdebuild \
--pbuilder cowbuilder \
--buildresult "${RES}" \
--debbuildopts -sa \
-- --basepath "/tmp/pbuilder/${DIST}-${ARCH}/base.cow"
popd > /dev/null
mkdir -p "${BUILDDIR}/deb-repo/conf"
create_reprepro_conf > "${BUILDDIR}/deb-repo/conf/distributions"
pushd "${BUILDDIR}/deb-repo" > /dev/null
for i in "../deb/${DIST}"/*.changes; do
reprepro include "${DIST}" "${i}"
done
popd > /dev/null
tar \
-cJf "blogc-deb-repo-${DIST}-${ARCH}-${PV}.tar.xz" \
--exclude ./deb-repo/conf \
--exclude ./deb-repo/db \
./deb-repo
tar \
-cJf "blogc-deb-${DIST}-${ARCH}-${PV}.tar.xz" \
./deb
|
blogc/blogc
|
build-aux/build-debian.sh
|
Shell
|
bsd-3-clause
| 3,385 |
#!/bin/bash
# Copyright (c) 2013 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
EXECUTABLES=python${NACL_EXEEXT}
# Currently this package only builds on linux.
# The build relies on certain host binaries and python's configure
# requires us to set --build= as well as --host=.
HOST_BUILD_DIR=${WORK_DIR}/build-nacl-host
export PATH=${HOST_BUILD_DIR}/inst/usr/local/bin:${PATH}
BuildHostPython() {
MakeDir ${HOST_BUILD_DIR}
ChangeDir ${HOST_BUILD_DIR}
if [ -f python -a -f Parser/pgen ]; then
return
fi
LogExecute ${SRC_DIR}/configure
LogExecute make -j${OS_JOBS} build_all
LogExecute make install DESTDIR=inst
}
ConfigureStep() {
BuildHostPython
ChangeDir ${BUILD_DIR}
# We pre-seed configure with certain results that it cannot determine
# since we are doing a cross compile. The $CONFIG_SITE file is sourced
# by configure early on.
export CONFIG_SITE=${START_DIR}/config.site
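  # A config.site for cross-compiling Python typically pre-seeds configure
  # cache answers such as (illustrative, not necessarily this package's
  # exact file):
  #   ac_cv_file__dev_ptmx=no
  #   ac_cv_file__dev_ptc=no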
# Disable ipv6 since configure claims it requires a working getaddrinfo
# which we do not provide. TODO(sbc): remove this once nacl_io supports
# getaddrinfo.
EXTRA_CONFIGURE_ARGS="--disable-ipv6"
EXTRA_CONFIGURE_ARGS+=" --with-suffix=${NACL_EXEEXT}"
EXTRA_CONFIGURE_ARGS+=" --build=x86_64-linux-gnu"
export LIBS="-ltermcap"
if [ "${NACL_LIBC}" = "newlib" ]; then
LIBS+=" -lglibc-compat"
fi
DefaultConfigureStep
if [ "${NACL_LIBC}" = "newlib" ]; then
LogExecute cp ${START_DIR}/Setup.local Modules/
fi
}
BuildStep() {
export CROSS_COMPILE=true
export MAKEFLAGS="PGEN=${WORK_DIR}/build-nacl-host/Parser/pgen"
SetupCrossEnvironment
DefaultBuildStep
}
TestStep() {
if [ ${NACL_ARCH} = "pnacl" ]; then
local pexe=python${NACL_EXEEXT}
TranslateAndWriteSelLdrScript ${pexe} x86-64 python.x86-64.nexe python
fi
}
|
kosyak/naclports_samsung-smart-tv
|
ports/python3/build.sh
|
Shell
|
bsd-3-clause
| 1,880 |
#!/bin/sh
mkpath.pl $CABIO_DATA_DIR/relative_clone/human
cd $CABIO_DATA_DIR/relative_clone/human
echo "Removing existing files from $CABIO_DATA_DIR/relative_clone/human"
rm -rf all_est*
rm -rf seq_gene.*
echo "\nDownloading EST Annotations for human from UCSC"
wget -nv ftp://hgdownload.cse.ucsc.edu/goldenPath/hg19/database/all_est.txt.gz
#wget -nv ftp://ftp.ncbi.nih.gov/genomes/MapView/Homo_sapiens/sequence/BUILD.37.1/initial_release/seq_gene.md.gz
wget -nv ftp://ftp.ncbi.nlm.nih.gov/genomes/MapView/Homo_sapiens/sequence/current/initial_release/seq_gene.md.gz
echo "Unzipping downloaded files"
gunzip -f all_est.txt.gz &
gunzip -f seq_gene.md.gz
wait
mkpath.pl $CABIO_DATA_DIR/relative_clone/mouse
cd $CABIO_DATA_DIR/relative_clone/mouse
echo "\nRemoving existing files from $CABIO_DATA_DIR/relative_clone/mouse"
rm -rf all_est*
rm -rf seq_gene.*
echo "Downloading EST Annotations for mouse from UCSC"
wget -nv ftp://hgdownload.cse.ucsc.edu/goldenPath/mm9/database/all_est.txt.gz
wget -nv ftp://ftp.ncbi.nih.gov/genomes/MapView/Mus_musculus/sequence/current/initial_release/seq_gene.md.gz
gunzip -f all_est.txt.gz &
gunzip -f seq_gene.md.gz
echo "\nFinished downloading the human and mouse EST Annotations from UCSC"
|
NCIP/cabio
|
software/cabio-database/scripts/download/download_ESTAnnotationsData_UCSC.sh
|
Shell
|
bsd-3-clause
| 1,234 |
#!/bin/sh
set -ex
# show available schemes
xcodebuild -list -project ./ResearchUXFactory.xcodeproj
# run on pull request
if [[ "$TRAVIS_PULL_REQUEST" != "false" ]]; then
fastlane test scheme:"ResearchUXFactory"
exit $?
fi
|
syoung-smallwisdom/ResearchUXFactory-iOS
|
travis/run-tests.sh
|
Shell
|
bsd-3-clause
| 226 |
#!/bin/bash
gource --key -a 0.5 --loop -i 300 --title RB-PHD-Filter --start-position 0.1 -s 1
|
RangerKD/RFS-SLAM
|
scripts/misc/gource.sh
|
Shell
|
bsd-3-clause
| 94 |
# Copy from https://github.com/Bash-it/bash-it/blob/master/plugins/available/alias-completion.plugin.bash
# Load after the other completions to understand what needs to be completed
# BASH_IT_LOAD_PRIORITY: 203
cite about-plugin
about-plugin 'Automatic completion of aliases'
# References:
# http://superuser.com/a/437508/119764
# http://stackoverflow.com/a/1793178/1228454
# This needs to be a plugin so it gets executed after the completions and the aliases have been defined.
# Bash-it loads its components in the order
# 1) Aliases
# 2) Completions
# 3) Plugins
# 4) Custom scripts
# Automatically add completion for all aliases to commands having completion functions
function alias_completion {
local namespace="alias_completion"
# parse function based completion definitions, where capture group 2 => function and 3 => trigger
local compl_regex='complete( +[^ ]+)* -F ([^ ]+) ("[^"]+"|[^ ]+)'
# parse alias definitions, where capture group 1 => trigger, 2 => command, 3 => command arguments
local alias_regex="alias( -- | )([^=]+)='(\"[^\"]+\"|[^ ]+)(( +[^ ]+)*)'"
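    # Illustrative (hypothetical) lines these regexes match:
    #   complete -o default -F _git g   -> function "_git", trigger "g"
    #   alias gco='git checkout'        -> trigger "gco", command "git",
    #                                      arguments " checkout"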
# create array of function completion triggers, keeping multi-word triggers together
eval "local completions=($(complete -p | sed -Ene "/$compl_regex/s//'\3'/p"))"
(( ${#completions[@]} == 0 )) && return 0
# create temporary file for wrapper functions and completions
local tmp_file; tmp_file="$(mktemp -t "${namespace}-${RANDOM}XXXXXX")" || return 1
local completion_loader; completion_loader="$(complete -p -D 2>/dev/null | sed -Ene 's/.* -F ([^ ]*).*/\1/p')"
# read in "<alias> '<aliased command>' '<command args>'" lines from defined aliases
local line; while read line; do
eval "local alias_tokens; alias_tokens=($line)" 2>/dev/null || continue # some alias arg patterns cause an eval parse error
local alias_name="${alias_tokens[0]}" alias_cmd="${alias_tokens[1]}" alias_args="${alias_tokens[2]# }"
# skip aliases to pipes, boolean control structures and other command lists
# (leveraging that eval errs out if $alias_args contains unquoted shell metacharacters)
eval "local alias_arg_words; alias_arg_words=($alias_args)" 2>/dev/null || continue
# avoid expanding wildcards
read -a alias_arg_words <<< "$alias_args"
# skip alias if there is no completion function triggered by the aliased command
if [[ ! " ${completions[*]} " =~ " $alias_cmd " ]]; then
if [[ -n "$completion_loader" ]]; then
# force loading of completions for the aliased command
eval "$completion_loader $alias_cmd"
# 124 means completion loader was successful
[[ $? -eq 124 ]] || continue
completions+=($alias_cmd)
else
continue
fi
fi
local new_completion="$(complete -p "$alias_cmd" 2>/dev/null)"
# create a wrapper inserting the alias arguments if any
if [[ -n $alias_args ]]; then
local compl_func="${new_completion/#* -F /}"; compl_func="${compl_func%% *}"
# avoid recursive call loops by ignoring our own functions
if [[ "${compl_func#_$namespace::}" == $compl_func ]]; then
local compl_wrapper="_${namespace}::${alias_name}"
echo "function $compl_wrapper {
local compl_word=\$2
local prec_word=\$3
# check if prec_word is the alias itself. if so, replace it
# with the last word in the unaliased form, i.e.,
# alias_cmd + ' ' + alias_args.
if [[ \$COMP_LINE == \"\$prec_word \$compl_word\" ]]; then
prec_word=\"$alias_cmd $alias_args\"
prec_word=\${prec_word#* }
fi
(( COMP_CWORD += ${#alias_arg_words[@]} ))
COMP_WORDS=($alias_cmd $alias_args \${COMP_WORDS[@]:1})
(( COMP_POINT -= \${#COMP_LINE} ))
COMP_LINE=\${COMP_LINE/$alias_name/$alias_cmd $alias_args}
(( COMP_POINT += \${#COMP_LINE} ))
$compl_func \"$alias_cmd\" \"\$compl_word\" \"\$prec_word\"
}" >> "$tmp_file"
new_completion="${new_completion/ -F $compl_func / -F $compl_wrapper }"
fi
fi
# replace completion trigger by alias
if [[ -n $new_completion ]]; then
new_completion="${new_completion% *} $alias_name"
echo "$new_completion" >> "$tmp_file"
fi
done < <(alias -p | sed -Ene "s/$alias_regex/\2 '\3' '\4'/p")
source "$tmp_file" && rm -f "$tmp_file"
}; alias_completion
|
adoyle-h/dotfiles
|
completions/alias_completion.bash
|
Shell
|
bsd-3-clause
| 4,847 |
#!/bin/bash
mkdir -p pinloc-1k-cm81
cd pinloc-1k-cm81
pins="
A1 A2 A3 A4 A6 A7 A8 A9
B1 B2 B3 B4 B5 B6 B7 B8 B9
C1 C2 C3 C4 C5 C9
D1 D2 D3 D5 D6 D7 D8 D9
E1 E2 E3 E4 E5 E7 E8
F1 F3 F7 F8
G1 G3 G4 G5 G6 G7 G8 G9
H1 H4 H5 H7 H9
J1 J2 J3 J4 J6 J7 J8 J9
"
if [ $(echo $pins | wc -w) -ne 63 ]; then
echo "Incorrect number of pins:" $(echo $pins | wc -w)
exit 1
fi
{
echo -n "all:"
for pin in $pins; do
id="pinloc-1k-cm81_${pin}"
echo -n " ${id}.exp"
done
echo
for pin in $pins; do
id="pinloc-1k-cm81_${pin}"
echo "module top(output y); assign y = 0; endmodule" > ${id}.v
echo "set_io y ${pin}" >> ${id}.pcf
echo; echo "${id}.exp:"
echo " ICEDEV=lp1k-cm81 bash ../../icecube.sh ${id} > ${id}.log 2>&1"
echo " ../../../icebox/icebox_explain.py ${id}.asc > ${id}.exp.new"
echo " ! grep '^Warning: pin' ${id}.log"
echo " rm -rf ${id}.tmp"
echo " mv ${id}.exp.new ${id}.exp"
done
} > pinloc-1k-cm81.mk
set -ex
make -f pinloc-1k-cm81.mk -j4
python3 ../pinlocdb.py pinloc-1k-cm81_*.exp > ../pinloc-1k-cm81.txt
|
SymbiFlow/icestorm
|
icefuzz/pinloc/pinloc-1k-cm81.sh
|
Shell
|
isc
| 1,045 |
#!/usr/bin/env sh
# generated from catkin/cmake/template/setup.sh.in
# Sets various environment variables and sources additional environment hooks.
# It tries its best to undo changes from a previously sourced setup file.
# Supported command line options:
# --extend: skips the undoing of changes from a previously sourced setup file
# --local: only considers this workspace but not the chained ones
# In plain sh shell which doesn't support arguments for sourced scripts you can
# set the environment variable `CATKIN_SETUP_UTIL_ARGS=--extend/--local` instead.
# since this file is sourced either use the provided _CATKIN_SETUP_DIR
# or fall back to the destination set at configure time
: ${_CATKIN_SETUP_DIR:=/home/sumukh/Documents/Drona/Src/Lib/ExternalMotionPlanners/OMPLOptimalPlanner/devel}
_SETUP_UTIL="$_CATKIN_SETUP_DIR/_setup_util.py"
unset _CATKIN_SETUP_DIR
if [ ! -f "$_SETUP_UTIL" ]; then
echo "Missing Python script: $_SETUP_UTIL"
return 22
fi
# detect if running on Darwin platform
_UNAME=`uname -s`
_IS_DARWIN=0
if [ "$_UNAME" = "Darwin" ]; then
_IS_DARWIN=1
fi
unset _UNAME
# make sure to export all environment variables
export CMAKE_PREFIX_PATH
if [ $_IS_DARWIN -eq 0 ]; then
export LD_LIBRARY_PATH
else
export DYLD_LIBRARY_PATH
fi
unset _IS_DARWIN
export PATH
export PKG_CONFIG_PATH
export PYTHONPATH
# remember type of shell if not already set
if [ -z "$CATKIN_SHELL" ]; then
CATKIN_SHELL=sh
fi
# invoke Python script to generate necessary exports of environment variables
# use TMPDIR if it exists, otherwise fall back to /tmp
if [ -d "${TMPDIR:-}" ]; then
_TMPDIR="${TMPDIR}"
else
_TMPDIR=/tmp
fi
_SETUP_TMP=`mktemp "${_TMPDIR}/setup.sh.XXXXXXXXXX"`
unset _TMPDIR
if [ $? -ne 0 -o ! -f "$_SETUP_TMP" ]; then
echo "Could not create temporary file: $_SETUP_TMP"
return 1
fi
CATKIN_SHELL=$CATKIN_SHELL "$_SETUP_UTIL" $@ ${CATKIN_SETUP_UTIL_ARGS:-} >> "$_SETUP_TMP"
_RC=$?
if [ $_RC -ne 0 ]; then
if [ $_RC -eq 2 ]; then
echo "Could not write the output of '$_SETUP_UTIL' to temporary file '$_SETUP_TMP': may be the disk if full?"
else
echo "Failed to run '\"$_SETUP_UTIL\" $@': return code $_RC"
fi
unset _RC
unset _SETUP_UTIL
rm -f "$_SETUP_TMP"
unset _SETUP_TMP
return 1
fi
unset _RC
unset _SETUP_UTIL
. "$_SETUP_TMP"
rm -f "$_SETUP_TMP"
unset _SETUP_TMP
# source all environment hooks
_i=0
while [ $_i -lt $_CATKIN_ENVIRONMENT_HOOKS_COUNT ]; do
eval _envfile=\$_CATKIN_ENVIRONMENT_HOOKS_$_i
unset _CATKIN_ENVIRONMENT_HOOKS_$_i
eval _envfile_workspace=\$_CATKIN_ENVIRONMENT_HOOKS_${_i}_WORKSPACE
unset _CATKIN_ENVIRONMENT_HOOKS_${_i}_WORKSPACE
# set workspace for environment hook
CATKIN_ENV_HOOK_WORKSPACE=$_envfile_workspace
. "$_envfile"
unset CATKIN_ENV_HOOK_WORKSPACE
_i=$((_i + 1))
done
unset _i
unset _CATKIN_ENVIRONMENT_HOOKS_COUNT
|
Drona-Org/Drona
|
Src/Lib/ExternalMotionPlanners/OMPLOptimalPlanner/devel/setup.sh
|
Shell
|
mit
| 2,842 |
#!/usr/bin/env node
var program = require('commander');
var pck = require('../package.json');
var path = require('path');
var standalone = require('../lib/standalone.js');
program
.version(pck.version)
.option('-i, --input <file>', 'Input jobs (json format)')
.option('-w, --worker <file>', 'Worker script')
.option('-o, --output <file>', 'Output json', 'standalone.json')
.option('-p, --port <port>', 'Port number for forerunner mananger', 2718)
.option('-n, --numThreads <numThreads>', 'Number of worker threads', 1)
.parse(process.argv);
if (!program.input) {
console.error('Must specify input job file');
process.exit(1);
}
if (!program.worker) {
console.error('Must specify worker script');
process.exit(1);
}
program.numThreads = parseInt(program.numThreads, 10);
if (program.numThreads < 1 || typeof program.numThreads !== 'number' || isNaN(program.numThreads)) {
  console.error('Number of threads is incorrect ಠ_ಠ: %s', program.numThreads);
process.exit(1);
}
var input = path.resolve(process.cwd(), program.input);
var output = path.resolve(process.cwd(), program.output);
var worker = path.resolve(process.cwd(), program.worker);
var options = {
port: parseInt(program.port),
}
console.log('Forerunner - standalone: running %s on %s thread(s)', program.worker, program.numThreads);
// kick off the scripts
standalone(worker, input, output, program.numThreads, options, function(err) {
if (err) {
console.error(err);
process.exit(1);
}
process.exit(0);
});
|
kiernanmcgowan/forerunner-standalone
|
bin/standalone.sh
|
Shell
|
mit
| 1,518 |
#!/bin/sh
set -e
set -u
copy() {
src="$1"
dst="/usr/local/bin/$1"
echo cp "$src" "$dst"
cp "$src" "$dst"
}
copy portable-screencap
copy ipfs-screencap
|
lgierth/ipfs-screencap
|
install.sh
|
Shell
|
mit
| 163 |
#!/usr/bin/env bash
# Variables
readonly projectDir=$(realpath "$(dirname ${BASH_SOURCE[0]})/..")
readonly envHelpersPath="$projectDir/.circleci/env-helpers.inc.sh";
readonly bashEnvCachePath="$projectDir/.circleci/bash_env_cache";
# Load helpers and make them available everywhere (through `$BASH_ENV`).
source $envHelpersPath;
echo "source $envHelpersPath;" >> $BASH_ENV;
####################################################################################################
# Define PUBLIC environment variables for CircleCI.
####################################################################################################
# See https://circleci.com/docs/2.0/env-vars/#built-in-environment-variables for more info.
####################################################################################################
setPublicVar CI "$CI"
setPublicVar PROJECT_ROOT "$projectDir";
setPublicVar CI_AIO_MIN_PWA_SCORE "95";
# This is the branch being built; e.g. `pull/12345` for PR builds.
setPublicVar CI_BRANCH "$CIRCLE_BRANCH";
setPublicVar CI_BUILD_URL "$CIRCLE_BUILD_URL";
setPublicVar CI_COMMIT "$CIRCLE_SHA1";
# `CI_COMMIT_RANGE` is only used on push builds (a.k.a. non-PR, non-scheduled builds and rerun
# workflows of such builds).
setPublicVar CI_GIT_BASE_REVISION "${CIRCLE_GIT_BASE_REVISION}";
setPublicVar CI_GIT_REVISION "${CIRCLE_GIT_REVISION}";
setPublicVar CI_COMMIT_RANGE "$CIRCLE_GIT_BASE_REVISION..$CIRCLE_GIT_REVISION";
setPublicVar CI_PULL_REQUEST "${CIRCLE_PR_NUMBER:-false}";
setPublicVar CI_REPO_NAME "$CIRCLE_PROJECT_REPONAME";
setPublicVar CI_REPO_OWNER "$CIRCLE_PROJECT_USERNAME";
setPublicVar CI_PR_REPONAME "$CIRCLE_PR_REPONAME";
setPublicVar CI_PR_USERNAME "$CIRCLE_PR_USERNAME";
####################################################################################################
# Define "lazy" PUBLIC environment variables for CircleCI.
# (I.e. functions to set an environment variable when called.)
####################################################################################################
createPublicVarSetter CI_STABLE_BRANCH "\$(npm info @angular/core dist-tags.latest | sed -r 's/^\\s*([0-9]+\\.[0-9]+)\\.[0-9]+.*$/\\1.x/')";
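# For example (illustrative): if `npm info @angular/core dist-tags.latest`
# printed "11.2.3", the sed above would yield "11.2.x".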
####################################################################################################
# Define SECRET environment variables for CircleCI.
####################################################################################################
setSecretVar CI_SECRET_AIO_DEPLOY_FIREBASE_TOKEN "$AIO_DEPLOY_TOKEN";
setSecretVar CI_SECRET_PAYLOAD_FIREBASE_TOKEN "$ANGULAR_PAYLOAD_TOKEN";
####################################################################################################
# Define SauceLabs environment variables for CircleCI.
####################################################################################################
setPublicVar SAUCE_USERNAME "angular-framework";
setSecretVar SAUCE_ACCESS_KEY "0c731274ed5f-cbc9-16f4-021a-9835e39f";
# TODO(josephperrott): Remove environment variables once all saucelabs tests are via bazel method.
setPublicVar SAUCE_LOG_FILE /tmp/angular/sauce-connect.log
setPublicVar SAUCE_READY_FILE /tmp/angular/sauce-connect-ready-file.lock
setPublicVar SAUCE_PID_FILE /tmp/angular/sauce-connect-pid-file.lock
setPublicVar SAUCE_TUNNEL_IDENTIFIER "angular-framework-${CIRCLE_BUILD_NUM}-${CIRCLE_NODE_INDEX}"
# Amount of seconds we wait for sauceconnect to establish a tunnel instance. In order to not
# acquire CircleCI instances for too long if sauceconnect failed, we need a connect timeout.
setPublicVar SAUCE_READY_FILE_TIMEOUT 120
####################################################################################################
# Define environment variables for the `angular/components` repo unit tests job.
####################################################################################################
# We specifically use a directory within "/tmp" here because we want the cloned repo to be
# completely isolated from angular/angular in order to avoid any bad interactions between
# their separate build setups. **NOTE**: When updating the temporary directory, also update
# the `save_cache` path configuration in `config.yml`
setPublicVar COMPONENTS_REPO_TMP_DIR "/tmp/angular-components-repo"
setPublicVar COMPONENTS_REPO_URL "https://github.com/angular/components.git"
setPublicVar COMPONENTS_REPO_BRANCH "master"
# **NOTE**: When updating the commit SHA, also update the cache key in the CircleCI `config.yml`.
setPublicVar COMPONENTS_REPO_COMMIT "a931de54a786597b34259e461c2cf3ab6edc590a"
####################################################################################################
# Decrypt GCP Credentials and store them as the Google default credentials.
####################################################################################################
mkdir -p "$HOME/.config/gcloud";
openssl aes-256-cbc -d -in "${projectDir}/.circleci/gcp_token" \
-md md5 -k "$CIRCLE_PROJECT_REPONAME" -out "$HOME/.config/gcloud/application_default_credentials.json"
####################################################################################################
# Set bazel configuration for CircleCI runs.
####################################################################################################
cp "${projectDir}/.circleci/bazel.linux.rc" "$HOME/.bazelrc";
####################################################################################################
# Create shell script in /tmp for Bazel actions to access CI envs without
# busting the cache. Used by payload-size.sh script in integration tests.
####################################################################################################
readonly bazelVarEnv="/tmp/bazel-ci-env.sh"
echo "# Setup by /.circle/env.sh" > $bazelVarEnv
echo "export PROJECT_ROOT=\"${PROJECT_ROOT}\";" >> $bazelVarEnv
echo "export CI_BRANCH=\"${CI_BRANCH}\";" >> $bazelVarEnv
echo "export CI_BUILD_URL=\"${CI_BUILD_URL}\";" >> $bazelVarEnv
echo "export CI_COMMIT=\"${CI_COMMIT}\";" >> $bazelVarEnv
echo "export CI_COMMIT_RANGE=\"${CI_COMMIT_RANGE}\";" >> $bazelVarEnv
echo "export CI_PULL_REQUEST=\"${CI_PULL_REQUEST}\";" >> $bazelVarEnv
echo "export CI_REPO_NAME=\"${CI_REPO_NAME}\";" >> $bazelVarEnv
echo "export CI_REPO_OWNER=\"${CI_REPO_OWNER}\";" >> $bazelVarEnv
echo "export CI_SECRET_PAYLOAD_FIREBASE_TOKEN=\"${CI_SECRET_PAYLOAD_FIREBASE_TOKEN}\";" >> $bazelVarEnv
####################################################################################################
####################################################################################################
## Source `$BASH_ENV` to make the variables available immediately. ##
## ***NOTE: This must remain the last action in this script*** ##
####################################################################################################
####################################################################################################
source $BASH_ENV;
|
ocombe/angular
|
.circleci/env.sh
|
Shell
|
mit
| 7,045 |
#!/bin/sh
# Base16 Atelier Plateau - Shell color setup script
# Bram de Haan (http://atelierbram.github.io/syntax-highlighting/atelier-schemes/plateau)
if [ "${TERM%%-*}" = 'linux' ]; then
# This script doesn't support linux console (use 'vconsole' template instead)
return 2>/dev/null || exit 0
fi
color00="1b/18/18" # Base 00 - Black
color01="ca/49/49" # Base 08 - Red
color02="4b/8b/8b" # Base 0B - Green
color03="a0/6e/3b" # Base 0A - Yellow
color04="72/72/ca" # Base 0D - Blue
color05="84/64/c4" # Base 0E - Magenta
color06="54/85/b6" # Base 0C - Cyan
color07="8a/85/85" # Base 05 - White
color08="65/5d/5d" # Base 03 - Bright Black
color09=$color01 # Base 08 - Bright Red
color10=$color02 # Base 0B - Bright Green
color11=$color03 # Base 0A - Bright Yellow
color12=$color04 # Base 0D - Bright Blue
color13=$color05 # Base 0E - Bright Magenta
color14=$color06 # Base 0C - Bright Cyan
color15="f4/ec/ec" # Base 07 - Bright White
color16="b4/5a/3c" # Base 09
color17="bd/51/87" # Base 0F
color18="29/24/24" # Base 01
color19="58/50/50" # Base 02
color20="7e/77/77" # Base 04
color21="e7/df/df" # Base 06
color_foreground="58/50/50" # Base 02
color_background="f4/ec/ec" # Base 07
color_cursor="58/50/50" # Base 02
if [ -n "$TMUX" ]; then
# tell tmux to pass the escape sequences through
# (Source: http://permalink.gmane.org/gmane.comp.terminal-emulators.tmux.user/1324)
printf_template="\033Ptmux;\033\033]4;%d;rgb:%s\007\033\\"
printf_template_var="\033Ptmux;\033\033]%d;rgb:%s\007\033\\"
printf_template_custom="\033Ptmux;\033\033]%s%s\007\033\\"
elif [ "${TERM%%-*}" = "screen" ]; then
# GNU screen (screen, screen-256color, screen-256color-bce)
printf_template="\033P\033]4;%d;rgb:%s\007\033\\"
printf_template_var="\033P\033]%d;rgb:%s\007\033\\"
printf_template_custom="\033P\033]%s%s\007\033\\"
else
printf_template="\033]4;%d;rgb:%s\033\\"
printf_template_var="\033]%d;rgb:%s\033\\"
printf_template_custom="\033]%s%s\033\\"
fi
# 16 color space
printf $printf_template 0 $color00
printf $printf_template 1 $color01
printf $printf_template 2 $color02
printf $printf_template 3 $color03
printf $printf_template 4 $color04
printf $printf_template 5 $color05
printf $printf_template 6 $color06
printf $printf_template 7 $color07
printf $printf_template 8 $color08
printf $printf_template 9 $color09
printf $printf_template 10 $color10
printf $printf_template 11 $color11
printf $printf_template 12 $color12
printf $printf_template 13 $color13
printf $printf_template 14 $color14
printf $printf_template 15 $color15
# 256 color space
printf $printf_template 16 $color16
printf $printf_template 17 $color17
printf $printf_template 18 $color18
printf $printf_template 19 $color19
printf $printf_template 20 $color20
printf $printf_template 21 $color21
# foreground / background / cursor color
if [ -n "$ITERM_SESSION_ID" ]; then
# iTerm2 proprietary escape codes
printf $printf_template_custom Pg 585050 # foreground
printf $printf_template_custom Ph f4ecec # background
printf $printf_template_custom Pi 585050 # bold color
printf $printf_template_custom Pj 8a8585 # selection color
printf $printf_template_custom Pk 585050 # selected text color
printf $printf_template_custom Pl 585050 # cursor
printf $printf_template_custom Pm f4ecec # cursor text
else
printf $printf_template_var 10 $color_foreground
printf $printf_template_var 11 $color_background
printf $printf_template_custom 12 ";7" # cursor (reverse video)
fi
# clean up
unset printf_template
unset printf_template_var
unset color00
unset color01
unset color02
unset color03
unset color04
unset color05
unset color06
unset color07
unset color08
unset color09
unset color10
unset color11
unset color12
unset color13
unset color14
unset color15
unset color16
unset color17
unset color18
unset color19
unset color20
unset color21
unset color_foreground
unset color_background
unset color_cursor
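# Usage note (assumed): source this file from an interactive shell or your shell
# startup so the escape sequences reach the terminal, e.g.
#   . path/to/base16-atelierplateau.light.sh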
|
atelierbram/syntax-highlighting
|
docs/atelier-schemes/output/shell/base16-atelierplateau.light.sh
|
Shell
|
mit
| 3,927 |
#!/bin/bash
# Bail if we are not running inside VMWare.
if [[ ! $(/usr/sbin/prtdiag) =~ "VMware" ]]; then
exit 0
fi
# Install the VMWare Tools from a solaris ISO.
#wget http://192.168.0.185/solaris.iso -P /tmp
mkdir -p /mnt/vmware
mount -o loop /home/vagrant/solaris.iso /mnt/vmware
cd /tmp
tar xzf /mnt/vmware/VMwareTools-*.tar.gz
umount /mnt/vmware
rm -fr /home/vagrant/solaris.iso
/tmp/vmware-tools-distrib/vmware-install.pl -d
rm -fr /tmp/vmware-tools-distrib
|
nanobox-io/vagrant-packer-templates
|
smartos-x86_64/scripts/vmware.sh
|
Shell
|
mit
| 473 |
#!/bin/bash
set -e
: ${CLUSTER:=ceph}
: ${RGW_NAME:=$(hostname -s)}
: ${MON_NAME:=$(hostname -s)}
: ${RGW_CIVETWEB_PORT:=80}
CEPH_OPTS="--cluster ${CLUSTER}"
#######
# MON #
#######
if [ ! -n "$CEPH_NETWORK" ]; then
echo "ERROR- CEPH_NETWORK must be defined as the name of the network for the OSDs"
exit 1
fi
if [ ! -n "$MON_IP" ]; then
echo "ERROR- MON_IP must be defined as the IP address of the monitor"
exit 1
fi
# bootstrap MON
if [ ! -e /etc/ceph/${CLUSTER}.conf ]; then
fsid=$(uuidgen)
cat <<ENDHERE >/etc/ceph/${CLUSTER}.conf
[global]
fsid = $fsid
mon initial members = ${MON_NAME}
mon host = ${MON_IP}
auth cluster required = cephx
auth service required = cephx
auth client required = cephx
osd crush chooseleaf type = 0
osd journal size = 100
osd pool default pg num = 8
osd pool default pgp num = 8
osd pool default size = 1
public network = ${CEPH_NETWORK}
cluster network = ${CEPH_NETWORK}
ENDHERE
# Generate administrator key
ceph-authtool /etc/ceph/${CLUSTER}.client.admin.keyring --create-keyring --gen-key -n client.admin --set-uid=0 --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow'
# Generate the mon. key
ceph-authtool /etc/ceph/${CLUSTER}.mon.keyring --create-keyring --gen-key -n mon. --cap mon 'allow *'
# Generate initial monitor map
monmaptool --create --add ${MON_NAME} ${MON_IP} --fsid ${fsid} /etc/ceph/${CLUSTER}.monmap
fi
# If we don't have a monitor keyring, this is a new monitor
if [ ! -e /var/lib/ceph/mon/${CLUSTER}-${MON_NAME}/keyring ]; then
if [ ! -e /etc/ceph/${CLUSTER}.client.admin.keyring ]; then
echo "ERROR- /etc/ceph/${CLUSTER}.client.admin.keyring must exist; get it from your existing mon"
exit 2
fi
if [ ! -e /etc/ceph/${CLUSTER}.mon.keyring ]; then
echo "ERROR- /etc/ceph/${CLUSTER}.mon.keyring must exist. You can extract it from your current monitor by running 'ceph ${CEPH_OPTS} auth get mon. -o /tmp/${CLUSTER}.mon.keyring'"
exit 3
fi
if [ ! -e /etc/ceph/${CLUSTER}.monmap ]; then
echo "ERROR- /etc/ceph/${CLUSTER}.monmap must exist. You can extract it from your current monitor by running 'ceph ${CEPH_OPTS} mon getmap -o /tmp/monmap'"
exit 4
fi
# Import the client.admin keyring and the monitor keyring into a new, temporary one
ceph-authtool /tmp/${CLUSTER}.mon.keyring --create-keyring --import-keyring /etc/ceph/${CLUSTER}.client.admin.keyring
ceph-authtool /tmp/${CLUSTER}.mon.keyring --import-keyring /etc/ceph/${CLUSTER}.mon.keyring
# Make the monitor directory
mkdir -p /var/lib/ceph/mon/${CLUSTER}-${MON_NAME}
# Make user 'ceph' the owner of all the tree
chown ceph. /var/lib/ceph/bootstrap-{osd,mds,rgw}
# Prepare the monitor daemon's directory with the map and keyring
ceph-mon ${CEPH_OPTS} --setuser ceph --setgroup ceph --mkfs -i ${MON_NAME} --monmap /etc/ceph/${CLUSTER}.monmap --keyring /tmp/${CLUSTER}.mon.keyring --mon-data /var/lib/ceph/mon/${CLUSTER}-${MON_NAME}
# Clean up the temporary key
rm /tmp/${CLUSTER}.mon.keyring
fi
# start MON
ceph-mon ${CEPH_OPTS} -i ${MON_NAME} --public-addr "${MON_IP}:6789" --setuser ceph --setgroup ceph
# change replica size
ceph ${CEPH_OPTS} osd pool set rbd size 1
#######
# OSD #
#######
if [ ! -e /var/lib/ceph/osd/${CLUSTER}-0/keyring ]; then
# bootstrap OSD
mkdir -p /var/lib/ceph/osd/${CLUSTER}-0
chown ceph. /var/lib/ceph/osd/${CLUSTER}-0
ceph ${CEPH_OPTS} osd create
ceph-osd ${CEPH_OPTS} -i 0 --mkfs --setuser ceph --setgroup ceph
ceph ${CEPH_OPTS} auth get-or-create osd.0 osd 'allow *' mon 'allow profile osd' -o /var/lib/ceph/osd/${CLUSTER}-0/keyring
ceph ${CEPH_OPTS} osd crush add 0 1 root=default host=$(hostname -s)
ceph-osd ${CEPH_OPTS} -i 0 -k /var/lib/ceph/osd/${CLUSTER}-0/keyring
fi
# start OSD
ceph-osd ${CEPH_OPTS} -i 0 --setuser ceph --setgroup disk
#######
# MDS #
#######
if [ ! -e /var/lib/ceph/mds/${CLUSTER}-0/keyring ]; then
# create ceph filesystem
ceph ${CEPH_OPTS} osd pool create cephfs_data 8
ceph ${CEPH_OPTS} osd pool create cephfs_metadata 8
ceph ${CEPH_OPTS} fs new cephfs cephfs_metadata cephfs_data
# bootstrap MDS
mkdir -p /var/lib/ceph/mds/${CLUSTER}-0
chown ceph. /var/lib/ceph/mds/${CLUSTER}-0
ceph ${CEPH_OPTS} auth get-or-create mds.0 mds 'allow' osd 'allow *' mon 'allow profile mds' > /var/lib/ceph/mds/${CLUSTER}-0/keyring
fi
# start MDS
ceph-mds ${CEPH_OPTS} -i 0 --setuser ceph --setgroup ceph
#######
# RGW #
#######
if [ ! -e /var/lib/ceph/radosgw/${RGW_NAME}/keyring ]; then
# bootstrap RGW
mkdir -p /var/lib/ceph/radosgw/${RGW_NAME}
chown ceph. /var/lib/ceph/radosgw/${RGW_NAME}
ceph ${CEPH_OPTS} auth get-or-create client.radosgw.gateway osd 'allow rwx' mon 'allow rw' -o /var/lib/ceph/radosgw/${RGW_NAME}/keyring
fi
# start RGW
radosgw ${CEPH_OPTS} -c /etc/ceph/${CLUSTER}.conf -n client.radosgw.gateway -k /var/lib/ceph/radosgw/${RGW_NAME}/keyring --rgw-socket-path="" --rgw-frontends="civetweb port=${RGW_CIVETWEB_PORT}" --setuser ceph --setgroup ceph
#######
# API #
#######
# start ceph-rest-api
ceph-rest-api ${CEPH_OPTS} -n client.admin &
#########
# WATCH #
#########
exec ceph ${CEPH_OPTS} -w
|
fmeppo/ceph-docker
|
ceph-releases/jewel/ubuntu/14.04/demo/entrypoint.sh
|
Shell
|
mit
| 5,290 |
#!/bin/bash
FN="TxDb.Ggallus.UCSC.galGal4.refGene_3.11.0.tar.gz"
URLS=(
"https://bioconductor.org/packages/3.11/data/annotation/src/contrib/TxDb.Ggallus.UCSC.galGal4.refGene_3.11.0.tar.gz"
"https://bioarchive.galaxyproject.org/TxDb.Ggallus.UCSC.galGal4.refGene_3.11.0.tar.gz"
"https://depot.galaxyproject.org/software/bioconductor-txdb.ggallus.ucsc.galgal4.refgene/bioconductor-txdb.ggallus.ucsc.galgal4.refgene_3.11.0_src_all.tar.gz"
)
MD5="bfd9c30488b075729bca1f8c4346dee4"
# Use a staging area in the conda dir rather than temp dirs, both to avoid
# permission issues as well as to have things downloaded in a predictable
# manner.
STAGING=$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM
mkdir -p $STAGING
TARBALL=$STAGING/$FN
SUCCESS=0
for URL in ${URLS[@]}; do
curl $URL > $TARBALL
[[ $? == 0 ]] || continue
# Platform-specific md5sum checks.
if [[ $(uname -s) == "Linux" ]]; then
if md5sum -c <<<"$MD5 $TARBALL"; then
SUCCESS=1
break
fi
elif [[ $(uname -s) == "Darwin" ]]; then
  if [[ $(md5 $TARBALL | cut -f4 -d " ") == "$MD5" ]]; then
    SUCCESS=1
    break
  fi
fi
done
if [[ $SUCCESS != 1 ]]; then
echo "ERROR: post-link.sh was unable to download any of the following URLs with the md5sum $MD5:"
printf '%s\n' "${URLS[@]}"
exit 1
fi
# Install and clean up
R CMD INSTALL --library=$PREFIX/lib/R/library $TARBALL
rm $TARBALL
rmdir $STAGING
|
roryk/recipes
|
recipes/bioconductor-txdb.ggallus.ucsc.galgal4.refgene/post-link.sh
|
Shell
|
mit
| 1,417 |
#!/bin/bash
#
# Run a single test file with coverage.
#
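# Usage (inferred from the arguments forwarded to manage.py below):
#   ./run_test.sh <test_label> [<second_test_label>]
#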
PREFIX=$(pwd)
COVERAGE_FILE=.coveragerc
COVERAGE_DIR=${PREFIX}/.coverage_tests
coverage erase --rcfile=${COVERAGE_FILE}
rm -rf ${COVERAGE_DIR}
mkdir -p ${COVERAGE_DIR}
coverage run manage.py test $1 $2
mv .coverage.* ${COVERAGE_DIR}
coverage combine --rcfile=${COVERAGE_FILE} ${COVERAGE_DIR}
coverage report -m --rcfile=${COVERAGE_FILE}
|
cnobile2012/dcolumn
|
run_test.sh
|
Shell
|
mit
| 395 |
go get github.com/alexandrevicenzi/go-sse
go build gossed.go
|
benas/ssed
|
build.sh
|
Shell
|
mit
| 60 |
#!/bin/sh
# Run this update as the root account
#echo -e "Run this update as the root account"
# Set up the configuration directories
mkdir -p /etc/judge/conf.d
mkdir -p /etc/judge/bin
cp -r config/judge.conf /etc/judge/conf.d
cp -r bin/* /etc/judge/bin
# finished
echo -e "Everything is ready!"
echo -e "Have a nice day"
|
Alrash/OnlineJudge
|
judge/config.sh
|
Shell
|
mit
| 281 |
#!/bin/sh
set -e
/usr/sbin/sshd -D &
dockerd --storage-driver=vfs --host=unix:///var/run/docker.sock "$@"
|
toshke/ciinabox-containers
|
jenkins-dind-edge-slave/jenkins-docker-slave.sh
|
Shell
|
mit
| 109 |
lua ~/depthSCANN/input/depthTuning/generate_downsample_depth.lua > ~/depthSCANN/input/depthTuning/generated/generate_downsample_depth.params;
#One GPU run
~/depthSCANN/Release/depthSCANN -p ~/depthSCANN/input/depthTuning/generated/generate_downsample_depth.params -t 8
#MultiGPU run
#mpirun -np 4 --bind-to none ~/depthSCANN/Release/depthSCANN -p ~/depthSCANN/input/benchmarkEncoding/generated/encode_LCA.params -t 8;
|
dpaiton/OpenPV
|
projects/depthSCANN/runScripts/depthTuning/runBenchmarkEncodeLCA.sh
|
Shell
|
epl-1.0
| 419 |
# 32-bit Linux build
gcc -Wall -O2 -fomit-frame-pointer -DNDEBUG -D_REENTRANT -m32 -march=i586 -I. -I.. -s -o fpsieve-x86-linux ../main.c ../sieve.c ../clock.c ../util.c app.c have_sse2.S factorial4_x86.S factorial4_x86_sse2.S primorial4_x86.S primorial4_x86_sse2.S -lm -lpthread
# 32-bit Windows cross-build (MinGW)
i586-mingw32msvc-gcc -Wall -O2 -fomit-frame-pointer -DNDEBUG -D_REENTRANT -m32 -march=i586 -I. -I.. -s -o fpsieve-x86-windows.exe ../main.c ../sieve.c ../clock.c ../util.c app.c have_sse2.S factorial4_x86.S factorial4_x86_sse2.S primorial4_x86.S primorial4_x86_sse2.S -lm
# 64-bit Linux build
gcc -Wall -O2 -DNDEBUG -D_REENTRANT -m64 -march=k8 -I. -I.. -s -o fpsieve-x86_64-linux ../main.c ../sieve.c ../clock.c ../util.c app.c factorial4_x86_64.S primorial4_x86_64.S -lm -lpthread
# 64-bit Windows build, cross-compiled on the remote host "coo"
ssh coo "(cd `pwd`; x86_64-pc-mingw32-gcc -Wall -O2 -DNDEBUG -D_REENTRANT -m64 -march=k8 -I. -I.. -s -o fpsieve-x86_64-windows.exe ../main.c ../sieve.c ../clock.c ../util.c app.c factorial4_x86_64.S primorial4_x86_64.S -lm)"
# Package the release binaries
rm -f fpsieve-0.2.4-bin.zip
zip -9 fpsieve-0.2.4-bin.zip README.txt CHANGES.txt fpconfig.txt fpsieve-x86-linux fpsieve-x86_64-linux fpsieve-x86-windows.exe fpsieve-x86_64-windows.exe
|
Ken-g6/PSieve
|
fpsieve/make-bins.sh
|
Shell
|
gpl-2.0
| 1,138 |
# The use of this script is documented at this address:
# https://github.com/dorian-marchal/spawnkill/blob/master/documentation/LocalUserscriptDev.md
# Development directory of the script
dev_script_path="/path/to/www/spawnkill/"
# Name of the script's main file
main_script_file="jvc-spawnkill.user.js"
# Firefox-specific, not needed on Chrome
# Greasemonkey directory of the script
# /!\ Everything in this directory will be deleted!
# Be careful not to make a mistake when entering this variable!
gm_script_path="/home/<user>/.mozilla/firefox/<profile_name>/gm_scripts/JVC_SpawnKill/"
|
Delgan/spawnkill
|
other/local-dev/local-dev-config.default.sh
|
Shell
|
gpl-2.0
| 622 |
#!/bin/bash
# redirect stdout and stderr to logfile
#rm /home/hivetool/hivetool.log
exec >>/home/hivetool/hivetool.log 2>&1
HOST=`hostname`
DATE=`date +"%Y/%m/%d %H:%M"`
COUNTER=0
DATA_GOOD=0 # becomes 1 once a complete temperature/humidity reading is parsed
while [[ $COUNTER -lt 10 && $DATA_GOOD -eq 0 ]]; do
TEMPerHUM=`/usr/local/bin/tempered -s F /dev/hidraw1`
if [[ -n $TEMPerHUM ]]
then
HUMIDITY=`echo $TEMPerHUM | grep -o "[0-9]*\.[0-9]\%" | grep -o "[0-9]*\.[0-9]"`
TEMP=`echo $TEMPerHUM | grep -o "temperature \-*[0-9]*\.[0-9]" | grep -o "\-*[0-9]*\.[0-9]"`
if [[ -n "$TEMP" && -n "$HUMIDITY" ]]
# if [[ $TEMP && $HUMIDITY ]]
then
DATA_GOOD=1
fi
fi
let COUNTER=COUNTER+1
done
echo $HOST $COUNTER $TEMP $HUMIDITY
if [[ $COUNTER -gt 9 ]]
then
echo "$DATE ERROR reading /dev/hidraw1" >> error.log
fi
if [[ $COUNTER -lt 10 && $COUNTER -gt 1 ]]
then
echo "$DATE WARNING reading /dev/hidraw1: retried $COUNTER" >> error.log
fi
|
rcrum003/HiveControl
|
scripts/temp/temper_test.sh
|
Shell
|
gpl-2.0
| 997 |
#!/bin/sh
[[ -d release ]] || {
echo "must be in kernel root dir"
exit 1;
}
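# Usage (inferred from the argument handling below): doit.sh [TYPE] [RELVER]
# TYPE defaults to SGS; RELVER defaults to 0 (parsed but unused in the zip name).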
echo "packaging it up"
TYPE=$1
[[ "$TYPE" == '' ]] && TYPE=SGS
RELVER=$2
[[ "$RELVER" == '' ]] && RELVER="0"
#REL=CM7_${TYPE}_$(date +%Y%m%d-%H)_platypus.zip
REL=CM7_${TYPE}_$(date +%Y%m%d)_NEO_3.0_VC-led-notif.zip
rm -r release/system 2> /dev/null
mkdir -p release/system/bin || exit 1
mkdir -p release/system/lib/modules || exit 1
mkdir -p release/system/lib/hw || exit 1
mkdir -p release/system/etc/init.d || exit 1
#cp release/logger.module release/system/lib/modules/logger.ko
find . -name "*.ko" -exec cp {} release/system/lib/modules/ \; 2>/dev/null || exit 1
cd release && {
# cp 91logger system/etc/init.d/ || exit 1
# cp S98system_tweak system/etc/init.d/ || exit 1
# cp 98crunchengine system/etc/init.d/ || exit 1
cp S70zipalign system/etc/init.d/ || exit 1
cp lights.aries.so system/lib/hw/ || exit 1
# cp lights.aries.so.BLN system/lib/hw/lights.aries.so || exit 1
mkdir -p system/bin
# cp bin/rild_old system/bin/rild
# cp libril.so_old system/lib/libril.so
# cp libsecril-client.so_old system/lib/libsecril-client.so
zip -q -r ${REL} system boot.img META-INF erase_image flash_image bml_over_mtd bml_over_mtd.sh || exit 1
sha256sum ${REL} > ${REL}.sha256sum
rm -rf ${TYPE} || exit 1
mkdir -p ${TYPE} || exit 1
mv ${REL}* ${TYPE} || exit 1
} || exit 1
echo ${REL}
rm system/lib/modules/*
rm system/lib/hw/*
rm system/etc/init.d/*
rm system/bin/*
exit 0
|
ngiordano/chimera_kernel
|
release/doit.sh
|
Shell
|
gpl-2.0
| 1,472 |
#!/bin/bash
function addCategory {
CA_NAME="$1"
CA_DESC="$2"
CA_URL="$3"
params_add_ca="<methodCall><methodName>addCategory</methodName><params><param><struct><member><name>name</name><value>$CA_NAME</value></member><member><name>description</name><value>$CA_DESC</value></member><member><name>url</name><value>$CA_URL</value></member></struct></param></params></methodCall>"
response_add_ca=`curl -s -d "$params_add_ca" http://geo2tag.cs.prv/gets/geo2tag.php`
echo "Response (add category): "
echo "$response_add_ca"
}
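# Example call (hypothetical values):
#   addCategory "Museums" "City museums and exhibitions" "http://example.com/museums"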
function getCategories {
params_get_ca="<methodCall><methodName>getCategories</methodName></methodCall>"
response_get_ca=`curl -s -d "$params_get_ca" http://geo2tag.cs.prv/gets/geo2tag.php`
echo "Response (get categories): "
echo "$response_get_ca"
}
function removeCategory {
LOGIN="gets2"
PASS="getsPWD"
CA_ID="$1"
params_remove_ca="<methodCall><methodName>deleteCategory</methodName><params><param><struct><member><name>id</name><value>$CA_ID</value></member><member><name>login</name><value>$LOGIN</value></member><member><name>password</name><value>$PASS</value></member></struct></param></params></methodCall>"
response_remove_ca=`curl -s -d "$params_remove_ca" http://geo2tag.cs.prv/gets/geo2tag.php`
echo "Response (remove category): "
echo "$response_remove_ca"
}
if { [ $# == 4 ] && [ "$1" == "--add-category" ]; } then
addCategory $2 $3 $4
exit 0
fi
if { [ $# == 1 ] && [ "$1" == "--get-categories" ]; } then
getCategories
exit 0
fi
if { [ $# == 2 ] && [ "$1" == "--remove-category" ]; } then
removeCategory $2
exit 0
fi
echo "Usage: "
echo "Add category - ./categories.sh --add-category name description url"
echo "Get categories - ./categories.sh --get-categories"
echo "Remove category - ./categories.sh --remove-category id"
exit 0
|
oss-fruct-org/gets
|
service/support/scripts/categories.sh
|
Shell
|
gpl-2.0
| 1,873 |
#
# Bug 1520569: 2.3 creates empty dir after upgrading
#
start_server
# check that directory with timestamp as a name is not created
# (--stream should imply --no-timestamp)
innobackupex --stream=tar $topdir/backup > $topdir/xbs
if [ "$(ls -A $topdir/backup)" ] ; then
die "Directory is created!"
fi
|
janlindstrom/percona-xtrabackup
|
storage/innobase/xtrabackup/test/t/bug1520569.sh
|
Shell
|
gpl-2.0
| 304 |
#!/bin/sh
APPBUNDLE=ioquake3.app
BINARY=ioquake3.ub
DEDBIN=ioq3ded.ub
PKGINFO=APPLIOQ3
ICNS=misc/quake3.icns
DESTDIR=build/release-darwin-ub
BASEDIR=baseq3
MPACKDIR=missionpack
BIN_OBJ="
build/release-darwin-ppc/ioquake3-smp.ppc
build/release-darwin-i386/ioquake3-smp.i386
"
BIN_DEDOBJ="
build/release-darwin-ub/ioq3ded.ppc
build/release-darwin-i386/ioq3ded.i386
"
BASE_OBJ="
build/release-darwin-ppc/$BASEDIR/cgameppc.dylib
build/release-darwin-i386/$BASEDIR/cgamei386.dylib
build/release-darwin-ppc/$BASEDIR/uippc.dylib
build/release-darwin-i386/$BASEDIR/uii386.dylib
build/release-darwin-ppc/$BASEDIR/qagameppc.dylib
build/release-darwin-i386/$BASEDIR/qagamei386.dylib
"
MPACK_OBJ="
build/release-darwin-ppc/$MPACKDIR/cgameppc.dylib
build/release-darwin-i386/$MPACKDIR/cgamei386.dylib
build/release-darwin-ppc/$MPACKDIR/uippc.dylib
build/release-darwin-i386/$MPACKDIR/uii386.dylib
build/release-darwin-ppc/$MPACKDIR/qagameppc.dylib
build/release-darwin-i386/$MPACKDIR/qagamei386.dylib
"
cd `dirname $0`
if [ ! -f Makefile ]; then
echo "This script must be run from the ioquake3 build directory"
exit 1
fi
Q3_VERSION=`grep '^VERSION=' Makefile | sed -e 's/.*=\(.*\)/\1/'`
# We only care if we're >= 10.4, not if we're specifically Tiger.
# "8" is the Darwin major kernel version.
#TIGERHOST=`uname -r | grep ^8.`
TIGERHOST=`uname -r |perl -w -p -e 's/\A(\d+)\..*\Z/$1/; $_ = (($_ >= 8) ? "1" : "0");'`
# we want to use the oldest available SDK for max compatibility
unset PPC_CLIENT_SDK
PPC_CLIENT_CC=gcc
unset PPC_CLIENT_CFLAGS
unset PPC_CLIENT_LDFLAGS
unset PPC_SERVER_SDK
unset PPC_SERVER_CFLAGS
unset PPC_SERVER_LDFLAGS
unset X86_SDK
unset X86_CFLAGS
unset X86_LDFLAGS
if [ -d /Developer/SDKs/MacOSX10.5.sdk ]; then
PPC_CLIENT_SDK=/Developer/SDKs/MacOSX10.5.sdk
PPC_CLIENT_CC=gcc-4.0
PPC_CLIENT_CFLAGS="-arch ppc -isysroot /Developer/SDKs/MacOSX10.5.sdk \
-DMAC_OS_X_VERSION_MIN_REQUIRED=1050"
PPC_CLIENT_LDFLAGS="-arch ppc \
-isysroot /Developer/SDKs/MacOSX10.5.sdk \
-mmacosx-version-min=10.5"
PPC_SERVER_SDK=/Developer/SDKs/MacOSX10.5.sdk
PPC_SERVER_CFLAGS=$PPC_CLIENT_CFLAGS
PPC_SERVER_LDFLAGS=$PPC_CLIENT_LDFLAGS
X86_SDK=/Developer/SDKs/MacOSX10.5.sdk
X86_CFLAGS="-arch i386 -isysroot /Developer/SDKs/MacOSX10.5.sdk \
-DMAC_OS_X_VERSION_MIN_REQUIRED=1050"
X86_LDFLAGS="-arch i386 \
-isysroot /Developer/SDKs/MacOSX10.5.sdk \
-mmacosx-version-min=10.5"
X86_ENV="CFLAGS=$CFLAGS LDFLAGS=$LDFLAGS"
fi
if [ -d /Developer/SDKs/MacOSX10.4u.sdk ]; then
PPC_CLIENT_SDK=/Developer/SDKs/MacOSX10.4u.sdk
PPC_CLIENT_CC=gcc-4.0
PPC_CLIENT_CFLAGS="-arch ppc -isysroot /Developer/SDKs/MacOSX10.4u.sdk \
-DMAC_OS_X_VERSION_MIN_REQUIRED=1040"
PPC_CLIENT_LDFLAGS="-arch ppc \
-isysroot /Developer/SDKs/MacOSX10.4u.sdk \
-mmacosx-version-min=10.4"
PPC_SERVER_SDK=/Developer/SDKs/MacOSX10.4u.sdk
PPC_SERVER_CFLAGS=$PPC_CLIENT_CFLAGS
PPC_SERVER_LDFLAGS=$PPC_CLIENT_LDFLAGS
X86_SDK=/Developer/SDKs/MacOSX10.4u.sdk
X86_CFLAGS="-arch i386 -isysroot /Developer/SDKs/MacOSX10.4u.sdk \
-DMAC_OS_X_VERSION_MIN_REQUIRED=1040"
X86_LDFLAGS="-arch i386 \
-isysroot /Developer/SDKs/MacOSX10.4u.sdk \
-mmacosx-version-min=10.4"
X86_ENV="CFLAGS=$CFLAGS LDFLAGS=$LDFLAGS"
fi
if [ -d /Developer/SDKs/MacOSX10.3.9.sdk ] && [ $TIGERHOST ]; then
PPC_CLIENT_SDK=/Developer/SDKs/MacOSX10.3.9.sdk
PPC_CLIENT_CC=gcc-4.0
PPC_CLIENT_CFLAGS="-arch ppc -isysroot /Developer/SDKs/MacOSX10.3.9.sdk \
-DMAC_OS_X_VERSION_MIN_REQUIRED=1030"
PPC_CLIENT_LDFLAGS="-arch ppc \
-isysroot /Developer/SDKs/MacOSX10.3.9.sdk \
-mmacosx-version-min=10.3"
PPC_SERVER_SDK=/Developer/SDKs/MacOSX10.3.9.sdk
PPC_SERVER_CFLAGS=$PPC_CLIENT_CFLAGS
PPC_SERVER_LDFLAGS=$PPC_CLIENT_LDFLAGS
fi
if [ -d /Developer/SDKs/MacOSX10.2.8.sdk ] && [ -x /usr/bin/gcc-3.3 ] && [ $TIGERHOST ]; then
PPC_CLIENT_SDK=/Developer/SDKs/MacOSX10.2.8.sdk
PPC_CLIENT_CC=gcc-3.3
PPC_CLIENT_CFLAGS="-arch ppc \
-nostdinc \
-F/Developer/SDKs/MacOSX10.2.8.sdk/System/Library/Frameworks \
-I/Developer/SDKs/MacOSX10.2.8.sdk/usr/include/gcc/darwin/3.3 \
-isystem /Developer/SDKs/MacOSX10.2.8.sdk/usr/include \
-DMAC_OS_X_VERSION_MIN_REQUIRED=1020"
PPC_CLIENT_LDFLAGS="-arch ppc \
-L/Developer/SDKs/MacOSX10.2.8.sdk/usr/lib/gcc/darwin/3.3 \
-F/Developer/SDKs/MacOSX10.2.8.sdk/System/Library/Frameworks \
-Wl,-syslibroot,/Developer/SDKs/MacOSX10.2.8.sdk,-m"
fi
if [ -z $PPC_CLIENT_SDK ] || [ -z $PPC_SERVER_SDK ] || [ -z $X86_SDK ]; then
echo "\
ERROR: This script is for building a Universal Binary. You cannot build
for a different architecture unless you have the proper Mac OS X SDKs
installed. If you just want to compile for your own system, run
'make' instead of this script."
exit 1
fi
echo "Building PPC Dedicated Server against \"$PPC_SERVER_SDK\""
echo "Building PPC Client against \"$PPC_CLIENT_SDK\""
echo "Building X86 Client/Dedicated Server against \"$X86_SDK\""
if [ "$PPC_CLIENT_SDK" != "/Developer/SDKs/MacOSX10.2.8.sdk" ] || \
[ "$PPC_SERVER_SDK" != "/Developer/SDKs/MacOSX10.3.9.sdk" ] || \
[ "$X86_SDK" != "/Developer/SDKs/MacOSX10.4u.sdk" ]; then
echo "\
WARNING: in order to build a binary with maximum compatibility you must
build on Mac OS X 10.4 using Xcode 2.3 or 2.5 and have the
MacOSX10.2.8, MacOSX10.3.9, and MacOSX10.4u SDKs installed
from the Xcode install disk Packages folder."
fi
sleep 3
if [ ! -d $DESTDIR ]; then
mkdir -p $DESTDIR
fi
# For parallel make on multicore boxes...
NCPU=`sysctl -n hw.ncpu`
# ppc dedicated server
echo "Building Dedicated Server using $PPC_SERVER_SDK"
sleep 2
if [ -d build/release-darwin-ppc ]; then
rm -r build/release-darwin-ppc
fi
(ARCH=ppc BUILD_CLIENT_SMP=0 BUILD_CLIENT=0 BUILD_GAME_VM=0 BUILD_GAME_SO=0 \
CFLAGS=$PPC_SERVER_CFLAGS LDFLAGS=$PPC_SERVER_LDFLAGS make -j$NCPU) || exit 1;
cp build/release-darwin-ppc/ioq3ded.ppc $DESTDIR
# ppc client
if [ -d build/release-darwin-ppc ]; then
rm -r build/release-darwin-ppc
fi
(ARCH=ppc USE_OPENAL_DLOPEN=1 BUILD_SERVER=0 CC=$PPC_CLIENT_CC \
CFLAGS=$PPC_CLIENT_CFLAGS LDFLAGS=$PPC_CLIENT_LDFLAGS make -j$NCPU) || exit 1;
# intel client and server
if [ -d build/release-darwin-i386 ]; then
rm -r build/release-darwin-i386
fi
(ARCH=i386 CFLAGS=$X86_CFLAGS LDFLAGS=$X86_LDFLAGS make -j$NCPU) || exit 1;
echo "Creating .app bundle $DESTDIR/$APPBUNDLE"
if [ ! -d $DESTDIR/$APPBUNDLE/Contents/MacOS/$BASEDIR ]; then
mkdir -p $DESTDIR/$APPBUNDLE/Contents/MacOS/$BASEDIR || exit 1;
fi
if [ ! -d $DESTDIR/$APPBUNDLE/Contents/MacOS/$MPACKDIR ]; then
mkdir -p $DESTDIR/$APPBUNDLE/Contents/MacOS/$MPACKDIR || exit 1;
fi
if [ ! -d $DESTDIR/$APPBUNDLE/Contents/Resources ]; then
mkdir -p $DESTDIR/$APPBUNDLE/Contents/Resources
fi
cp $ICNS $DESTDIR/$APPBUNDLE/Contents/Resources/ioquake3.icns || exit 1;
echo $PKGINFO > $DESTDIR/$APPBUNDLE/Contents/PkgInfo
echo "
<?xml version=\"1.0\" encoding=\"UTF-8\"?>
<!DOCTYPE plist
PUBLIC \"-//Apple Computer//DTD PLIST 1.0//EN\"
\"http://www.apple.com/DTDs/PropertyList-1.0.dtd\">
<plist version=\"1.0\">
<dict>
<key>CFBundleDevelopmentRegion</key>
<string>English</string>
<key>CFBundleExecutable</key>
<string>$BINARY</string>
<key>CFBundleGetInfoString</key>
<string>ioquake3 $Q3_VERSION</string>
<key>CFBundleIconFile</key>
<string>ioquake3.icns</string>
<key>CFBundleIdentifier</key>
<string>org.icculus.quake3</string>
<key>CFBundleInfoDictionaryVersion</key>
<string>6.0</string>
<key>CFBundleName</key>
<string>ioquake3</string>
<key>CFBundlePackageType</key>
<string>APPL</string>
<key>CFBundleShortVersionString</key>
<string>$Q3_VERSION</string>
<key>CFBundleSignature</key>
<string>$PKGINFO</string>
<key>CFBundleVersion</key>
<string>$Q3_VERSION</string>
<key>NSExtensions</key>
<dict/>
<key>NSPrincipalClass</key>
<string>NSApplication</string>
</dict>
</plist>
" > $DESTDIR/$APPBUNDLE/Contents/Info.plist
lipo -create -o $DESTDIR/$APPBUNDLE/Contents/MacOS/$BINARY $BIN_OBJ
lipo -create -o $DESTDIR/$APPBUNDLE/Contents/MacOS/$DEDBIN $BIN_DEDOBJ
rm $DESTDIR/ioq3ded.ppc
cp $BASE_OBJ $DESTDIR/$APPBUNDLE/Contents/MacOS/$BASEDIR/
cp $MPACK_OBJ $DESTDIR/$APPBUNDLE/Contents/MacOS/$MPACKDIR/
cp code/libs/macosx/*.dylib $DESTDIR/$APPBUNDLE/Contents/MacOS/
|
raspberrypi/quake3
|
make-macosx-ub.sh
|
Shell
|
gpl-2.0
| 8,277 |
if [ -z "$1" ]; then
# no argument - run all tests found in current dir
set -e
for i in *.ref; do sh $0 "${i/.ref/}"; done
exit 0
fi
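# Single-test mode (inferred from the checks below): runtest.sh [-k] <name>
# where <name>.pd and <name>.ref must exist; -k keeps the generated .out file.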
KEEP_OUTPUT=0
if [ "x$1" = "x-k" ]; then
KEEP_OUTPUT=1
shift
fi
if [ ! -f "$1.pd" ]; then
echo -e "error: $1.pd does not exist" 1>&2
exit 1
fi
if [ ! -f "$1.ref" ]; then
echo -e "error: $1.ref does not exist" 1>&2
exit 1
fi
sed -e "s|%TESTCASE%|$1|" -e "s|%OUTPUT%|$1.out|" runtest.pd.in > "runtest-$1.pd" || exit 1
echo -n "Running test '$1'... ";
"$PD_PATH/bin/pd" -noprefs -nogui -path .. -lib tclpd "runtest-$1.pd"
diff --strip-trailing-cr "$1.ref" "$1.out" 1>/dev/null 2>&1
RESULT=$?
if [ $RESULT -eq 0 ]; then
echo "OK"
else
echo "FAIL"
# show differences:
diff -u --strip-trailing-cr "$1.ref" "$1.out"
fi
rm -f "runtest-$1.pd"
if [ $KEEP_OUTPUT -eq 0 ]; then
rm -f "$1.out"
fi
exit $RESULT
|
rvega/morphasynth
|
vendors/pd-extended-0.43.4/externals/loaders/tclpd/tests/runtest.sh
|
Shell
|
gpl-3.0
| 860 |